repo_name | path | copies | size | content | license
---|---|---|---|---|---
YihaoLu/statsmodels | statsmodels/sandbox/stats/stats_mstats_short.py | 34 | 14910 | '''get versions of mstats percentile functions that also work with non-masked arrays
uses dispatch to mstats version for difficult cases:
- data is masked array
- data requires nan handling (masknan=True)
- data should be trimmed (limit is non-empty)
handles the simple cases directly, which does not require apply_along_axis
changes compared to mstats: plotting_positions for n-dim with axis argument
addition: plotting_positions_w1d: with weights, 1d ndarray only
TODO:
consistency with scipy.stats versions not checked
docstrings from mstats not updated yet
code duplication, better solutions (?)
convert examples to tests
rename alphap, betap for consistency
timing question: one additional argsort versus apply_along_axis
weighted plotting_positions
- I haven't figured out nd version of weighted plotting_positions
- add weighted quantiles
'''
from __future__ import print_function
import numpy as np
from numpy import ma
from scipy import stats
#from numpy.ma import nomask
#####--------------------------------------------------------------------------
#---- --- Percentiles ---
#####--------------------------------------------------------------------------
def quantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None,
limit=(), masknan=False):
"""
Computes empirical quantiles for a data array.
    Sample quantiles are defined by :math:`Q(p) = (1-g).x[i] + g.x[i+1]`,
    where :math:`x[j]` is the *j*th order statistic, and
    `i = floor(n*p + m)`, `m = alphap + p*(1 - alphap - betap)` and `g = n*p + m - i`.
    Typical values of (alphap, betap) are:
        - (0,1)   : *p(k) = k/n* : linear interpolation of cdf (R, type 4)
        - (.5,.5) : *p(k) = (k-1/2.)/n* : piecewise linear
function (R, type 5)
- (0,0) : *p(k) = k/(n+1)* : (R type 6)
- (1,1) : *p(k) = (k-1)/(n-1)*. In this case, p(k) = mode[F(x[k])].
That's R default (R type 7)
- (1/3,1/3): *p(k) = (k-1/3)/(n+1/3)*. Then p(k) ~ median[F(x[k])].
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x. (R type 8)
- (3/8,3/8): *p(k) = (k-3/8)/(n+1/4)*. Blom.
The resulting quantile estimates are approximately unbiased
if x is normally distributed (R type 9)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM ?? JP
- (0.35, 0.65): PWM ?? JP p(k) = (k-0.35)/n
Parameters
----------
a : array-like
Input data, as a sequence or array of dimension at most 2.
prob : array-like, optional
List of quantiles to compute.
    alphap : float, optional
        Plotting positions parameter, default is 0.4.
    betap : float, optional
        Plotting positions parameter, default is 0.4.
    axis : int, optional
        Axis along which the quantiles are computed.
If None (default), the input array is first flattened.
    limit : tuple
        Tuple of (lower, upper) values.
        Values of `a` outside this closed interval are ignored.
    masknan : bool, optional
        If True, NaNs in `a` are treated as masked values and the computation
        is dispatched to ``scipy.stats.mstats.mquantiles``.
Returns
-------
quants : MaskedArray
An array containing the calculated quantiles.
Examples
--------
>>> from scipy.stats.mstats import mquantiles
>>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.])
>>> mquantiles(a)
array([ 19.2, 40. , 42.8])
Using a 2D array, specifying axis and limit.
>>> data = np.array([[ 6., 7., 1.],
[ 47., 15., 2.],
[ 49., 36., 3.],
[ 15., 39., 4.],
[ 42., 40., -999.],
[ 41., 41., -999.],
[ 7., -999., -999.],
[ 39., -999., -999.],
[ 43., -999., -999.],
[ 40., -999., -999.],
[ 36., -999., -999.]])
>>> mquantiles(data, axis=0, limit=(0, 50))
array([[ 19.2 , 14.6 , 1.45],
[ 40. , 37.5 , 2.5 ],
[ 42.8 , 40.05, 3.55]])
>>> data[:, 2] = -999.
>>> mquantiles(data, axis=0, limit=(0, 50))
masked_array(data =
[[19.2 14.6 --]
[40.0 37.5 --]
[42.8 40.05 --]],
mask =
[[False False True]
[False False True]
[False False True]],
fill_value = 1e+20)
"""
    # dispatch the difficult cases (masked input, limits, nan handling) to mstats
    if isinstance(a, np.ma.MaskedArray):
        return stats.mstats.mquantiles(a, prob=prob, alphap=alphap, betap=betap,
                                       axis=axis, limit=limit)
    if limit:
        marr = stats.mstats.mquantiles(a, prob=prob, alphap=alphap, betap=betap,
                                       axis=axis, limit=limit)
        return ma.filled(marr, fill_value=np.nan)
    if masknan:
        nanmask = np.isnan(a)
        if nanmask.any():
            marr = ma.array(a, mask=nanmask)
            marr = stats.mstats.mquantiles(marr, prob=prob, alphap=alphap, betap=betap,
                                           axis=axis, limit=limit)
            return ma.filled(marr, fill_value=np.nan)
# Initialization & checks ---------
data = np.asarray(a)
p = np.array(prob, copy=False, ndmin=1)
m = alphap + p*(1.-alphap-betap)
isrolled = False
#from _quantiles1d
if (axis is None):
data = data.ravel() #reshape(-1,1)
axis = 0
else:
axis = np.arange(data.ndim)[axis]
data = np.rollaxis(data, axis)
isrolled = True # keep track, maybe can be removed
x = np.sort(data, axis=0)
n = x.shape[0]
returnshape = list(data.shape)
    returnshape[axis] = len(p)  # intended output shape (currently unused)
#TODO: check these
if n == 0:
return np.empty(len(p), dtype=float)
elif n == 1:
return np.resize(x, p.shape)
aleph = (n*p + m)
k = np.floor(aleph.clip(1, n-1)).astype(int)
    # index that keeps axis 0 and adds broadcasting axes for the remaining dims
    ind = [None]*x.ndim
    ind[0] = slice(None)
    gamma = (aleph-k).clip(0, 1)[tuple(ind)]
q = (1.-gamma)*x[k-1] + gamma*x[k]
if isrolled:
return np.rollaxis(q, 0, axis+1)
else:
return q
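# --- Illustrative worked example (added; not part of the original module) ----
# For a = [6, 47, 49, 15, 42, 41, 7, 39, 43, 40, 36] (n = 11), p = 0.5 and the
# default alphap = betap = 0.4:  m = 0.4 + 0.5*(1 - 0.8) = 0.5, so
# aleph = 11*0.5 + 0.5 = 6.0, k = 6 and gamma = 0, giving
# Q(0.5) = (1 - 0)*x[5] + 0*x[6] = 40.0 (x sorted, 0-based indexing), which
# matches the median returned by mquantiles(a) in the docstring example above.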
def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4, axis=0, masknan=None):
"""Calculate the score at the given 'per' percentile of the
sequence a. For example, the score at per=50 is the median.
    This function is a shortcut to the local `quantiles` function.
"""
per = np.asarray(per, float)
if (per < 0).any() or (per > 100.).any():
raise ValueError("The percentile should be between 0. and 100. !"\
" (got %s)" % per)
return quantiles(data, prob=[per/100.], alphap=alphap, betap=betap,
limit=limit, axis=axis, masknan=masknan).squeeze()
def plotting_positions(data, alpha=0.4, beta=0.4, axis=0, masknan=False):
"""Returns the plotting positions (or empirical percentile points) for the
data.
Plotting positions are defined as (i-alpha)/(n+1-alpha-beta), where:
- i is the rank order statistics (starting at 1)
- n is the number of unmasked values along the given axis
- alpha and beta are two parameters.
Typical values for alpha and beta are:
- (0,1) : *p(k) = k/n* : linear interpolation of cdf (R, type 4)
- (.5,.5) : *p(k) = (k-1/2.)/n* : piecewise linear function (R, type 5)
(Bliss 1967: "Rankit")
- (0,0) : *p(k) = k/(n+1)* : Weibull (R type 6), (Van der Waerden 1952)
- (1,1) : *p(k) = (k-1)/(n-1)*. In this case, p(k) = mode[F(x[k])].
That's R default (R type 7)
- (1/3,1/3): *p(k) = (k-1/3)/(n+1/3)*. Then p(k) ~ median[F(x[k])].
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x. (R type 8), (Tukey 1962)
- (3/8,3/8): *p(k) = (k-3/8)/(n+1/4)*.
The resulting quantile estimates are approximately unbiased
if x is normally distributed (R type 9) (Blom 1958)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
    Parameters
    ----------
    data : array-like
        Input data, as a sequence or array of dimension at most 2.
    alpha : float, optional
        Plotting positions parameter, default is 0.4.
    beta : float, optional
        Plotting positions parameter, default is 0.4.
    axis : int, optional
        Axis along which the plotting positions are computed, default is 0.
    masknan : bool, optional
        If True, NaNs in `data` are treated as masked values.
Notes
-----
I think the adjustments assume that there are no ties in order to be a reasonable
approximation to a continuous density function. TODO: check this
References
----------
unknown,
dates to original papers from Beasley, Erickson, Allison 2009 Behav Genet
"""
if isinstance(data, np.ma.MaskedArray):
if axis is None or data.ndim == 1:
return stats.mstats.plotting_positions(data, alpha=alpha, beta=beta)
else:
return ma.apply_along_axis(stats.mstats.plotting_positions, axis, data, alpha=alpha, beta=beta)
if masknan:
nanmask = np.isnan(data)
if nanmask.any():
marr = ma.array(data, mask=nanmask)
#code duplication:
if axis is None or data.ndim == 1:
marr = stats.mstats.plotting_positions(marr, alpha=alpha, beta=beta)
else:
marr = ma.apply_along_axis(stats.mstats.plotting_positions, axis, marr, alpha=alpha, beta=beta)
return ma.filled(marr, fill_value=np.nan)
data = np.asarray(data)
if data.size == 1: # use helper function instead
data = np.atleast_1d(data)
axis = 0
if axis is None:
data = data.ravel()
axis = 0
n = data.shape[axis]
if data.ndim == 1:
plpos = np.empty(data.shape, dtype=float)
plpos[data.argsort()] = (np.arange(1,n+1) - alpha)/(n+1.-alpha-beta)
else:
#nd assignment instead of second argsort doesn't look easy
plpos = (data.argsort(axis).argsort(axis) + 1. - alpha)/(n+1.-alpha-beta)
return plpos
meppf = plotting_positions
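# --- Illustrative note (added; not part of the original module) --------------
# With the default alpha = beta = 0.4 and n = 5 observations, the positions of
# the sorted data are (i - 0.4)/(5 + 1 - 0.8) = (i - 0.4)/5.6 for ranks
# i = 1..5, i.e. approximately [0.107, 0.286, 0.464, 0.643, 0.821]; the checks
# in the __main__ block below verify the same formula for n = 10.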
def plotting_positions_w1d(data, weights=None, alpha=0.4, beta=0.4,
method='notnormed'):
'''Weighted plotting positions (or empirical percentile points) for the data.
observations are weighted and the plotting positions are defined as
    (ws-alpha)/(n+1-alpha-beta), where:
- ws is the weighted rank order statistics or cumulative weighted sum,
normalized to n if method is "normed"
- n is the number of values along the given axis if method is "normed"
and total weight otherwise
- alpha and beta are two parameters.
wtd.quantile in R package Hmisc seems to use the "notnormed" version.
    the 'notnormed' version coincides with the unweighted positions in the example below; should the 'normed' version be dropped?
See Also
--------
plotting_positions : unweighted version that works also with more than one
dimension and has other options
'''
x = np.atleast_1d(data)
if x.ndim > 1:
raise ValueError('currently implemented only for 1d')
if weights is None:
weights = np.ones(x.shape)
else:
weights = np.array(weights, float, copy=False, ndmin=1) #atleast_1d(weights)
if weights.shape != x.shape:
            raise ValueError('if weights is given, it needs to be the same '
                             'shape as data')
n = len(x)
xargsort = x.argsort()
ws = weights[xargsort].cumsum()
res = np.empty(x.shape)
if method == 'normed':
res[xargsort] = (1.*ws/ws[-1]*n-alpha)/(n+1.-alpha-beta)
else:
res[xargsort] = (1.*ws-alpha)/(ws[-1]+1.-alpha-beta)
return res
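# --- Illustrative sketch (added; not part of the original module) ------------
# For integer weights and method='notnormed', the weighted position of a value
# equals the unweighted plotting position of its *last* replicate in an
# expanded sample, because the cumulative weight ws plays the role of the rank.
# A minimal check, assuming the small example data below:
#
#     x = np.array([1., 2., 3., 4., 5.])
#     w = np.array([1, 1, 2, 1, 1])
#     pw = plotting_positions_w1d(x, weights=w, method='notnormed')
#     pr = plotting_positions(np.repeat(x, w))
#     # pw[2] == pr[3], i.e. the position of 3. equals that of its last copy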
def edf_normal_inverse_transformed(x, alpha=3./8, beta=3./8, axis=0):
    '''rank based normal inverse transformed cdf
    '''
    # plotting positions give an empirical cdf in (0, 1); the normal ppf maps
    # them to approximately standard normal scores (Blom scores for the
    # default alpha = beta = 3/8)
    ranks = plotting_positions(x, alpha=alpha, beta=beta, axis=axis, masknan=False)
    ranks_transf = stats.norm.ppf(ranks)
    return ranks_transf
if __name__ == '__main__':
x = np.arange(5)
print(plotting_positions(x))
x = np.arange(10).reshape(-1,2)
print(plotting_positions(x))
print(quantiles(x, axis=0))
print(quantiles(x, axis=None))
print(quantiles(x, axis=1))
xm = ma.array(x)
x2 = x.astype(float)
x2[1,0] = np.nan
print(plotting_positions(xm, axis=0))
# test 0d, 1d
for sl1 in [slice(None), 0]:
print((plotting_positions(xm[sl1,0]) == plotting_positions(x[sl1,0])).all())
print((quantiles(xm[sl1,0]) == quantiles(x[sl1,0])).all())
print((stats.mstats.mquantiles(ma.fix_invalid(x2[sl1,0])) == quantiles(x2[sl1,0], masknan=1)).all())
#test 2d
for ax in [0, 1, None, -1]:
print((plotting_positions(xm, axis=ax) == plotting_positions(x, axis=ax)).all())
print((quantiles(xm, axis=ax) == quantiles(x, axis=ax)).all())
print((stats.mstats.mquantiles(ma.fix_invalid(x2), axis=ax) == quantiles(x2, axis=ax, masknan=1)).all())
#stats version doesn't have axis
print((stats.mstats.plotting_positions(ma.fix_invalid(x2)) == plotting_positions(x2, axis=None, masknan=1)).all())
#test 3d
x3 = np.dstack((x,x)).T
for ax in [1,2]:
print((plotting_positions(x3, axis=ax)[0] == plotting_positions(x.T, axis=ax-1)).all())
np.testing.assert_equal(plotting_positions(np.arange(10), alpha=0.35, beta=1-0.35), (1+np.arange(10)-0.35)/10)
np.testing.assert_equal(plotting_positions(np.arange(10), alpha=0.4, beta=0.4), (1+np.arange(10)-0.4)/(10+0.2))
np.testing.assert_equal(plotting_positions(np.arange(10)), (1+np.arange(10)-0.4)/(10+0.2))
print('')
print(scoreatpercentile(x, [10,90]))
print(plotting_positions_w1d(x[:,0]))
print((plotting_positions_w1d(x[:,0]) == plotting_positions(x[:,0])).all())
    #weights versus replicating multiple occurrences of the same x value
w1 = [1, 1, 2, 1, 1]
plotexample = 1
if plotexample:
import matplotlib.pyplot as plt
plt.figure()
plt.title('ppf, cdf values on horizontal axis')
        plt.step(plotting_positions_w1d(x[:,0], weights=w1, method='notnormed'), x[:,0], where='post')
plt.step(stats.mstats.plotting_positions(np.repeat(x[:,0],w1,axis=0)),np.repeat(x[:,0],w1,axis=0),where='post')
        plt.plot(plotting_positions_w1d(x[:,0], weights=w1, method='notnormed'), x[:,0], '-o')
plt.plot(stats.mstats.plotting_positions(np.repeat(x[:,0],w1,axis=0)),np.repeat(x[:,0],w1,axis=0), '-o')
plt.figure()
plt.title('cdf, cdf values on vertical axis')
        plt.step(x[:,0], plotting_positions_w1d(x[:,0], weights=w1, method='notnormed'), where='post')
plt.step(np.repeat(x[:,0],w1,axis=0), stats.mstats.plotting_positions(np.repeat(x[:,0],w1,axis=0)),where='post')
        plt.plot(x[:,0], plotting_positions_w1d(x[:,0], weights=w1, method='notnormed'), '-o')
plt.plot(np.repeat(x[:,0],w1,axis=0), stats.mstats.plotting_positions(np.repeat(x[:,0],w1,axis=0)), '-o')
plt.show()
| bsd-3-clause |
vandegu/umich | eckert_iv_plot.py | 1 | 2413 | # This script will produce:
#
# 1. Colorfill of desired oceanic data in Eckert IV projection (Equal-Area, but with some distortion; easy to look at, however).
# 2. Masked land, so only ocean data shows up.
#
from mpl_toolkits.basemap import Basemap,shiftgrid,interp
import numpy as np
import matplotlib.pyplot as plt
import netCDF4
from scipy.interpolate import griddata as griddata2
import numpy.ma as ma
# Which file and what vertical level do you wish to plot from?
lvl = 55
file = '../data/onexone/mini/MAA_B1850C4CN_f19_g16_cret_4x_sewall.pop.h.0500.nc.1x1.nc'
f = netCDF4.Dataset(file)
#print(f.variables)
u = f.variables['UVEL'][0,lvl,:,:]
v = f.variables['VVEL'][0,lvl,:,:]
w = f.variables['WVEL'][0,lvl,:,:]
pt = f.variables['TEMP'][0,lvl,:,:]
s = f.variables['SALT'][:][0,lvl,:,:]
pd = f.variables['PD'][:]
iage = f.variables['IAGE'][0,lvl,:,:]
print(u.shape)
lat = f.variables['lat'][:]
lon = f.variables['lon'][:]
z_t = f.variables['z_t'][:]
x,y = np.meshgrid(lon,lat)
print(z_t[lvl]/100.0) # To display depth level in m
fig = plt.figure(figsize=[18,12])
ax = fig.add_subplot(111)
#m = Basemap(projection='mbtfpq',lon_0=0,resolution=None)
m = Basemap(projection='eck4',lon_0=0,resolution='c')
xxold,yyold = m(x,y)
# generate a grid that is equally spaced in map coordinates for the current projection
lons,lats,xxnew,yynew = m.makegrid(500,500,returnxy=True)
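# makegrid(nx, ny, returnxy=True) returns the lon/lat values and the projected
# x/y coordinates of a grid that is evenly spaced in map-projection coordinates;
# such an evenly spaced grid is what routines like streamplot expect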
#print(lon.shape)
#print(xx.flatten().shape,yy.flatten().shape,u.flatten().shape)
# project the data onto the new grid
# note: despite its name, 'iagenew' holds the regridded potential temperature (pt);
# masking values > 1e5 removes the very large fill values over land
iagenew = griddata2((xxold.ravel(), yyold.ravel()), pt.ravel(), (xxnew, yynew), method='linear')
iagenew = ma.masked_where((iagenew > 100000), iagenew)
unew = griddata2((xxold.ravel(),yyold.ravel()),u.ravel(),(xxnew,yynew), method = 'linear')
unew = ma.masked_where((unew > 100000), unew)
vnew = griddata2((xxold.ravel(),yyold.ravel()),v.ravel(),(xxnew,yynew), method = 'linear')
vnew = ma.masked_where((vnew > 100000), vnew)
sc1 = m.scatter(xxnew,yynew,c=iagenew,edgecolor='None',s=5,cmap='jet')
#m.streamplot(xxnew,yynew,unew,vnew,density=3,arrowsize=2,arrowstyle='-|>',color='black')
m.drawmeridians(np.arange(0,360,30))
m.drawparallels(np.arange(-90,90,30))
layer = '%3.0fm'%(z_t[lvl]/100)
cb = plt.colorbar(orientation='horizontal',extend='both')
cb.set_label('$Potential\/\/Temperature\/\/(K)$',size=20)
cb.ax.tick_params(labelsize=16)
ax.set_title('$MAA\/\/4x\/\/PI\/\/CO_2:\/\/\/\/%s$'%layer,size=24)
plt.show()
| mit |
JoeBartelmo/PyDetect | gui/VisualConstants.py | 2 | 4134 | import matplotlib.pyplot as plt
import matplotlib.patches as patches
class MARS_PRIMARY(object):
"""
1) RunClock,SystemVoltage
2) RunCLock,BatteryRemaining
3) RunClock,TotalDisplacement
4) IBeam,TotalDisplacement
5) SetSpeed,RPM
6) RPM,Speed
"""
def __init__(self, figureNum, valueColor = 'blue',theoColor='red'):
self._shape = (3,2)
self._figureNum = figureNum
self._theoColor = theoColor
self._valueColor = valueColor
self._patch = [patches.Patch(color=theoColor,label='Predicted Values')]
#creating dictionary of lists
#each of which contains independent and dependent values
self._rc = str(self._shape[0]) + str(self._shape[1])
self._subplotIDs = {"RunClock:SystemVoltage":int(self._rc+"1"),
"RunClock:BatteryRemaining":int(self._rc+"2"),
"RunClock:TotalDisplacement":int(self._rc+"3"),
"IBeam:TotalDisplacement":int(self._rc+"4"),
"SetSpeed:RPM":int(self._rc+"4"),
"RPM:Speed":int(self._rc+"5")
}
self._values = {"RunClock:SystemVoltage":[ [],[] ],
"RunClock:BatteryRemaining":[ [],[] ],
"RunClock:TotalDisplacement":[ [],[] ],
"IBeam:TotalDisplacement":[ [],[] ],
"SetSpeed:RPM":[ [],[] ],
"RPM:Speed":[ [],[] ]
}
self._theoreticals = {"RunClock:SystemVoltage":[ [],[] ],
"RunClock:BatteryRemainingg":[ [],[] ],
"IBeam:TotalDisplacement":[ [],[] ],
"SetSpeed:RPM":[ [],[] ],
"RPM:Speed":[ [],[] ]
}
self._numPlots = len(self._values)
self._plt = self.setup_plot()
self._fig = self._plt.gcf()
def set_subplot(self, _id, title, xlabel, ylabel):
plt.subplot(_id)
plt.title(title, fontsize = 12)
plt.xlabel(xlabel, fontsize=10)
plt.ylabel(ylabel, fontsize=10)
plt.legend(handles=self._patch)
def setup_plot(self):
plt.figure(self._figureNum)
spID = self._subplotIDs
# 1) RunClock,SysVoltage
self.set_subplot(spID["RunClock:SystemVoltage"], "Voltage over time", "Run Time [sec]", "System Voltage [Volts]")
        # 2) RunClock,BatteryRemaining
self.set_subplot(spID["RunClock:BatteryRemaining"], "Battery over time", "Run Time [sec]", "Battery Remaining [%]")
# 3) RunClock,TotalDisplacement
self.set_subplot(spID["RunClock:TotalDisplacement"], "Distance Down Tube", "Run Time [sec]", "Displacement [meters]")
# 4) IBeam, TotalDisplacement
self.set_subplot(spID["IBeam:TotalDisplacement"], "Distance (IBeams)", "IBeam [count]", "Displacement [meters]")
# 5) SetSpeed, RPM
self.set_subplot(spID["SetSpeed:RPM"], "Real Speed vs Set Speed", "Programmed Speed [rpm]", "Set Speed [rpm]")
# 6) RPM, Speed
self.set_subplot(spID["RPM:Speed"], "RPM vs Linear Speed", "RPM [rpm]", "Real Speed [m/s]")
plt.tight_layout()
return plt
def graph(self,key,x,y):
self._values[key][0].append(x)
self._values[key][1].append(y)
xReal = self._values[key][0]
yReal = self._values[key][1]
self._plt.subplot(self._subplotIDs[key])
self._plt.cla()
        self._plt.plot(xReal, yReal, color=self._valueColor)
        if key in self._theoreticals:
            theo = self.calc_theoretical(key, x)
            self._theoreticals[key][0].append(x)
            self._theoreticals[key][1].append(theo)
            # draw the predicted values in theoColor so the legend patch has a matching line
            self._plt.plot(self._theoreticals[key][0], self._theoreticals[key][1],
                           color=self._theoColor)
return self._plt
    def clear(self):
        self._fig.clear()
def calc_theoretical(self,key,x):
"""
ALL THEORETICAL VALUES MUST BE REGRESSED OR CALCULATED ONCE DAQ IS COMPLETE
FOR NOW PLACEHOLDER IS USED
"""
return 2 * x
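# --- Illustrative usage sketch (added; not part of the original module) ------
# Assumes telemetry samples arrive as (key, x, y) values whose key matches one
# of the "Independent:Dependent" names in _subplotIDs; the values shown are
# made-up examples:
#
#     plots = MARS_PRIMARY(figureNum=1)
#     plots.graph("RunClock:SystemVoltage", 0.0, 12.6)
#     plots.graph("RunClock:SystemVoltage", 1.0, 12.5)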
| mit |
sangwook236/general-development-and-testing | sw_dev/python/rnd/test/statistics/statsmodels/statsmodels_quantile_regression.py | 2 | 3038 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import pandas as pd
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import sklearn.linear_model
import matplotlib.pyplot as plt
# REF [site] >> https://github.com/groverpr/Machine-Learning/blob/master/notebooks/09_Quantile_Regression.ipynb
def simple_quantile_regression_example():
y = np.arange(1, 25, 0.25)
    # Linear relationship with constant variance of residuals.
x1 = y.copy() + np.random.randn(96)
    # Non-constant variance of residuals.
x2 = y.copy()
y2 = x2 + np.concatenate((np.random.randn(20) * 0.5,
np.random.randn(20) * 1,
np.random.randn(20) * 4,
np.random.randn(20) * 6,
np.random.randn(16) * 8), axis=0)
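    # (added note) the noise standard deviation grows with x2 (from 0.5 up to 8),
    # so the data are heteroscedastic: the upper and lower conditional quantiles
    # fan out while the conditional mean stays close to the 45-degree line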
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5.5))
ax1.plot(x1, y, 'o')
ax1.set_xlabel('X1')
ax1.set_ylabel('Y')
    ax1.set_title('Constant variance of residuals')
ax2.plot(x2, y2, 'o')
ax2.set_xlabel('X2')
ax2.set_ylabel('Y')
    ax2.set_title('Non-constant variance of residuals')
fig.tight_layout()
plt.show()
#--------------------
# Linear regression
lr = sklearn.linear_model.LinearRegression()
lr.fit(x1.reshape(-1, 1), y.reshape(-1, 1))
lr2 = sklearn.linear_model.LinearRegression()
    lr2.fit(x2.reshape(-1, 1), y2.reshape(-1, 1))  # fit the heteroscedastic data shown in the right panel
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5.5))
ax1.plot(x1, y, 'o')
ax1.set_xlabel('X1')
ax1.set_ylabel('Y')
    ax1.set_title('Constant variance of residuals')
ax1.plot(x1, lr.predict(x1.reshape(-1, 1)))
ax2.plot(x2, y2, 'o')
ax2.set_xlabel('X2')
ax2.set_ylabel('Y')
    ax2.set_title('Non-constant variance of residuals')
ax2.plot(x2, lr2.predict(x2.reshape(-1, 1)))
fig.tight_layout()
plt.show()
#--------------------
# Quantile regression.
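    # (added note) quantile regression estimates the conditional tau-th quantile
    # by minimizing the asymmetric "check" (pinball) loss
    #     rho_tau(u) = u * (tau - 1{u < 0}),   u = y - X*b,
    # instead of the squared loss used by OLS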
data = pd.DataFrame(data={'X': x2, 'Y': y2})
mod = smf.quantreg('Y ~ X', data)
res = mod.fit(q=0.5)
def fit_model(q):
res = mod.fit(q=q)
return [q, res.params['Intercept'], res.params['X']] + res.conf_int().loc['X'].tolist()
quantiles = (0.05, 0.95)
models = [fit_model(x) for x in quantiles]
models = pd.DataFrame(models, columns=['q', 'a', 'b', 'lb', 'ub'])
ols = smf.ols('Y ~ X', data).fit()
ols_ci = ols.conf_int().loc['X'].tolist()
ols = dict(a = ols.params['Intercept'],
b = ols.params['X'],
lb = ols_ci[0],
ub = ols_ci[1])
print(models)
print(ols)
xn = np.arange(data.X.min(), data.X.max(), 2)
get_y = lambda a, b: a + b * xn
fig, ax = plt.subplots(figsize=(8, 6))
for i in range(models.shape[0]):
yn = get_y(models.a[i], models.b[i])
ax.plot(xn, yn, linestyle='dotted', color='grey')
yn = get_y(ols['a'], ols['b'])
ax.plot(xn, yn, color='red', label='OLS')
ax.scatter(data.X, data.Y, alpha=0.2)
legend = ax.legend()
ax.set_xlabel('X', fontsize=16)
ax.set_ylabel('Y', fontsize=16)
ax.set_title('Quantile regression with 0.05 and 0.95 quantiles')
fig.tight_layout()
plt.show()
def main():
simple_quantile_regression_example()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-2.0 |
gcarq/freqtrade | tests/optimize/test_backtesting.py | 1 | 36022 | # pragma pylint: disable=missing-docstring, W0212, line-too-long, C0103, unused-argument
import random
from pathlib import Path
from unittest.mock import MagicMock, PropertyMock
import numpy as np
import pandas as pd
import pytest
from arrow import Arrow
from freqtrade import constants
from freqtrade.commands.optimize_commands import setup_optimize_configuration, start_backtesting
from freqtrade.configuration import TimeRange
from freqtrade.data import history
from freqtrade.data.btanalysis import BT_DATA_COLUMNS, evaluate_result_multi
from freqtrade.data.converter import clean_ohlcv_dataframe
from freqtrade.data.dataprovider import DataProvider
from freqtrade.data.history import get_timerange
from freqtrade.exceptions import DependencyException, OperationalException
from freqtrade.optimize.backtesting import Backtesting
from freqtrade.resolvers import StrategyResolver
from freqtrade.state import RunMode
from freqtrade.strategy.interface import SellType
from tests.conftest import (get_args, log_has, log_has_re, patch_exchange,
patched_configuration_load_config_file)
ORDER_TYPES = [
{
'buy': 'limit',
'sell': 'limit',
'stoploss': 'limit',
'stoploss_on_exchange': False
},
{
'buy': 'limit',
'sell': 'limit',
'stoploss': 'limit',
'stoploss_on_exchange': True
}]
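# helper: keep the rows of every pair's dataframe from position `num` onward
# (tests pass a negative num such as -201 to keep only the most recent rows)
# and reset the index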
def trim_dictlist(dict_list, num):
new = {}
for pair, pair_data in dict_list.items():
new[pair] = pair_data[num:].reset_index()
return new
def load_data_test(what, testdatadir):
timerange = TimeRange.parse_timerange('1510694220-1510700340')
data = history.load_pair_history(pair='UNITTEST/BTC', datadir=testdatadir,
timeframe='1m', timerange=timerange,
drop_incomplete=False,
fill_up_missing=False)
base = 0.001
if what == 'raise':
data.loc[:, 'open'] = data.index * base
data.loc[:, 'high'] = data.index * base + 0.0001
data.loc[:, 'low'] = data.index * base - 0.0001
data.loc[:, 'close'] = data.index * base
if what == 'lower':
data.loc[:, 'open'] = 1 - data.index * base
data.loc[:, 'high'] = 1 - data.index * base + 0.0001
data.loc[:, 'low'] = 1 - data.index * base - 0.0001
data.loc[:, 'close'] = 1 - data.index * base
if what == 'sine':
hz = 0.1 # frequency
data.loc[:, 'open'] = np.sin(data.index * hz) / 1000 + base
data.loc[:, 'high'] = np.sin(data.index * hz) / 1000 + base + 0.0001
data.loc[:, 'low'] = np.sin(data.index * hz) / 1000 + base - 0.0001
data.loc[:, 'close'] = np.sin(data.index * hz) / 1000 + base
return {'UNITTEST/BTC': clean_ohlcv_dataframe(data, timeframe='1m', pair='UNITTEST/BTC',
fill_missing=True)}
def simple_backtest(config, contour, mocker, testdatadir) -> None:
patch_exchange(mocker)
config['timeframe'] = '1m'
backtesting = Backtesting(config)
data = load_data_test(contour, testdatadir)
processed = backtesting.strategy.ohlcvdata_to_dataframe(data)
min_date, max_date = get_timerange(processed)
assert isinstance(processed, dict)
results = backtesting.backtest(
processed=processed,
stake_amount=config['stake_amount'],
start_date=min_date,
end_date=max_date,
max_open_trades=1,
position_stacking=False,
enable_protections=config.get('enable_protections', False),
)
# results :: <class 'pandas.core.frame.DataFrame'>
return results
# FIX: fixturize this?
def _make_backtest_conf(mocker, datadir, conf=None, pair='UNITTEST/BTC'):
data = history.load_data(datadir=datadir, timeframe='1m', pairs=[pair])
data = trim_dictlist(data, -201)
patch_exchange(mocker)
backtesting = Backtesting(conf)
processed = backtesting.strategy.ohlcvdata_to_dataframe(data)
min_date, max_date = get_timerange(processed)
return {
'processed': processed,
'stake_amount': conf['stake_amount'],
'start_date': min_date,
'end_date': max_date,
'max_open_trades': 10,
'position_stacking': False,
}
def _trend(signals, buy_value, sell_value):
n = len(signals['low'])
buy = np.zeros(n)
sell = np.zeros(n)
for i in range(0, len(signals['buy'])):
if random.random() > 0.5: # Both buy and sell signals at same timeframe
buy[i] = buy_value
sell[i] = sell_value
signals['buy'] = buy
signals['sell'] = sell
return signals
def _trend_alternate(dataframe=None, metadata=None):
signals = dataframe
low = signals['low']
n = len(low)
buy = np.zeros(n)
sell = np.zeros(n)
for i in range(0, len(buy)):
if i % 2 == 0:
buy[i] = 1
else:
sell[i] = 1
signals['buy'] = buy
signals['sell'] = sell
return dataframe
# Unit tests
def test_setup_optimize_configuration_without_arguments(mocker, default_conf, caplog) -> None:
patched_configuration_load_config_file(mocker, default_conf)
args = [
'backtesting',
'--config', 'config.json',
'--strategy', 'DefaultStrategy',
]
config = setup_optimize_configuration(get_args(args), RunMode.BACKTEST)
assert 'max_open_trades' in config
assert 'stake_currency' in config
assert 'stake_amount' in config
assert 'exchange' in config
assert 'pair_whitelist' in config['exchange']
assert 'datadir' in config
assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog)
assert 'timeframe' in config
assert not log_has_re('Parameter -i/--ticker-interval detected .*', caplog)
assert 'position_stacking' not in config
assert not log_has('Parameter --enable-position-stacking detected ...', caplog)
assert 'timerange' not in config
assert 'export' not in config
assert 'runmode' in config
assert config['runmode'] == RunMode.BACKTEST
def test_setup_bt_configuration_with_arguments(mocker, default_conf, caplog) -> None:
patched_configuration_load_config_file(mocker, default_conf)
mocker.patch(
'freqtrade.configuration.configuration.create_datadir',
lambda c, x: x
)
args = [
'backtesting',
'--config', 'config.json',
'--strategy', 'DefaultStrategy',
'--datadir', '/foo/bar',
'--timeframe', '1m',
'--enable-position-stacking',
'--disable-max-market-positions',
'--timerange', ':100',
'--export', '/bar/foo',
'--export-filename', 'foo_bar.json',
'--fee', '0',
]
config = setup_optimize_configuration(get_args(args), RunMode.BACKTEST)
assert 'max_open_trades' in config
assert 'stake_currency' in config
assert 'stake_amount' in config
assert 'exchange' in config
assert 'pair_whitelist' in config['exchange']
assert 'datadir' in config
assert config['runmode'] == RunMode.BACKTEST
assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog)
assert 'timeframe' in config
assert log_has('Parameter -i/--timeframe detected ... Using timeframe: 1m ...',
caplog)
assert 'position_stacking' in config
assert log_has('Parameter --enable-position-stacking detected ...', caplog)
assert 'use_max_market_positions' in config
assert log_has('Parameter --disable-max-market-positions detected ...', caplog)
assert log_has('max_open_trades set to unlimited ...', caplog)
assert 'timerange' in config
assert log_has('Parameter --timerange detected: {} ...'.format(config['timerange']), caplog)
assert 'export' in config
assert log_has('Parameter --export detected: {} ...'.format(config['export']), caplog)
assert 'exportfilename' in config
assert isinstance(config['exportfilename'], Path)
assert log_has('Storing backtest results to {} ...'.format(config['exportfilename']), caplog)
assert 'fee' in config
assert log_has('Parameter --fee detected, setting fee to: {} ...'.format(config['fee']), caplog)
def test_setup_optimize_configuration_unlimited_stake_amount(mocker, default_conf, caplog) -> None:
default_conf['stake_amount'] = constants.UNLIMITED_STAKE_AMOUNT
patched_configuration_load_config_file(mocker, default_conf)
args = [
'backtesting',
'--config', 'config.json',
'--strategy', 'DefaultStrategy',
]
with pytest.raises(DependencyException, match=r'.`stake_amount`.*'):
setup_optimize_configuration(get_args(args), RunMode.BACKTEST)
def test_start(mocker, fee, default_conf, caplog) -> None:
start_mock = MagicMock()
mocker.patch('freqtrade.exchange.Exchange.get_fee', fee)
patch_exchange(mocker)
mocker.patch('freqtrade.optimize.backtesting.Backtesting.start', start_mock)
patched_configuration_load_config_file(mocker, default_conf)
args = [
'backtesting',
'--config', 'config.json',
'--strategy', 'DefaultStrategy',
]
pargs = get_args(args)
start_backtesting(pargs)
assert log_has('Starting freqtrade in Backtesting mode', caplog)
assert start_mock.call_count == 1
@pytest.mark.parametrize("order_types", ORDER_TYPES)
def test_backtesting_init(mocker, default_conf, order_types) -> None:
"""
Check that stoploss_on_exchange is set to False while backtesting
since backtesting assumes a perfect stoploss anyway.
"""
default_conf["order_types"] = order_types
patch_exchange(mocker)
get_fee = mocker.patch('freqtrade.exchange.Exchange.get_fee', MagicMock(return_value=0.5))
backtesting = Backtesting(default_conf)
assert backtesting.config == default_conf
assert backtesting.timeframe == '5m'
assert callable(backtesting.strategy.ohlcvdata_to_dataframe)
assert callable(backtesting.strategy.advise_buy)
assert callable(backtesting.strategy.advise_sell)
assert isinstance(backtesting.strategy.dp, DataProvider)
get_fee.assert_called()
assert backtesting.fee == 0.5
assert not backtesting.strategy.order_types["stoploss_on_exchange"]
def test_backtesting_init_no_timeframe(mocker, default_conf, caplog) -> None:
patch_exchange(mocker)
del default_conf['timeframe']
default_conf['strategy_list'] = ['DefaultStrategy',
'SampleStrategy']
mocker.patch('freqtrade.exchange.Exchange.get_fee', MagicMock(return_value=0.5))
with pytest.raises(OperationalException):
Backtesting(default_conf)
log_has("Ticker-interval needs to be set in either configuration "
"or as cli argument `--ticker-interval 5m`", caplog)
def test_data_with_fee(default_conf, mocker, testdatadir) -> None:
patch_exchange(mocker)
default_conf['fee'] = 0.1234
fee_mock = mocker.patch('freqtrade.exchange.Exchange.get_fee', MagicMock(return_value=0.5))
backtesting = Backtesting(default_conf)
assert backtesting.fee == 0.1234
assert fee_mock.call_count == 0
default_conf['fee'] = 0.0
backtesting = Backtesting(default_conf)
assert backtesting.fee == 0.0
assert fee_mock.call_count == 0
def test_data_to_dataframe_bt(default_conf, mocker, testdatadir) -> None:
patch_exchange(mocker)
timerange = TimeRange.parse_timerange('1510694220-1510700340')
data = history.load_data(testdatadir, '1m', ['UNITTEST/BTC'], timerange=timerange,
fill_up_missing=True)
backtesting = Backtesting(default_conf)
processed = backtesting.strategy.ohlcvdata_to_dataframe(data)
assert len(processed['UNITTEST/BTC']) == 102
# Load strategy to compare the result between Backtesting function and strategy are the same
default_conf.update({'strategy': 'DefaultStrategy'})
strategy = StrategyResolver.load_strategy(default_conf)
processed2 = strategy.ohlcvdata_to_dataframe(data)
assert processed['UNITTEST/BTC'].equals(processed2['UNITTEST/BTC'])
def test_backtesting_start(default_conf, mocker, testdatadir, caplog) -> None:
def get_timerange(input1):
return Arrow(2017, 11, 14, 21, 17), Arrow(2017, 11, 14, 22, 59)
mocker.patch('freqtrade.data.history.get_timerange', get_timerange)
patch_exchange(mocker)
mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest')
mocker.patch('freqtrade.optimize.backtesting.generate_backtest_stats')
mocker.patch('freqtrade.optimize.backtesting.show_backtest_results')
mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
PropertyMock(return_value=['UNITTEST/BTC']))
default_conf['timeframe'] = '1m'
default_conf['datadir'] = testdatadir
default_conf['export'] = None
default_conf['timerange'] = '-1510694220'
backtesting = Backtesting(default_conf)
backtesting.start()
# check the logs, that will contain the backtest result
exists = [
'Using stake_currency: BTC ...',
'Using stake_amount: 0.001 ...',
'Backtesting with data from 2017-11-14 21:17:00 '
'up to 2017-11-14 22:59:00 (0 days)..'
]
for line in exists:
assert log_has(line, caplog)
assert backtesting.strategy.dp._pairlists is not None
def test_backtesting_start_no_data(default_conf, mocker, caplog, testdatadir) -> None:
def get_timerange(input1):
return Arrow(2017, 11, 14, 21, 17), Arrow(2017, 11, 14, 22, 59)
mocker.patch('freqtrade.data.history.history_utils.load_pair_history',
MagicMock(return_value=pd.DataFrame()))
mocker.patch('freqtrade.data.history.get_timerange', get_timerange)
patch_exchange(mocker)
mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest')
mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
PropertyMock(return_value=['UNITTEST/BTC']))
default_conf['timeframe'] = "1m"
default_conf['datadir'] = testdatadir
default_conf['export'] = None
default_conf['timerange'] = '20180101-20180102'
backtesting = Backtesting(default_conf)
with pytest.raises(OperationalException, match='No data found. Terminating.'):
backtesting.start()
def test_backtesting_no_pair_left(default_conf, mocker, caplog, testdatadir) -> None:
mocker.patch('freqtrade.exchange.Exchange.exchange_has', MagicMock(return_value=True))
mocker.patch('freqtrade.data.history.history_utils.load_pair_history',
MagicMock(return_value=pd.DataFrame()))
mocker.patch('freqtrade.data.history.get_timerange', get_timerange)
patch_exchange(mocker)
mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest')
mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
PropertyMock(return_value=[]))
default_conf['timeframe'] = "1m"
default_conf['datadir'] = testdatadir
default_conf['export'] = None
default_conf['timerange'] = '20180101-20180102'
with pytest.raises(OperationalException, match='No pair in whitelist.'):
Backtesting(default_conf)
default_conf['pairlists'] = [{"method": "VolumePairList", "number_assets": 5}]
with pytest.raises(OperationalException, match='VolumePairList not allowed for backtesting.'):
Backtesting(default_conf)
def test_backtesting_pairlist_list(default_conf, mocker, caplog, testdatadir, tickers) -> None:
mocker.patch('freqtrade.exchange.Exchange.exchange_has', MagicMock(return_value=True))
mocker.patch('freqtrade.exchange.Exchange.get_tickers', tickers)
mocker.patch('freqtrade.exchange.Exchange.price_to_precision', lambda s, x, y: y)
mocker.patch('freqtrade.data.history.get_timerange', get_timerange)
patch_exchange(mocker)
mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest')
mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
PropertyMock(return_value=['XRP/BTC']))
mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.refresh_pairlist')
default_conf['ticker_interval'] = "1m"
default_conf['datadir'] = testdatadir
default_conf['export'] = None
# Use stoploss from strategy
del default_conf['stoploss']
default_conf['timerange'] = '20180101-20180102'
default_conf['pairlists'] = [{"method": "VolumePairList", "number_assets": 5}]
with pytest.raises(OperationalException, match='VolumePairList not allowed for backtesting.'):
Backtesting(default_conf)
default_conf['pairlists'] = [{"method": "StaticPairList"}, {"method": "PerformanceFilter"}]
with pytest.raises(OperationalException,
match='PerformanceFilter not allowed for backtesting.'):
Backtesting(default_conf)
default_conf['pairlists'] = [{"method": "StaticPairList"}, {"method": "PrecisionFilter"}, ]
Backtesting(default_conf)
# Multiple strategies
default_conf['strategy_list'] = ['DefaultStrategy', 'TestStrategyLegacy']
with pytest.raises(OperationalException,
match='PrecisionFilter not allowed for backtesting multiple strategies.'):
Backtesting(default_conf)
def test_backtest(default_conf, fee, mocker, testdatadir) -> None:
default_conf['ask_strategy']['use_sell_signal'] = False
mocker.patch('freqtrade.exchange.Exchange.get_fee', fee)
patch_exchange(mocker)
backtesting = Backtesting(default_conf)
pair = 'UNITTEST/BTC'
timerange = TimeRange('date', None, 1517227800, 0)
data = history.load_data(datadir=testdatadir, timeframe='5m', pairs=['UNITTEST/BTC'],
timerange=timerange)
processed = backtesting.strategy.ohlcvdata_to_dataframe(data)
min_date, max_date = get_timerange(processed)
results = backtesting.backtest(
processed=processed,
stake_amount=default_conf['stake_amount'],
start_date=min_date,
end_date=max_date,
max_open_trades=10,
position_stacking=False,
)
assert not results.empty
assert len(results) == 2
expected = pd.DataFrame(
{'pair': [pair, pair],
'profit_percent': [0.0, 0.0],
'profit_abs': [0.0, 0.0],
'open_date': pd.to_datetime([Arrow(2018, 1, 29, 18, 40, 0).datetime,
Arrow(2018, 1, 30, 3, 30, 0).datetime], utc=True
),
'open_rate': [0.104445, 0.10302485],
'open_fee': [0.0025, 0.0025],
'close_date': pd.to_datetime([Arrow(2018, 1, 29, 22, 35, 0).datetime,
Arrow(2018, 1, 30, 4, 10, 0).datetime], utc=True),
'close_rate': [0.104969, 0.103541],
'close_fee': [0.0025, 0.0025],
'amount': [0.00957442, 0.0097064],
'trade_duration': [235, 40],
'open_at_end': [False, False],
'sell_reason': [SellType.ROI, SellType.ROI]
})
pd.testing.assert_frame_equal(results, expected)
data_pair = processed[pair]
for _, t in results.iterrows():
ln = data_pair.loc[data_pair["date"] == t["open_date"]]
        # Check that the open trade rate aligns with the candle's open rate
assert ln is not None
assert round(ln.iloc[0]["open"], 6) == round(t["open_rate"], 6)
        # check that the close trade rate aligns with the candle's open rate or lies between its high and low
ln = data_pair.loc[data_pair["date"] == t["close_date"]]
assert (round(ln.iloc[0]["open"], 6) == round(t["close_rate"], 6) or
round(ln.iloc[0]["low"], 6) < round(
t["close_rate"], 6) < round(ln.iloc[0]["high"], 6))
def test_backtest_1min_timeframe(default_conf, fee, mocker, testdatadir) -> None:
default_conf['ask_strategy']['use_sell_signal'] = False
mocker.patch('freqtrade.exchange.Exchange.get_fee', fee)
patch_exchange(mocker)
backtesting = Backtesting(default_conf)
    # Run backtesting for an existing 1m timeframe
timerange = TimeRange.parse_timerange('1510688220-1510700340')
data = history.load_data(datadir=testdatadir, timeframe='1m', pairs=['UNITTEST/BTC'],
timerange=timerange)
processed = backtesting.strategy.ohlcvdata_to_dataframe(data)
min_date, max_date = get_timerange(processed)
results = backtesting.backtest(
processed=processed,
stake_amount=default_conf['stake_amount'],
start_date=min_date,
end_date=max_date,
max_open_trades=1,
position_stacking=False,
)
assert not results.empty
assert len(results) == 1
def test_processed(default_conf, mocker, testdatadir) -> None:
patch_exchange(mocker)
backtesting = Backtesting(default_conf)
dict_of_tickerrows = load_data_test('raise', testdatadir)
dataframes = backtesting.strategy.ohlcvdata_to_dataframe(dict_of_tickerrows)
dataframe = dataframes['UNITTEST/BTC']
cols = dataframe.columns
# assert the dataframe got some of the indicator columns
for col in ['close', 'high', 'low', 'open', 'date',
'ema10', 'rsi', 'fastd', 'plus_di']:
assert col in cols
def test_backtest_pricecontours_protections(default_conf, fee, mocker, testdatadir) -> None:
# While this test IS a copy of test_backtest_pricecontours, it's needed to ensure
    # results do not carry over to the next run, which is not guaranteed when using parametrize.
default_conf['protections'] = [
{
"method": "CooldownPeriod",
"stop_duration": 3,
}]
default_conf['enable_protections'] = True
mocker.patch('freqtrade.exchange.Exchange.get_fee', fee)
tests = [
['sine', 9],
['raise', 10],
['lower', 0],
['sine', 9],
['raise', 10],
]
# While buy-signals are unrealistic, running backtesting
# over and over again should not cause different results
for [contour, numres] in tests:
assert len(simple_backtest(default_conf, contour, mocker, testdatadir)) == numres
@pytest.mark.parametrize('protections,contour,expected', [
(None, 'sine', 35),
(None, 'raise', 19),
(None, 'lower', 0),
(None, 'sine', 35),
(None, 'raise', 19),
([{"method": "CooldownPeriod", "stop_duration": 3}], 'sine', 9),
([{"method": "CooldownPeriod", "stop_duration": 3}], 'raise', 10),
([{"method": "CooldownPeriod", "stop_duration": 3}], 'lower', 0),
([{"method": "CooldownPeriod", "stop_duration": 3}], 'sine', 9),
([{"method": "CooldownPeriod", "stop_duration": 3}], 'raise', 10),
])
def test_backtest_pricecontours(default_conf, fee, mocker, testdatadir,
protections, contour, expected) -> None:
if protections:
default_conf['protections'] = protections
default_conf['enable_protections'] = True
mocker.patch('freqtrade.exchange.Exchange.get_fee', fee)
# While buy-signals are unrealistic, running backtesting
# over and over again should not cause different results
assert len(simple_backtest(default_conf, contour, mocker, testdatadir)) == expected
def test_backtest_clash_buy_sell(mocker, default_conf, testdatadir):
# Override the default buy trend function in our default_strategy
def fun(dataframe=None, pair=None):
buy_value = 1
sell_value = 1
return _trend(dataframe, buy_value, sell_value)
backtest_conf = _make_backtest_conf(mocker, conf=default_conf, datadir=testdatadir)
backtesting = Backtesting(default_conf)
backtesting.strategy.advise_buy = fun # Override
backtesting.strategy.advise_sell = fun # Override
results = backtesting.backtest(**backtest_conf)
assert results.empty
def test_backtest_only_sell(mocker, default_conf, testdatadir):
# Override the default buy trend function in our default_strategy
def fun(dataframe=None, pair=None):
buy_value = 0
sell_value = 1
return _trend(dataframe, buy_value, sell_value)
backtest_conf = _make_backtest_conf(mocker, conf=default_conf, datadir=testdatadir)
backtesting = Backtesting(default_conf)
backtesting.strategy.advise_buy = fun # Override
backtesting.strategy.advise_sell = fun # Override
results = backtesting.backtest(**backtest_conf)
assert results.empty
def test_backtest_alternate_buy_sell(default_conf, fee, mocker, testdatadir):
mocker.patch('freqtrade.exchange.Exchange.get_fee', fee)
backtest_conf = _make_backtest_conf(mocker, conf=default_conf,
pair='UNITTEST/BTC', datadir=testdatadir)
default_conf['timeframe'] = '1m'
backtesting = Backtesting(default_conf)
backtesting.strategy.advise_buy = _trend_alternate # Override
backtesting.strategy.advise_sell = _trend_alternate # Override
results = backtesting.backtest(**backtest_conf)
# 200 candles in backtest data
# won't buy on first (shifted by 1)
# 100 buys signals
assert len(results) == 100
# One trade was force-closed at the end
assert len(results.loc[results.open_at_end]) == 0
@pytest.mark.parametrize("pair", ['ADA/BTC', 'LTC/BTC'])
@pytest.mark.parametrize("tres", [0, 20, 30])
def test_backtest_multi_pair(default_conf, fee, mocker, tres, pair, testdatadir):
def _trend_alternate_hold(dataframe=None, metadata=None):
"""
Buy every xth candle - sell every other xth -2 (hold on to pairs a bit)
"""
if metadata['pair'] in ('ETH/BTC', 'LTC/BTC'):
multi = 20
else:
multi = 18
dataframe['buy'] = np.where(dataframe.index % multi == 0, 1, 0)
dataframe['sell'] = np.where((dataframe.index + multi - 2) % multi == 0, 1, 0)
return dataframe
mocker.patch('freqtrade.exchange.Exchange.get_fee', fee)
patch_exchange(mocker)
pairs = ['ADA/BTC', 'DASH/BTC', 'ETH/BTC', 'LTC/BTC', 'NXT/BTC']
data = history.load_data(datadir=testdatadir, timeframe='5m', pairs=pairs)
# Only use 500 lines to increase performance
data = trim_dictlist(data, -500)
# Remove data for one pair from the beginning of the data
data[pair] = data[pair][tres:].reset_index()
default_conf['timeframe'] = '5m'
backtesting = Backtesting(default_conf)
backtesting.strategy.advise_buy = _trend_alternate_hold # Override
backtesting.strategy.advise_sell = _trend_alternate_hold # Override
processed = backtesting.strategy.ohlcvdata_to_dataframe(data)
min_date, max_date = get_timerange(processed)
backtest_conf = {
'processed': processed,
'stake_amount': default_conf['stake_amount'],
'start_date': min_date,
'end_date': max_date,
'max_open_trades': 3,
'position_stacking': False,
}
results = backtesting.backtest(**backtest_conf)
# Make sure we have parallel trades
assert len(evaluate_result_multi(results, '5m', 2)) > 0
# make sure we don't have trades with more than configured max_open_trades
assert len(evaluate_result_multi(results, '5m', 3)) == 0
backtest_conf = {
'processed': processed,
'stake_amount': default_conf['stake_amount'],
'start_date': min_date,
'end_date': max_date,
'max_open_trades': 1,
'position_stacking': False,
}
results = backtesting.backtest(**backtest_conf)
assert len(evaluate_result_multi(results, '5m', 1)) == 0
def test_backtest_start_timerange(default_conf, mocker, caplog, testdatadir):
patch_exchange(mocker)
mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest')
mocker.patch('freqtrade.optimize.backtesting.generate_backtest_stats')
mocker.patch('freqtrade.optimize.backtesting.show_backtest_results')
mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
PropertyMock(return_value=['UNITTEST/BTC']))
patched_configuration_load_config_file(mocker, default_conf)
args = [
'backtesting',
'--config', 'config.json',
'--strategy', 'DefaultStrategy',
'--datadir', str(testdatadir),
'--timeframe', '1m',
'--timerange', '1510694220-1510700340',
'--enable-position-stacking',
'--disable-max-market-positions'
]
args = get_args(args)
start_backtesting(args)
# check the logs, that will contain the backtest result
exists = [
'Parameter -i/--timeframe detected ... Using timeframe: 1m ...',
'Ignoring max_open_trades (--disable-max-market-positions was used) ...',
'Parameter --timerange detected: 1510694220-1510700340 ...',
f'Using data directory: {testdatadir} ...',
'Using stake_currency: BTC ...',
'Using stake_amount: 0.001 ...',
'Loading data from 2017-11-14 20:57:00 '
'up to 2017-11-14 22:58:00 (0 days)..',
'Backtesting with data from 2017-11-14 21:17:00 '
'up to 2017-11-14 22:58:00 (0 days)..',
'Parameter --enable-position-stacking detected ...'
]
for line in exists:
assert log_has(line, caplog)
@pytest.mark.filterwarnings("ignore:deprecated")
def test_backtest_start_multi_strat(default_conf, mocker, caplog, testdatadir):
patch_exchange(mocker)
backtestmock = MagicMock(return_value=pd.DataFrame(columns=BT_DATA_COLUMNS + ['profit_abs']))
mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
PropertyMock(return_value=['UNITTEST/BTC']))
mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest', backtestmock)
text_table_mock = MagicMock()
sell_reason_mock = MagicMock()
strattable_mock = MagicMock()
strat_summary = MagicMock()
mocker.patch.multiple('freqtrade.optimize.optimize_reports',
text_table_bt_results=text_table_mock,
text_table_strategy=strattable_mock,
generate_pair_metrics=MagicMock(),
generate_sell_reason_stats=sell_reason_mock,
generate_strategy_metrics=strat_summary,
generate_daily_stats=MagicMock(),
)
patched_configuration_load_config_file(mocker, default_conf)
args = [
'backtesting',
'--config', 'config.json',
'--datadir', str(testdatadir),
'--strategy-path', str(Path(__file__).parents[1] / 'strategy/strats'),
'--timeframe', '1m',
'--timerange', '1510694220-1510700340',
'--enable-position-stacking',
'--disable-max-market-positions',
'--strategy-list',
'DefaultStrategy',
'TestStrategyLegacy',
]
args = get_args(args)
start_backtesting(args)
# 2 backtests, 4 tables
assert backtestmock.call_count == 2
assert text_table_mock.call_count == 4
assert strattable_mock.call_count == 1
assert sell_reason_mock.call_count == 2
assert strat_summary.call_count == 1
# check the logs, that will contain the backtest result
exists = [
'Parameter -i/--timeframe detected ... Using timeframe: 1m ...',
'Ignoring max_open_trades (--disable-max-market-positions was used) ...',
'Parameter --timerange detected: 1510694220-1510700340 ...',
f'Using data directory: {testdatadir} ...',
'Using stake_currency: BTC ...',
'Using stake_amount: 0.001 ...',
'Loading data from 2017-11-14 20:57:00 '
'up to 2017-11-14 22:58:00 (0 days)..',
'Backtesting with data from 2017-11-14 21:17:00 '
'up to 2017-11-14 22:58:00 (0 days)..',
'Parameter --enable-position-stacking detected ...',
'Running backtesting for Strategy DefaultStrategy',
'Running backtesting for Strategy TestStrategyLegacy',
]
for line in exists:
assert log_has(line, caplog)
@pytest.mark.filterwarnings("ignore:deprecated")
def test_backtest_start_multi_strat_nomock(default_conf, mocker, caplog, testdatadir, capsys):
patch_exchange(mocker)
backtestmock = MagicMock(side_effect=[
pd.DataFrame({'pair': ['XRP/BTC', 'LTC/BTC'],
'profit_percent': [0.0, 0.0],
'profit_abs': [0.0, 0.0],
'open_date': pd.to_datetime(['2018-01-29 18:40:00',
'2018-01-30 03:30:00', ], utc=True
),
'close_date': pd.to_datetime(['2018-01-29 20:45:00',
'2018-01-30 05:35:00', ], utc=True),
'trade_duration': [235, 40],
'open_at_end': [False, False],
'open_rate': [0.104445, 0.10302485],
'close_rate': [0.104969, 0.103541],
'sell_reason': [SellType.ROI, SellType.ROI]
}),
pd.DataFrame({'pair': ['XRP/BTC', 'LTC/BTC', 'ETH/BTC'],
'profit_percent': [0.03, 0.01, 0.1],
'profit_abs': [0.01, 0.02, 0.2],
'open_date': pd.to_datetime(['2018-01-29 18:40:00',
'2018-01-30 03:30:00',
'2018-01-30 05:30:00'], utc=True
),
'close_date': pd.to_datetime(['2018-01-29 20:45:00',
'2018-01-30 05:35:00',
'2018-01-30 08:30:00'], utc=True),
'trade_duration': [47, 40, 20],
'open_at_end': [False, False, False],
'open_rate': [0.104445, 0.10302485, 0.122541],
'close_rate': [0.104969, 0.103541, 0.123541],
'sell_reason': [SellType.ROI, SellType.ROI, SellType.STOP_LOSS]
}),
])
mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
PropertyMock(return_value=['UNITTEST/BTC']))
mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest', backtestmock)
patched_configuration_load_config_file(mocker, default_conf)
args = [
'backtesting',
'--config', 'config.json',
'--datadir', str(testdatadir),
'--strategy-path', str(Path(__file__).parents[1] / 'strategy/strats'),
'--timeframe', '1m',
'--timerange', '1510694220-1510700340',
'--enable-position-stacking',
'--disable-max-market-positions',
'--strategy-list',
'DefaultStrategy',
'TestStrategyLegacy',
]
args = get_args(args)
start_backtesting(args)
# check the logs, that will contain the backtest result
exists = [
'Parameter -i/--timeframe detected ... Using timeframe: 1m ...',
'Ignoring max_open_trades (--disable-max-market-positions was used) ...',
'Parameter --timerange detected: 1510694220-1510700340 ...',
f'Using data directory: {testdatadir} ...',
'Using stake_currency: BTC ...',
'Using stake_amount: 0.001 ...',
'Loading data from 2017-11-14 20:57:00 '
'up to 2017-11-14 22:58:00 (0 days)..',
'Backtesting with data from 2017-11-14 21:17:00 '
'up to 2017-11-14 22:58:00 (0 days)..',
'Parameter --enable-position-stacking detected ...',
'Running backtesting for Strategy DefaultStrategy',
'Running backtesting for Strategy TestStrategyLegacy',
]
for line in exists:
assert log_has(line, caplog)
captured = capsys.readouterr()
assert 'BACKTESTING REPORT' in captured.out
assert 'SELL REASON STATS' in captured.out
assert 'LEFT OPEN TRADES REPORT' in captured.out
assert 'STRATEGY SUMMARY' in captured.out
| gpl-3.0 |
zakkum42/Bosch | src/04-model/merge_and_compress.py | 1 | 1867 | import pandas as pd
import numpy as np
import pickle
import os
import math
import glob
from include.dataset_fnames import generate_station_data_fname, generate_data_fname, generate_response_data_fname, train_categorical_onehot_filename
from random import shuffle
from datetime import datetime
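# (added note) this module merges per-batch numeric feature chunks with the
# one-hot encoded categorical chunks (joined on Id) and stores each merged
# block as a compressed .npz array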
def load_and_compress_data(dirname, use_categoric_features=False):
fname = generate_data_fname(sample_type='train', data_type='numeric')
numeric_columns = pd.read_csv(fname, nrows=2).columns
numeric_fnames = sorted(glob.glob1(dirname, "train_numeric_*"))
if use_categoric_features:
fname = train_categorical_onehot_filename
categoric_columns = pd.read_csv(fname, nrows=2).columns
categoric_fnames = sorted(glob.glob1(dirname, "train_categorical_*"))
for list_index in range(len(numeric_fnames)):
numeric_fname = os.path.join(dirname, numeric_fnames[list_index])
numeric_df = pd.read_csv(numeric_fname, names=numeric_columns, index_col='Id')
del numeric_df['Response']
zfname = "train_numeric_0_" + str(list_index).zfill(3) + ".npz"
# print numeric_fname
# print zfname
if use_categoric_features:
categoric_fname = os.path.join(dirname, categoric_fnames[list_index])
categoric_df = pd.read_csv(categoric_fname, names=categoric_columns, index_col='Id')
numeric_df = numeric_df.join(categoric_df, how='inner')
del categoric_df
zfname = "train_numeric+categoric_0_" + str(list_index).zfill(3) + ".npz"
# print categoric_fname
# print zfname
print "Saving:", zfname
np.savez_compressed(zfname, data=numeric_df.values)
del numeric_df
if __name__ == '__main__':
load_and_compress_data('bs60000', use_categoric_features=True)
| apache-2.0 |
SCIP-Interfaces/PySCIPOpt | examples/unfinished/kcenter_binary_search.py | 2 | 4857 | """
kcenter_binary_search.py: use bisection for solving the k-center problem
bisects the interval [0, max facility-customer distance] until finding a
distance such that all customers are covered, but decreasing that distance
by a small amount delta would leave some uncovered.
Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012
"""
from pyscipopt import Model, quicksum, multidict
def kcover(I,J,c,k):
"""kcover -- minimize the number of uncovered customers from k facilities.
Parameters:
- I: set of customers
- J: set of potential facilities
- c[i,j]: cost of servicing customer i from facility j
- k: number of facilities to be used
Returns a model, ready to be solved.
"""
model = Model("k-center")
z,y,x = {},{},{}
for i in I:
z[i] = model.addVar(vtype="B", name="z(%s)"%i, obj=1)
for j in J:
y[j] = model.addVar(vtype="B", name="y(%s)"%j)
for i in I:
x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j))
for i in I:
model.addCons(quicksum(x[i,j] for j in J) + z[i] == 1, "Assign(%s)"%i)
for j in J:
model.addCons(x[i,j] <= y[j], "Strong(%s,%s)"%(i,j))
    model.addCons(quicksum(y[j] for j in J) == k, "k_center")
model.data = x,y,z
return model
def solve_kcenter(I,J,c,k,delta):
"""solve_kcenter -- locate k facilities minimizing distance of most distant customer.
Parameters:
I - set of customers
J - set of potential facilities
c[i,j] - cost of servicing customer i from facility j
k - number of facilities to be used
delta - tolerance for terminating bisection
Returns:
- list of facilities to be used
- edges linking them to customers
"""
model = kcover(I,J,c,k)
x,y,z = model.data
facilities,edges = [],[]
LB = 0
UB = max(c[i,j] for (i,j) in c)
model.setObjlimit(0.1)
    while UB-LB > delta:
        theta = (UB+LB) / 2.
        # print "\n\ncurrent theta:", theta
        # free the transformed problem so that variable bounds can be modified
        # again after a previous call to optimize()
        model.freeTransform()
        for j in J:
            for i in I:
                if c[i,j] > theta:
                    model.chgVarUb(x[i,j], 0.0)
                else:
                    model.chgVarUb(x[i,j], 1.0)
        # model.Params.OutputFlag = 0 # silent mode
        model.setObjlimit(.1)
        model.optimize()
        if model.getStatus() == "optimal":
# infeasibility = sum([z[i].X for i in I])
# print "infeasibility=",infeasibility
UB = theta
facilities = [j for j in y if model.getVal(y[j]) > .5]
edges = [(i,j) for (i,j) in x if model.getVal(x[i,j]) > .5]
# print "updated solution:"
# print "facilities",facilities
# print "edges",edges
else: # infeasibility > 0:
LB = theta
return facilities,edges
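# (added note) the bisection keeps UB at a distance for which all customers can
# be covered by k facilities and LB at a distance for which coverage failed;
# once UB - LB <= delta the last feasible assignment found at UB is returned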
import math
import random
def distance(x1,y1,x2,y2):
return math.sqrt((x2-x1)**2 + (y2-y1)**2)
def make_data(n,m,same=True):
if same == True:
I = range(n)
J = range(m)
x = [random.random() for i in range(max(m,n))] # positions of the points in the plane
y = [random.random() for i in range(max(m,n))]
else:
I = range(n)
J = range(n,n+m)
x = [random.random() for i in range(n+m)] # positions of the points in the plane
y = [random.random() for i in range(n+m)]
c = {}
for i in I:
for j in J:
c[i,j] = distance(x[i],y[i],x[j],y[j])
return I,J,c,x,y
if __name__ == "__main__":
random.seed(67)
n = 200
m = n
I,J,c,x_pos,y_pos = make_data(n,m,same=True)
k = 20
delta = 1.e-4
facilities,edges = solve_kcenter(I,J,c,k,delta)
print("Selected facilities:", facilities)
print("Edges:", edges)
print("Max distance from a facility to a customer: ", max([c[i,j] for (i,j) in edges]))
try: # plot the result using networkx and matplotlib
import networkx as NX
import matplotlib.pyplot as P
P.clf()
G = NX.Graph()
facilities = set(facilities)
unused = set(j for j in J if j not in facilities)
client = set(i for i in I if i not in facilities and i not in unused)
G.add_nodes_from(facilities)
G.add_nodes_from(client)
G.add_nodes_from(unused)
for (i,j) in edges:
G.add_edge(i,j)
position = {}
for i in range(len(x_pos)):
position[i] = (x_pos[i],y_pos[i])
NX.draw(G,position,with_labels=False,node_color="w",nodelist=facilities)
NX.draw(G,position,with_labels=False,node_color="c",nodelist=unused,node_size=50)
NX.draw(G,position,with_labels=False,node_color="g",nodelist=client,node_size=50)
P.show()
except ImportError:
print("install 'networkx' and 'matplotlib' for plotting")
| mit |
arbuz001/sms-tools | lectures/06-Harmonic-model/plots-code/sines-partials-harmonics.py | 24 | 2020 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/sine-440-490.wav')
w = np.hamming(3529)
N = 32768
hN = N/2
t = -20
pin = 4850
x1 = x[pin:pin+w.size]
mX1, pX1 = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX1, t)
pmag = mX1[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX1, pX1, ploc)
plt.figure(1, figsize=(9, 6))
plt.subplot(311)
plt.plot(fs*np.arange(mX1.size)/float(N), mX1-max(mX1), 'r', lw=1.5)
plt.plot(fs * iploc/N, ipmag-max(mX1), marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([200, 1000, -80, 4])
plt.title('mX + peaks (sine-440-490.wav)')
(fs, x) = UF.wavread('../../../sounds/vibraphone-C6.wav')
w = np.blackman(401)
N = 1024
hN = N/2
t = -80
pin = 200
x2 = x[pin:pin+w.size]
mX2, pX2 = DFT.dftAnal(x2, w, N)
ploc = UF.peakDetection(mX2, t)
pmag = mX2[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX2, pX2, ploc)
plt.subplot(3,1,2)
plt.plot(fs*np.arange(mX2.size)/float(N), mX2-max(mX2), 'r', lw=1.5)
plt.plot(fs * iploc/N, ipmag-max(mX2), marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([500,10000,-100,4])
plt.title('mX + peaks (vibraphone-C6.wav)')
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
w = np.blackman(651)
N = 2048
hN = N/2
t = -80
pin = 10000
x3 = x[pin:pin+w.size]
mX3, pX3 = DFT.dftAnal(x3, w, N)
ploc = UF.peakDetection(mX3, t)
pmag = mX3[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX3, pX3, ploc)
plt.subplot(3,1,3)
plt.plot(fs*np.arange(mX3.size)/float(N), mX3-max(mX3), 'r', lw=1.5)
plt.plot(fs * iploc/N, ipmag-max(mX3), marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([0,6000,-70,2])
plt.title('mX + peaks (oboe-A4.wav)')
plt.tight_layout()
plt.savefig('sines-partials-harmonics.png')
plt.show()
| agpl-3.0 |
larsmans/scikit-learn | sklearn/utils/testing.py | 3 | 21336 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regexp
except ImportError:
# for Py 2.6
def assert_raises_regexp(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except Exception as e:
error_message = str(e)
if not re.compile(expected_regexp).match(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("Should have raised %r" %
expected_exception(expected_regexp))
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
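    Examples
    --------
    A minimal sketch; ``noisy`` below is a hypothetical helper.
    >>> import warnings
    >>> def noisy():  # hypothetical helper that emits a warning
    ...     warnings.warn("something odd", UserWarning)
    ...     return 42
    >>> assert_warns(UserWarning, noisy)
    42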
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if not e.category is np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
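    Examples
    --------
    A minimal sketch; ``noisy`` below is a hypothetical helper.
    >>> import warnings
    >>> def noisy():  # hypothetical helper that emits a warning
    ...     warnings.warn("spam and eggs", UserWarning)
    >>> assert_warns_message(UserWarning, "spam", noisy)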
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
if not w[0].category is warning_class:
raise AssertionError("First warning for %s is not a "
"%s( is %s)"
% (func.__name__, warning_class, w[0]))
# substring will match, the entire message with typo won't
msg = w[0].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if not check_in_message(msg):
raise AssertionError("The message received ('%s') for <%s> is "
"not the one you expected ('%s')"
% (msg, func.__name__, message
))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if not e.category is np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exception, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions"""
try:
function(*args, **kwargs)
raise AssertionError("Should have raised %r" % exception(message))
except exception as e:
error_message = str(e)
assert_in(message, error_message)
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict: contains data as
columns_dict[column_name] = array of data
dataname: name of data set
matfile: file-like object or file name
ordering: list of column_names, determines the ordering in the data set
Note: this function transposes all arrays, while fetch_mldata only
transposes 'data', keep that into account in the tests.
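    Examples
    --------
    A minimal sketch writing made-up arrays to an in-memory .mat file.
    >>> import numpy as np
    >>> from io import BytesIO
    >>> matfile = BytesIO()
    >>> fake_mldata({'data': np.ones((2, 3)), 'label': np.arange(2.)},
    ...             'fake data', matfile)  # column values are made up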
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
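        Examples
        --------
        A minimal sketch; the dataset name and array values are made up.
        >>> import numpy as np
        >>> mock = mock_mldata_urlopen(
        ...     {'iris': {'data': np.ones((2, 3)), 'label': np.arange(2.)}})
        >>> matfile = mock('http://mldata.org/repository/data/'
        ...                'download/matlab/iris')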
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer',
'TfidfTransformer', 'IsotonicRegression', 'OneHotEncoder',
'RandomTreesEmbedding', 'FeatureHasher', 'DummyClassifier',
'DummyRegressor', 'TruncatedSVD', 'PolynomialFeatures']
def all_estimators(include_meta_estimators=False, include_other=False,
type_filter=None, include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
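    Examples
    --------
    A minimal sketch listing only classifier names.
    >>> estimators = all_estimators(type_filter='classifier')
    >>> classifier_names = [name for name, cls in estimators]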
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter == 'classifier':
estimators = [est for est in estimators
if issubclass(est[1], ClassifierMixin)]
elif type_filter == 'regressor':
estimators = [est for est in estimators
if issubclass(est[1], RegressorMixin)]
elif type_filter == 'transformer':
estimators = [est for est in estimators
if issubclass(est[1], TransformerMixin)]
elif type_filter == 'cluster':
estimators = [est for est in estimators
if issubclass(est[1], ClusterMixin)]
elif type_filter is not None:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# We sort in order to have reproducible test failures
return sorted(estimators)
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
matplotlib.pylab.figure()
except:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
couchbase/ns_server | scripts/jq/master-events/plot-bucket-rebalance.py | 1 | 1570 | #!/usr/bin/env python3
"""
Copyright 2019-Present Couchbase, Inc.
Use of this software is governed by the Business Source License included in
the file licenses/BSL-Couchbase.txt. As of the Change Date specified in that
file, in accordance with the Business Source License, use of this software will
be governed by the Apache License, Version 2.0, included in the file
licenses/APL2.txt.
"""
import json
import sys
import matplotlib.pyplot as plot
payload = json.load(sys.stdin)
bucket = payload['bucket']
vbuckets = []
active_moves = []
replica_moves = []
backfills = []
for i, move in enumerate(payload['moves']):
vbucket = move['vbucket']
x = move['start']
width = move['duration']
backfill_width = move['backfillDuration']
vbuckets.append(vbucket)
move_tuple = (i, x, width)
if move['type'] == 'active':
active_moves.append(move_tuple)
else:
replica_moves.append(move_tuple)
backfills.append((i, x, backfill_width))
plot.rcdefaults()
fig, ax = plot.subplots()
charts = [(active_moves, 'active moves', {'color': 'green'}),
(replica_moves, 'replica moves', {'color': 'orange'}),
(backfills, 'backfill phase', {'color': 'white', 'alpha': 0.5})]
for data, label, style in charts:
if len(data) > 0:
pos, lefts, widths = zip(*data)
ax.barh(pos, left=lefts, width=widths, label=label, **style)
ax.set_yticks(range(len(vbuckets)))
ax.set_yticklabels(vbuckets)
ax.invert_yaxis()
ax.set_ylabel('VBucket')
ax.set_xlabel('Time')
ax.set_title(bucket)
ax.legend()
plot.show()
| apache-2.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/types/api.py | 7 | 1720 | # flake8: noqa
import numpy as np
from .common import (pandas_dtype,
is_dtype_equal,
is_extension_type,
# categorical
is_categorical,
is_categorical_dtype,
# datetimelike
is_datetimetz,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime64_any_dtype,
is_datetime64_ns_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_period,
is_period_dtype,
# string-like
is_string_dtype,
is_object_dtype,
# sparse
is_sparse,
# numeric types
is_scalar,
is_bool,
is_integer,
is_float,
is_complex,
is_number,
is_any_int_dtype,
is_integer_dtype,
is_int64_dtype,
is_numeric_dtype,
is_float_dtype,
is_floating_dtype,
is_bool_dtype,
is_complex_dtype,
# like
is_re,
is_re_compilable,
is_dict_like,
is_iterator,
is_list_like,
is_hashable,
is_named_tuple,
is_sequence)
| apache-2.0 |
mne-tools/mne-tools.github.io | 0.18/_downloads/820eb94c79d8a609d2b389a5808a87a4/plot_shift_evoked.py | 29 | 1245 | """
==================================
Shifting time-scale in evoked data
==================================
"""
# Author: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.viz import tight_layout
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
# Reading evoked data
condition = 'Left Auditory'
evoked = mne.read_evokeds(fname, condition=condition, baseline=(None, 0),
proj=True)
ch_names = evoked.info['ch_names']
picks = mne.pick_channels(ch_names=ch_names, include=["MEG 2332"])
# Create subplots
f, (ax1, ax2, ax3) = plt.subplots(3)
evoked.plot(exclude=[], picks=picks, axes=ax1,
titles=dict(grad='Before time shifting'), time_unit='s')
# Apply relative time-shift of 500 ms
evoked.shift_time(0.5, relative=True)
evoked.plot(exclude=[], picks=picks, axes=ax2,
titles=dict(grad='Relative shift: 500 ms'), time_unit='s')
# Apply absolute time-shift of 500 ms
evoked.shift_time(0.5, relative=False)
evoked.plot(exclude=[], picks=picks, axes=ax3,
titles=dict(grad='Absolute shift: 500 ms'), time_unit='s')
tight_layout()
| bsd-3-clause |
MTgeophysics/mtpy | mtpy/modeling/ws3dinv.py | 1 | 280366 | # -*- coding: utf-8 -*-
"""
===============
ws3dinv
===============
* Deals with input and output files for ws3dinv written by:
Siripunvaraporn, W.; Egbert, G.; Lenbury, Y. & Uyeshima, M.
Three-dimensional magnetotelluric inversion: data-space method
Physics of The Earth and Planetary Interiors, 2005, 150, 3-14
* Dependencies: matplotlib 1.3.x, numpy 1.7.x, scipy 0.13
and evtk if vtk files want to be written.
The intended use or workflow is something like this for getting started:
:Making input files: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> import os
>>> #1) make a list of all .edi files that will be inverted for
>>> edi_path = r"/home/EDI_Files"
>>> edi_list = [os.path.join(edi_path, edi) for edi in edi_path
>>> ... if edi.find('.edi') > 0]
>>> #2) make a grid from the stations themselves with 200m cell spacing
>>> wsmesh = ws.WSMesh(edi_list=edi_list, cell_size_east=200,
>>> ... cell_size_north=200)
>>> wsmesh.make_mesh()
>>> # check to see if the mesh is what you think it should be
>>> wsmesh.plot_mesh()
>>> # all is good write the mesh file
>>> wsmesh.write_initial_file(save_path=r"/home/ws3dinv/Inv1")
>>> # note this will write a file with relative station locations
>>> #change the starting model to be different than a halfspace
>>> mm = ws.WS3DModelManipulator(initial_fn=wsmesh.initial_fn)
>>> # an interactive gui will pop up to change the resistivity model
>>> #once finished write a new initial file
>>> mm.rewrite_initial_file()
>>> #3) write data file
>>> wsdata = ws.WSData(edi_list=edi_list, station_fn=wsmesh.station_fn)
>>> wsdata.write_data_file()
>>> #4) plot mt response to make sure everything looks ok
>>> rp = ws.PlotResponse(data_fn=wsdata.data_fn)
>>> #5) make startup file
>>> sws = ws.WSStartup(data_fn=wsdata.data_fn, initial_fn=mm.new_initial_fn)
:checking the model and response: ::
>>> mfn = r"/home/ws3dinv/Inv1/test_model.01"
>>> dfn = r"/home/ws3dinv/Inv1/WSDataFile.dat"
>>> rfn = r"/home/ws3dinv/Inv1/test_resp.01"
>>> sfn = r"/home/ws3dinv/Inv1/WS_Sation_Locations.txt"
>>> # plot the data vs. model response
>>> rp = ws.PlotResponse(data_fn=dfn, resp_fn=rfn, station_fn=sfn)
>>> # plot model slices where you can interactively step through
>>> ds = ws.PlotSlices(model_fn=mfn, station_fn=sfn)
>>> # plot phase tensor ellipses on top of depth slices
>>> ptm = ws.PlotPTMaps(data_fn=dfn, resp_fn=rfn, model_fn=mfn)
>>> #write files for 3D visualization in Paraview or Mayavi
>>> ws.write_vtk_files(mfn, sfn, r"/home/ParaviewFiles")
Created on Sun Aug 25 18:41:15 2013
@author: jpeacock-pr
"""
#==============================================================================
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from matplotlib.patches import Ellipse
from matplotlib.colors import Normalize
import matplotlib.colorbar as mcb
import matplotlib.gridspec as gridspec
import mtpy.core.z as mtz
import mtpy.core.mt as mt
import mtpy.imaging.mtplottools as mtplottools
import matplotlib.widgets as widgets
import matplotlib.colors as colors
import matplotlib.cm as cm
import mtpy.modeling.winglink as wl
import mtpy.utils.exceptions as mtex
import mtpy.analysis.pt as mtpt
import mtpy.imaging.mtcolors as mtcl
try:
from evtk.hl import gridToVTK, pointsToVTK
except ImportError:
    print ('If you want to write a vtk file for 3d viewing, you need to '
           'download and install evtk from '
           'https://bitbucket.org/pauloh/pyevtk')
    print ('Note: if you are using Windows you should build evtk first with '
           'either MinGW or cygwin using the command: \n'
' python setup.py build -compiler=mingw32 or \n'
' python setup.py build -compiler=cygwin')
#==============================================================================
#==============================================================================
# Data class
#==============================================================================
class WSData(object):
"""
Includes tools for reading and writing data files intended to be used with
ws3dinv.
:Example: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> import os
>>> edi_path = r"/home/EDI_Files"
>>> edi_list = [os.path.join(edi_path, edi) for edi in edi_path
>>> ... if edi.find('.edi') > 0]
>>> # create an evenly space period list in log space
>>> p_list = np.logspace(np.log10(.001), np.log10(1000), 12)
>>> wsdata = ws.WSData(edi_list=edi_list, period_list=p_list,
>>> ... station_fn=r"/home/stations.txt")
>>> wsdata.write_data_file()
====================== ====================================================
Attributes Description
====================== ====================================================
data numpy structured array with keys:
* *station* --> station name
* *east* --> relative eastern location in
grid
* *north* --> relative northern location in
grid
* *z_data* --> impedance tensor array with
shape
(n_stations, n_freq, 4, dtype=complex)
* *z_data_err--> impedance tensor error without
error map applied
* *z_err_map --> error map from data file
data_fn full path to data file
edi_list list of edi files used to make data file
n_z [ 4 | 8 ] number of impedance tensor elements
*default* is 8
ncol number of columns in out file from winglink
*default* is 5
period_list list of periods to invert for
ptol if periods in edi files don't match period_list
then program looks for periods within ptol
                           *default* is .15 or 15 percent
rotation_angle Angle to rotate the data relative to north. Here
                           the angle is measured clockwise from North,
Assuming North is 0 and East is 90. Rotating data,
and grid to align with regional geoelectric strike
can improve the inversion. *default* is None
save_path path to save the data file
station_fn full path to station file written by WSStation
station_locations numpy structured array for station locations keys:
* *station* --> station name
* *east* --> relative eastern location in
grid
* *north* --> relative northern location in
grid
if input a station file is written
station_east relative locations of station in east direction
station_north relative locations of station in north direction
station_names names of stations
units [ 'mv' | 'else' ] units of Z, needs to be mv for
ws3dinv. *default* is 'mv'
wl_out_fn Winglink .out file which describes a 3D grid
    wl_site_fn             Winglink .sites file which gives station locations
z_data impedance tensors of data with shape:
(n_station, n_periods, 2, 2)
z_data_err error of data impedance tensors with error map
applied, shape (n_stations, n_periods, 2, 2)
z_err [ float | 'data' ]
'data' to set errors as data errors or
give a percent error to impedance tensor elements
*default* is .05 or 5% if given as percent, ie. 5%
then it is converted to .05.
z_err_floor percent error floor, anything below this error will
be set to z_err_floor. *default* is None
z_err_map [zxx, zxy, zyx, zyy] for n_z = 8
[zxy, zyx] for n_z = 4
Value in percent to multiply the error by, which
give the user power to down weight bad data, so
the resulting error will be z_err_map*z_err
====================== ====================================================
====================== ====================================================
Methods Description
====================== ====================================================
build_data builds the data from .edi files
write_data_file writes a data file from attribute data. This way
you can read in a data file, change some parameters
and rewrite.
read_data_file reads in a ws3dinv data file
====================== ====================================================
"""
def __init__(self, **kwargs):
self.save_path = kwargs.pop('save_path', None)
self.data_basename = kwargs.pop('data_basename', 'WSDataFile.dat')
self.units = kwargs.pop('units', 'mv')
self.ncol = kwargs.pop('ncols', 5)
self.ptol = kwargs.pop('ptol', 0.15)
self.z_err = kwargs.pop('z_err', 0.05)
self.z_err_floor = kwargs.pop('z_err_floor', None)
self.z_err_map = kwargs.pop('z_err_map', [10,1,1,10])
self.n_z = kwargs.pop('n_z', 8)
self.period_list = kwargs.pop('period_list', None)
self.edi_list = kwargs.pop('edi_list', None)
self.station_locations = kwargs.pop('station_locations', None)
        self.rotation_angle = kwargs.pop('rotation_angle', None)
self.station_east = None
self.station_north = None
self.station_names = None
self.z_data = None
self.z_data_err = None
self.wl_site_fn = kwargs.pop('wl_site_fn', None)
self.wl_out_fn = kwargs.pop('wl_out_fn', None)
self.data_fn = kwargs.pop('data_fn', None)
self.station_fn = kwargs.pop('station_fn', None)
self.data = None
# make sure the error given is a decimal percent
if type(self.z_err) is not str and self.z_err > 1:
self.z_err /= 100.
# make sure the error floor given is a decimal percent
if self.z_err_floor is not None and self.z_err_floor > 1:
self.z_err_floor /= 100.
def build_data(self):
"""
Builds the data from .edi files to be written into a data file
Need to call this if any parameters have been reset to write a
correct data file.
"""
if self.edi_list is None:
raise WSInputError('Need to input a list of .edi files to build '
'the data')
if self.period_list is None:
raise WSInputError('Need to input a list of periods to extract '
'from the .edi files.' )
#get units correctly
if self.units == 'mv':
zconv = 1./796.
self.period_list = np.array(self.period_list)
#define some lengths
n_stations = len(self.edi_list)
n_periods = len(self.period_list)
#make a structured array to keep things in for convenience
z_shape = (n_periods, 2, 2)
data_dtype = [('station', '|S10'),
('east', np.float),
('north', np.float),
('z_data', (np.complex, z_shape)),
('z_data_err', (np.complex, z_shape)),
('z_err_map', (np.complex, z_shape))]
self.data = np.zeros(n_stations, dtype=data_dtype)
#------get station locations-------------------------------------------
if self.wl_site_fn != None:
if self.wl_out_fn is None:
raise IOError('Need to input an .out file to get station'
'locations, this should be output by Winglink')
#get x and y locations on a relative grid
east_list, north_list, station_list = \
wl.get_station_locations(self.wl_site_fn,
self.wl_out_fn,
ncol=self.ncol)
self.data['station'] = station_list
self.data['east'] = east_list
self.data['north'] = north_list
#if a station location file is input
if self.station_fn != None:
stations = WSStation(self.station_fn)
stations.read_station_file()
self.data['station'] = stations.names
self.data['east'] = stations.east
self.data['north'] = stations.north
#if the user made a grid in python or some other fashion
        if self.station_locations is not None:
try:
for dd, sd in enumerate(self.station_locations):
self.data['east'][dd] = sd['east_c']
self.data['north'][dd] = sd['north_c']
self.data['station'][dd] = sd['station']
stations = WSStation()
stations.station_fn = os.path.join(self.save_path,
'WS_Station_locations.txt')
stations.east = self.data['east']
stations.north = self.data['north']
stations.names = self.data['station']
stations.write_station_file()
except (KeyError, ValueError):
self.data['east'] = self.station_locations[:, 0]
self.data['north']= self.station_locations[:, 1]
#--------find frequencies----------------------------------------------
for ss, edi in enumerate(self.edi_list):
if not os.path.isfile(edi):
raise IOError('Could not find '+edi)
mt_obj = mt.MT(edi)
if self.rotation_angle is not None:
mt_obj.rotation_angle = self.rotation_angle
print('{0}{1}{0}'.format('-'*20, mt_obj.station))
# get only those periods that are within the station data
interp_periods = self.period_list[np.where(
(self.period_list >= 1./mt_obj.Z.freq.max()) &
(self.period_list <= 1./mt_obj.Z.freq.min()))]
#interpolate over those periods
interp_z, interp_t = mt_obj.interpolate(1./interp_periods)
for kk, ff in enumerate(interp_periods):
jj = np.where(self.period_list == ff)[0][0]
print(' {0:.6g} (s)'.format(ff))
self.data[ss]['z_data'][jj, :] = interp_z.z[kk, :, :]*zconv
self.data[ss]['z_data_err'][jj, :] = interp_z.z_err[kk, :, :]*zconv
def compute_errors(self):
"""
compute the errors from the given attributes
"""
for d_arr in self.data:
if self.z_err == 'data':
pass
elif self.z_err_floor is None and type(self.z_err) is float:
d_arr['z_data_err'][:] = d_arr['z_data'][:]*self.z_err
elif self.z_err_floor is not None:
ef_idx = np.where(d_arr['z_data_err'] < self.z_err_floor)
d_arr['z_data_err'][ef_idx] = d_arr['z_data'][ef_idx]*self.z_err_floor
d_arr['z_err_map'] = np.reshape(len(self.period_list)*self.z_err_map,
(len(self.period_list), 2, 2))
def write_data_file(self, **kwargs):
"""
Writes a data file based on the attribute data
Key Word Arguments:
---------------------
**data_fn** : string
full path to data file name
**save_path** : string
directory path to save data file, will be written
as save_path/data_basename
**data_basename** : string
basename of data file to be saved as
save_path/data_basename
*default* is WSDataFile.dat
.. note:: if any of the data attributes have been reset, be sure
to call build_data() before write_data_file.
"""
if self.data is None:
self.build_data()
# compute errors, this helps when rewriting a data file
self.compute_errors()
for key in ['data_fn', 'save_path', 'data_basename']:
try:
setattr(self, key, kwargs[key])
except KeyError:
pass
#create the output filename
if self.save_path == None:
if self.wl_out_fn is not None:
self.save_path = os.path.dirname(self.wl_site_fn)
else:
self.save_path = os.getcwd()
self.data_fn = os.path.join(self.save_path, self.data_basename)
elif os.path.isdir(self.save_path) == True:
self.data_fn = os.path.join(self.save_path, self.data_basename)
else:
self.data_fn = self.save_path
#-----Write data file--------------------------------------------------
n_stations = len(self.data)
n_periods = self.data[0]['z_data'].shape[0]
        ofid = open(self.data_fn, 'w')
ofid.write('{0:d} {1:d} {2:d}\n'.format(n_stations, n_periods,
self.n_z))
#write N-S locations
ofid.write('Station_Location: N-S \n')
        for ii in range(n_stations//self.n_z+1):
for ll in range(self.n_z):
index = ii*self.n_z+ll
try:
ofid.write('{0:+.4e} '.format(self.data['north'][index]))
except IndexError:
pass
ofid.write('\n')
#write E-W locations
ofid.write('Station_Location: E-W \n')
        for ii in range(n_stations//self.n_z+1):
for ll in range(self.n_z):
index = ii*self.n_z+ll
try:
ofid.write('{0:+.4e} '.format(self.data['east'][index]))
except IndexError:
pass
ofid.write('\n')
#write impedance tensor components
for ii, p1 in enumerate(self.period_list):
ofid.write('DATA_Period: {0:3.6f}\n'.format(p1))
for ss in range(n_stations):
zline = self.data[ss]['z_data'][ii].reshape(4,)
                for jj in range(self.n_z//2):
ofid.write('{0:+.4e} '.format(zline[jj].real))
ofid.write('{0:+.4e} '.format(-zline[jj].imag))
ofid.write('\n')
#write error as a percentage of Z
for ii, p1 in enumerate(self.period_list):
ofid.write('ERROR_Period: {0:3.6f}\n'.format(p1))
for ss in range(n_stations):
zline = self.data[ss]['z_data_err'][ii].reshape(4,)
                for jj in range(self.n_z//2):
ofid.write('{0:+.4e} '.format(zline[jj].real))
ofid.write('{0:+.4e} '.format(zline[jj].imag))
ofid.write('\n')
#write error maps
for ii, p1 in enumerate(self.period_list):
ofid.write('ERMAP_Period: {0:3.6f}\n'.format(p1))
for ss in range(n_stations):
zline = self.data[ss]['z_err_map'][ii].reshape(4,)
                for jj in range(self.n_z//2):
ofid.write('{0:.5e} '.format(self.z_err_map[jj]))
ofid.write('{0:.5e} '.format(self.z_err_map[jj]))
ofid.write('\n')
ofid.close()
print('Wrote file to: {0}'.format(self.data_fn))
self.station_east = self.data['east']
self.station_north = self.data['north']
self.station_names = self.data['station']
self.z_data = self.data['z_data']
self.z_data_err = self.data['z_data_err']*self.data['z_err_map']
def read_data_file(self, data_fn=None, wl_sites_fn=None, station_fn=None):
"""
read in data file
Arguments:
-----------
**data_fn** : string
full path to data file
**wl_sites_fn** : string
full path to sites file output by winglink.
This is to match the station name with station
number.
**station_fn** : string
full path to station location file written by
WSStation
Fills Attributes:
------------------
**data** : structure np.ndarray
fills the attribute WSData.data with values
**period_list** : np.ndarray()
fills the period list with values.
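        A minimal usage sketch; the file paths below are made up:
        :Example: ::
            >>> wsdata = WSData()
            >>> wsdata.read_data_file(r"/home/ws3dinv/Inv1/WSDataFile.dat",
            >>> ...    station_fn=r"/home/ws3dinv/Inv1/WS_Station_Locations.txt")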
"""
if self.units == 'mv':
zconv = 796.
else:
zconv = 1
if data_fn is not None:
self.data_fn = data_fn
if self.data_fn is None:
raise WSInputError('Need to input a data file')
if os.path.isfile(self.data_fn) is False:
raise WSInputError('Could not find {0}, check path'.format(
self.data_fn))
self.save_path = os.path.dirname(self.data_fn)
        dfid = open(self.data_fn, 'r')
dlines = dfid.readlines()
#get size number of stations, number of frequencies,
# number of Z components
n_stations, n_periods, nz = np.array(dlines[0].strip().split(),
dtype='int')
nsstart = 2
self.n_z = nz
#make a structured array to keep things in for convenience
z_shape = (n_periods, 2, 2)
data_dtype = [('station', '|S10'),
('east', np.float),
('north', np.float),
('z_data', (np.complex, z_shape)),
('z_data_err', (np.complex, z_shape)),
('z_err_map', (np.complex, z_shape))]
self.data = np.zeros(n_stations, dtype=data_dtype)
findlist = []
for ii, dline in enumerate(dlines[1:50], 1):
if dline.find('Station_Location: N-S') == 0:
findlist.append(ii)
elif dline.find('Station_Location: E-W') == 0:
findlist.append(ii)
elif dline.find('DATA_Period:') == 0:
findlist.append(ii)
ncol = len(dlines[nsstart].strip().split())
#get site names if entered a sites file
if wl_sites_fn != None:
self.wl_site_fn = wl_sites_fn
slist, station_list = wl.read_sites_file(self.wl_sites_fn)
self.data['station'] = station_list
elif station_fn != None:
self.station_fn = station_fn
stations = WSStation(self.station_fn)
stations.read_station_file()
self.data['station'] = stations.names
else:
self.data['station'] = np.arange(n_stations)
#get N-S locations
for ii, dline in enumerate(dlines[findlist[0]+1:findlist[1]],0):
dline = dline.strip().split()
for jj in range(ncol):
try:
self.data['north'][ii*ncol+jj] = float(dline[jj])
except IndexError:
pass
except ValueError:
break
#get E-W locations
for ii, dline in enumerate(dlines[findlist[1]+1:findlist[2]],0):
dline = dline.strip().split()
for jj in range(self.n_z):
try:
self.data['east'][ii*ncol+jj] = float(dline[jj])
except IndexError:
pass
except ValueError:
break
#make some empty array to put stuff into
self.period_list = np.zeros(n_periods)
#get data
per = 0
error_find = False
errmap_find = False
for ii, dl in enumerate(dlines[findlist[2]:]):
if dl.lower().find('period') > 0:
st = 0
if dl.lower().find('data') == 0:
dkey = 'z_data'
self.period_list[per] = float(dl.strip().split()[1])
elif dl.lower().find('error') == 0:
dkey = 'z_data_err'
if not error_find:
error_find = True
per = 0
elif dl.lower().find('ermap') == 0:
dkey = 'z_err_map'
if not errmap_find:
errmap_find = True
per = 0
#print '-'*20+dkey+'-'*20
per += 1
else:
if dkey == 'z_err_map':
zline = np.array(dl.strip().split(), dtype=np.float)
self.data[st][dkey][per-1,:] = np.array([[zline[0]-1j*zline[1],
zline[2]-1j*zline[3]],
[zline[4]-1j*zline[5],
zline[6]-1j*zline[7]]])
else:
zline = np.array(dl.strip().split(), dtype=np.float)*zconv
self.data[st][dkey][per-1,:] = np.array([[zline[0]-1j*zline[1],
zline[2]-1j*zline[3]],
[zline[4]-1j*zline[5],
zline[6]-1j*zline[7]]])
st += 1
self.station_east = self.data['east']
self.station_north = self.data['north']
self.station_names = self.data['station']
self.z_data = self.data['z_data']
#need to be careful when multiplying complex numbers
self.z_data_err = \
self.data['z_data_err'].real*self.data['z_err_map'].real+1j*\
self.data['z_data_err'].imag*self.data['z_err_map'].imag
#make station_locations structure array
self.station_locations = np.zeros(len(self.station_east),
dtype=[('station','|S10'),
('east', np.float),
('north', np.float),
('east_c', np.float),
('north_c', np.float)])
self.station_locations['east'] = self.data['east']
self.station_locations['north'] = self.data['north']
self.station_locations['station'] = self.data['station']
#==============================================================================
# stations
#==============================================================================
class WSStation(object):
"""
read and write a station file where the locations are relative to the
3D mesh.
==================== ======================================================
Attributes Description
==================== ======================================================
east array of relative locations in east direction
elev array of elevations for each station
names array of station names
north array of relative locations in north direction
station_fn full path to station file
save_path path to save file to
==================== ======================================================
==================== ======================================================
Methods Description
==================== ======================================================
read_station_file reads in a station file
write_station_file writes a station file
write_vtk_file writes a vtk points file for station locations
==================== ======================================================
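    A minimal usage sketch; the locations and save path below are made up:
    :Example: ::
        >>> import numpy as np
        >>> import mtpy.modeling.ws3dinv as ws
        >>> # hypothetical relative locations (m) and output directory
        >>> stations = ws.WSStation()
        >>> stations.write_station_file(east=np.array([0., 500.]),
        >>> ...                         north=np.array([-250., 250.]),
        >>> ...                         station_list=['MT01', 'MT02'],
        >>> ...                         save_path=r"/home/ws3dinv/Inv1")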
"""
def __init__(self, station_fn=None, **kwargs):
self.station_fn = station_fn
self.east = kwargs.pop('east', None)
self.north = kwargs.pop('north', None)
self.elev = kwargs.pop('elev', None)
self.names = kwargs.pop('names', None)
self.save_path = kwargs.pop('save_path', None)
def write_station_file(self, east=None, north=None, station_list=None,
save_path=None, elev=None):
"""
write a station file to go with the data file.
the locations are on a relative grid where (0, 0, 0) is the
center of the grid. Also, the stations are assumed to be in the center
of the cell.
Arguments:
-----------
**east** : np.ndarray(n_stations)
relative station locations in east direction
**north** : np.ndarray(n_stations)
relative station locations in north direction
**elev** : np.ndarray(n_stations)
relative station locations in vertical direction
**station_list** : list or np.ndarray(n_stations)
name of stations
**save_path** : string
directory or full path to save station file to
if a directory the file will be saved as
save_path/WS_Station_Locations.txt
if save_path is none the current working directory
is used as save_path
Outputs:
---------
**station_fn** : full path to station file
"""
if east is not None:
self.east = east
if north is not None:
self.north = north
if station_list is not None:
self.names = station_list
if elev is not None:
self.elev = elev
else:
if self.north is not None:
self.elev = np.zeros_like(self.north)
if save_path is not None:
self.save_path = save_path
if os.path.isdir(self.save_path):
self.station_fn = os.path.join(self.save_path,
'WS_Station_Locations.txt')
else:
self.station_fn = save_path
elif self.save_path is None:
self.save_path = os.getcwd()
self.station_fn = os.path.join(self.save_path,
'WS_Station_Locations.txt')
elif os.path.isdir(self.save_path):
self.station_fn = os.path.join(self.save_path,
'WS_Station_Locations.txt')
        sfid = open(self.station_fn, 'w')
sfid.write('{0:<14}{1:^14}{2:^14}{3:^14}\n'.format('station', 'east',
'north', 'elev'))
for ee, nn, zz, ss in zip(self.east, self.north, self.elev, self.names):
ee = '{0:+.4e}'.format(ee)
nn = '{0:+.4e}'.format(nn)
zz = '{0:+.4e}'.format(zz)
sfid.write('{0:<14}{1:^14}{2:^14}{3:^14}\n'.format(ss, ee, nn, zz))
sfid.close()
print('Wrote station locations to {0}'.format(self.station_fn))
def read_station_file(self, station_fn=None):
"""
read in station file written by write_station_file
Arguments:
----------
**station_fn** : string
full path to station file
Outputs:
---------
**east** : np.ndarray(n_stations)
relative station locations in east direction
**north** : np.ndarray(n_stations)
relative station locations in north direction
**elev** : np.ndarray(n_stations)
relative station locations in vertical direction
**station_list** : list or np.ndarray(n_stations)
name of stations
"""
if station_fn is not None:
self.station_fn = station_fn
self.save_path = os.path.dirname(self.station_fn)
self.station_locations = np.loadtxt(self.station_fn, skiprows=1,
dtype=[('station', '|S10'),
('east_c', np.float),
('north_c', np.float),
('elev', np.float)])
self.east = self.station_locations['east_c']
self.north = self.station_locations['north_c']
self.names = self.station_locations['station']
self.elev = self.station_locations['elev']
def write_vtk_file(self, save_path, vtk_basename='VTKStations'):
"""
write a vtk file to plot stations
Arguments:
------------
**save_path** : string
directory to save file to. Will save as
save_path/vtk_basename
**vtk_basename** : string
base file name for vtk file, extension is
automatically added.
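        A minimal usage sketch; the paths below are made up:
        :Example: ::
            >>> stations = WSStation(r"/home/ws3dinv/WS_Station_Locations.txt")
            >>> stations.read_station_file()
            >>> vtk_fn = stations.write_vtk_file(r"/home/ws3dinv/Paraview")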
"""
if os.path.isdir(save_path) == True:
save_fn = os.path.join(save_path, vtk_basename)
if self.elev is None:
self.elev = np.zeros_like(self.north)
pointsToVTK(save_fn, self.north, self.east, self.elev,
data={'value':np.ones_like(self.north)})
return save_fn
def from_wl_write_station_file(self, sites_file, out_file, ncol=5):
"""
write a ws station file from the outputs of winglink
Arguments:
-----------
**sites_fn** : string
full path to sites file output from winglink
**out_fn** : string
full path to .out file output from winglink
**ncol** : int
number of columns the data is in
*default* is 5
"""
wl_east, wl_north, wl_station_list = wl.get_station_locations(
sites_file,
out_file,
ncol=ncol)
self.write_station_file(east=wl_east, north=wl_north,
station_list=wl_station_list)
#==============================================================================
# mesh class
#==============================================================================
class WSMesh(object):
"""
make and read a FE mesh grid
The mesh assumes the coordinate system where:
x == North
y == East
z == + down
All dimensions are in meters.
:Example: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> import os
>>> #1) make a list of all .edi files that will be inverted for
>>> edi_path = r"/home/EDI_Files"
>>> edi_list = [os.path.join(edi_path, edi) for edi in edi_path
>>> ... if edi.find('.edi') > 0]
>>> #2) make a grid from the stations themselves with 200m cell spacing
>>> wsmesh = ws.WSMesh(edi_list=edi_list, cell_size_east=200,
>>> ... cell_size_north=200)
>>> wsmesh.make_mesh()
>>> # check to see if the mesh is what you think it should be
>>> wsmesh.plot_mesh()
>>> # all is good write the mesh file
>>> wsmesh.write_initial_file(save_path=r"/home/ws3dinv/Inv1")
==================== ======================================================
Attributes Description
==================== ======================================================
cell_size_east mesh block width in east direction
*default* is 500
cell_size_north mesh block width in north direction
*default* is 500
edi_list list of .edi files to invert for
grid_east overall distance of grid nodes in east direction
grid_north overall distance of grid nodes in north direction
grid_z overall distance of grid nodes in z direction
initial_fn full path to initial file name
n_layers total number of vertical layers in model
nodes_east relative distance between nodes in east direction
nodes_north relative distance between nodes in north direction
nodes_z relative distance between nodes in east direction
pad_east number of cells for padding on E and W sides
*default* is 5
pad_north number of cells for padding on S and N sides
*default* is 5
pad_root_east padding cells E & W will be pad_root_east**(x)
pad_root_north padding cells N & S will be pad_root_north**(x)
pad_z number of cells for padding at bottom
*default* is 5
res_list list of resistivity values for starting model
res_model starting resistivity model
rotation_angle Angle to rotate the grid to. Angle is measured
                         positive clockwise assuming North is 0 and east is 90.
*default* is None
save_path path to save file to
station_fn full path to station file
station_locations location of stations
title title in initial file
z1_layer first layer thickness
z_bottom absolute bottom of the model *default* is 300,000
z_target_depth Depth of deepest target, *default* is 50,000
==================== ======================================================
==================== ======================================================
Methods Description
==================== ======================================================
make_mesh makes a mesh from the given specifications
plot_mesh plots mesh to make sure everything is good
write_initial_file writes an initial model file that includes the mesh
==================== ======================================================
"""
def __init__(self, edi_list=None, **kwargs):
self.edi_list = edi_list
# size of cells within station area in meters
self.cell_size_east = kwargs.pop('cell_size_east', 500)
self.cell_size_north = kwargs.pop('cell_size_north', 500)
#padding cells on either side
self.pad_east = kwargs.pop('pad_east', 5)
self.pad_north = kwargs.pop('pad_north', 5)
self.pad_z = kwargs.pop('pad_z', 5)
#root of padding cells
self.pad_root_east = kwargs.pop('pad_root_east', 5)
self.pad_root_north = kwargs.pop('pad_root_north', 5)
self.z1_layer = kwargs.pop('z1_layer', 10)
self.z_target_depth = kwargs.pop('z_target_depth', 50000)
self.z_bottom = kwargs.pop('z_bottom', 300000)
#number of vertical layers
self.n_layers = kwargs.pop('n_layers', 30)
#--> attributes to be calculated
#station information
self.station_locations = kwargs.pop('station_locations', None)
#grid nodes
self.nodes_east = None
self.nodes_north = None
self.nodes_z = None
#grid locations
self.grid_east = None
self.grid_north = None
self.grid_z = None
#resistivity model
self.res_model = None
self.res_list = None
self.res_model_int = None
#rotation angle
self.rotation_angle = kwargs.pop('rotation_angle', 0.0)
        #initial file stuff
self.initial_fn = None
self.station_fn = None
self.save_path = kwargs.pop('save_path', None)
        self.title = 'Initial Model File made in MTpy'
def make_mesh(self):
"""
create finite element mesh according to parameters set.
The mesh is built by first finding the center of the station area.
Then cells are added in the north and east direction with width
        cell_size_east and cell_size_north to the extremities of the station
area. Padding cells are then added to extend the model to reduce
edge effects. The number of cells are pad_east and pad_north and the
increase in size is by pad_root_east and pad_root_north. The station
locations are then computed as the center of the nearest cell as
required by the code.
The vertical cells are built to increase in size exponentially with
depth. The first cell depth is first_layer_thickness and should be
about 1/10th the shortest skin depth. The layers then increase
on a log scale to z_target_depth. Then the model is
        padded with pad_z number of cells to extend the depth of the model.
        The east-west padding cell edges, for example, are computed as::
            padding = np.round(cell_size_east*pad_root_east**np.arange(start=.5,
                               stop=3, step=3./pad_east)) + west
"""
#if station locations are not input read from the edi files
if self.station_locations is None:
if self.edi_list is None:
raise AttributeError('edi_list is None, need to input a list of '
'edi files to read in.')
n_stations = len(self.edi_list)
#make a structured array to put station location information into
self.station_locations = np.zeros(n_stations,
dtype=[('station','|S10'),
('east', np.float),
('north', np.float),
('east_c', np.float),
('north_c', np.float),
('elev', np.float)])
#get station locations in meters
for ii, edi in enumerate(self.edi_list):
mt_obj = mt.MT(edi)
self.station_locations[ii]['station'] = mt_obj.station
self.station_locations[ii]['east'] = mt_obj.east
self.station_locations[ii]['north'] = mt_obj.north
self.station_locations[ii]['elev'] = mt_obj.elev
#--> rotate grid if necessary
#to do this rotate the station locations because ModEM assumes the
#input mesh is a lateral grid.
#needs to be 90 - because North is assumed to be 0 but the rotation
#matrix assumes that E is 0.
if self.rotation_angle != 0:
cos_ang = np.cos(np.deg2rad(self.rotation_angle))
sin_ang = np.sin(np.deg2rad(self.rotation_angle))
rot_matrix = np.matrix(np.array([[cos_ang, sin_ang],
[-sin_ang, cos_ang]]))
coords = np.array([self.station_locations['east'],
self.station_locations['north']])
#rotate the relative station locations
new_coords = np.array(np.dot(rot_matrix, coords))
self.station_locations['east'][:] = new_coords[0, :]
self.station_locations['north'][:] = new_coords[1, :]
print('Rotated stations by {0:.1f} deg clockwise from N'.format(
self.rotation_angle))
#remove the average distance to get coordinates in a relative space
self.station_locations['east'] -= self.station_locations['east'].mean()
self.station_locations['north'] -= self.station_locations['north'].mean()
#translate the stations so they are relative to 0,0
east_center = (self.station_locations['east'].max()-
np.abs(self.station_locations['east'].min()))/2
north_center = (self.station_locations['north'].max()-
np.abs(self.station_locations['north'].min()))/2
#shift so the mid-point of the station area is at (0, 0)
self.station_locations['east'] -= east_center
self.station_locations['north'] -= north_center
#pick out the furthest south and west locations
#and put that station at the bottom left corner of the main grid
west = self.station_locations['east'].min()-(1.5*self.cell_size_east)
east = self.station_locations['east'].max()+(1.5*self.cell_size_east)
south = self.station_locations['north'].min()-(1.5*self.cell_size_north)
north = self.station_locations['north'].max()+(1.5*self.cell_size_north)
#make sure the variable n_stations is initialized
try:
n_stations
except NameError:
n_stations = self.station_locations.shape[0]
#-------make a grid around the stations from the parameters above------
#--> make grid in east-west direction
#cells within station area
midxgrid = np.arange(start=west,
stop=east+self.cell_size_east,
step=self.cell_size_east)
#padding cells on the west side
pad_west = np.round(-self.cell_size_east*\
self.pad_root_east**np.arange(start=.5, stop=3,
step=3./self.pad_east))+west
#padding cells on east side
pad_east = np.round(self.cell_size_east*\
self.pad_root_east**np.arange(start=.5, stop=3,
step=3./self.pad_east))+east
#make the cells going west go in reverse order and append them to the
#cells going east
east_gridr = np.append(np.append(pad_west[::-1], midxgrid), pad_east)
#--> make grid in north-south direction
#N-S cells with in station area
midygrid = np.arange(start=south,
stop=north+self.cell_size_north,
step=self.cell_size_north)
#padding cells on south side
south_pad = np.round(-self.cell_size_north*
self.pad_root_north**np.arange(start=.5,
stop=3, step=3./self.pad_north))+south
#padding cells on north side
north_pad = np.round(self.cell_size_north*
self.pad_root_north**np.arange(start=.5,
stop=3, step=3./self.pad_north))+north
#make the cells going south go in reverse order and append them to the
#cells going north
north_gridr = np.append(np.append(south_pad[::-1], midygrid), north_pad)
#--> make depth grid
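#layer thicknesses are logarithmically spaced between z1_layer and
#roughly z_target_depth, then each value is rounded down to the nearest
#multiple of its leading power of ten to give "clean" numbers; the
#padding thicknesses are built the same way between z_target_depth and
#roughly z_bottom.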
log_z = np.logspace(np.log10(self.z1_layer),
np.log10(self.z_target_depth-np.logspace(np.log10(self.z1_layer),
np.log10(self.z_target_depth),
num=self.n_layers)[-2]),
num=self.n_layers-self.pad_z)
ztarget = np.array([zz-zz%10**np.floor(np.log10(zz)) for zz in
log_z])
log_zpad = np.logspace(np.log10(self.z_target_depth),
np.log10(self.z_bottom-np.logspace(np.log10(self.z_target_depth),
np.log10(self.z_bottom),
num=self.pad_z)[-2]),
num=self.pad_z)
zpadding = np.array([zz-zz%10**np.floor(np.log10(zz)) for zz in
log_zpad])
z_nodes = np.append(ztarget, zpadding)
z_grid = np.array([z_nodes[:ii+1].sum() for ii in range(z_nodes.shape[0])])
#---Need to make an array of the individual cell dimensions for
# wsinv3d
east_nodes = east_gridr.copy()
nx = east_gridr.shape[0]
east_nodes[:int(nx/2)] = np.array([abs(east_gridr[ii]-east_gridr[ii+1])
for ii in range(int(nx/2))])
east_nodes[int(nx/2):] = np.array([abs(east_gridr[ii]-east_gridr[ii+1])
for ii in range(int(nx/2)-1, nx-1)])
north_nodes = north_gridr.copy()
ny = north_gridr.shape[0]
north_nodes[:int(ny/2)] = np.array([abs(north_gridr[ii]-north_gridr[ii+1])
for ii in range(int(ny/2))])
north_nodes[int(ny/2):] = np.array([abs(north_gridr[ii]-north_gridr[ii+1])
for ii in range(int(ny/2)-1, ny-1)])
#--put the grids into coordinates relative to the center of the grid
east_grid = east_nodes.copy()
east_grid[:int(nx/2)] = -np.array([east_nodes[ii:int(nx/2)].sum()
for ii in range(int(nx/2))])
east_grid[int(nx/2):] = np.array([east_nodes[int(nx/2):ii+1].sum()
for ii in range(int(nx/2), nx)])-\
east_nodes[int(nx/2)]
north_grid = north_nodes.copy()
north_grid[:int(ny/2)] = -np.array([north_nodes[ii:int(ny/2)].sum()
for ii in range(int(ny/2))])
north_grid[int(ny/2):] = np.array([north_nodes[int(ny/2):ii+1].sum()
for ii in range(int(ny/2),ny)])-\
north_nodes[int(ny/2)]
#make nodes attributes
self.nodes_east = east_nodes
self.nodes_north = north_nodes
self.nodes_z = z_nodes
self.grid_east = east_grid
self.grid_north = north_grid
self.grid_z = z_grid
#make sure that the stations are in the center of the cell as required
#by the code.
for ii in range(n_stations):
#look for the closest grid line
xx = [nn for nn, xf in enumerate(east_grid)
if xf>(self.station_locations[ii]['east']-self.cell_size_east)
and xf<(self.station_locations[ii]['east']+self.cell_size_east)]
#shift the station to the center in the east-west direction
if east_grid[xx[0]] < self.station_locations[ii]['east']:
self.station_locations[ii]['east_c'] = \
east_grid[xx[0]]+self.cell_size_east/2
elif east_grid[xx[0]] > self.station_locations[ii]['east']:
self.station_locations[ii]['east_c'] = \
east_grid[xx[0]]-self.cell_size_east/2
#look for closest grid line
yy = [mm for mm, yf in enumerate(north_grid)
if yf>(self.station_locations[ii]['north']-self.cell_size_north)
and yf<(self.station_locations[ii]['north']+self.cell_size_north)]
#shift station to center of cell in north-south direction
if north_grid[yy[0]] < self.station_locations[ii]['north']:
self.station_locations[ii]['north_c'] = \
north_grid[yy[0]]+self.cell_size_north/2
elif north_grid[yy[0]] > self.station_locations[ii]['north']:
self.station_locations[ii]['north_c'] = \
north_grid[yy[0]]-self.cell_size_north/2
#--> print out useful information
print('-'*15)
print(' Number of stations = {0}'.format(len(self.station_locations)))
print(' Dimensions: ')
print(' e-w = {0}'.format(east_grid.shape[0]))
print(' n-s = {0}'.format(north_grid.shape[0]))
print(' z = {0} (without 7 air layers)'.format(z_grid.shape[0]))
print(' Extensions: ')
print(' e-w = {0:.1f} (m)'.format(east_nodes.__abs__().sum()))
print(' n-s = {0:.1f} (m)'.format(north_nodes.__abs__().sum()))
print(' 0-z = {0:.1f} (m)'.format(self.nodes_z.__abs__().sum()))
print('-'*15)
#write a station location file for later
stations = WSStation()
stations.write_station_file(east=self.station_locations['east_c'],
north=self.station_locations['north_c'],
elev=self.station_locations['elev'],
station_list=self.station_locations['station'],
save_path=self.save_path)
self.station_fn = stations.station_fn
def plot_mesh(self, east_limits=None, north_limits=None, z_limits=None,
**kwargs):
"""
Arguments:
----------
**east_limits** : tuple (xmin,xmax)
plot min and max distances in meters for the
E-W direction. If None, the east_limits
will be set to furthest stations east and west.
*default* is None
**north_limits** : tuple (ymin,ymax)
plot min and max distances in meters for the
N-S direction. If None, the north_limits
will be set to furthest stations north and south.
*default* is None
**z_limits** : tuple (zmin,zmax)
plot min and max distances in meters for the
vertical direction. If None, the z_limits is
set to the number of layers. Z is positive down
*default* is None
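
A minimal usage sketch (assumes make_mesh has already been run; the
limits shown are arbitrary values in meters):
:Example: ::
>>> wsmesh.make_mesh()
>>> wsmesh.plot_mesh(east_limits=(-10000, 10000))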
"""
fig_size = kwargs.pop('fig_size', [6, 6])
fig_dpi = kwargs.pop('fig_dpi', 300)
fig_num = kwargs.pop('fig_num', 1)
station_marker = kwargs.pop('station_marker', 'v')
marker_color = kwargs.pop('station_color', 'b')
marker_size = kwargs.pop('marker_size', 2)
line_color = kwargs.pop('line_color', 'k')
line_width = kwargs.pop('line_width', .5)
plt.rcParams['figure.subplot.hspace'] = .3
plt.rcParams['figure.subplot.wspace'] = .3
plt.rcParams['figure.subplot.left'] = .08
plt.rcParams['font.size'] = 7
fig = plt.figure(fig_num, figsize=fig_size, dpi=fig_dpi)
plt.clf()
#---plot map view
ax1 = fig.add_subplot(1, 2, 1, aspect='equal')
#make sure the station is in the center of the cell
ax1.scatter(self.station_locations['east_c'],
self.station_locations['north_c'],
marker=station_marker,
c=marker_color,
s=marker_size)
#plot the grid if desired
east_line_xlist = []
east_line_ylist = []
for xx in self.grid_east:
east_line_xlist.extend([xx, xx])
east_line_xlist.append(None)
east_line_ylist.extend([self.grid_north.min(),
self.grid_north.max()])
east_line_ylist.append(None)
ax1.plot(east_line_xlist,
east_line_ylist,
lw=line_width,
color=line_color)
north_line_xlist = []
north_line_ylist = []
for yy in self.grid_north:
north_line_xlist.extend([self.grid_east.min(),
self.grid_east.max()])
north_line_xlist.append(None)
north_line_ylist.extend([yy, yy])
north_line_ylist.append(None)
ax1.plot(north_line_xlist,
north_line_ylist,
lw=line_width,
color=line_color)
if east_limits is None:
ax1.set_xlim(self.station_locations['east'].min()-\
10*self.cell_size_east,
self.station_locations['east'].max()+\
10*self.cell_size_east)
else:
ax1.set_xlim(east_limits)
if north_limits is None:
ax1.set_ylim(self.station_locations['north'].min()-\
10*self.cell_size_north,
self.station_locations['north'].max()+\
10*self.cell_size_north)
else:
ax1.set_ylim(north_limits)
ax1.set_ylabel('Northing (m)', fontdict={'size':9,'weight':'bold'})
ax1.set_xlabel('Easting (m)', fontdict={'size':9,'weight':'bold'})
##----plot depth view
ax2 = fig.add_subplot(1, 2, 2, aspect='auto', sharex=ax1)
#plot the grid if desired
east_line_xlist = []
east_line_ylist = []
for xx in self.grid_east:
east_line_xlist.extend([xx, xx])
east_line_xlist.append(None)
east_line_ylist.extend([0,
self.grid_z.max()])
east_line_ylist.append(None)
ax2.plot(east_line_xlist,
east_line_ylist,
lw=line_width,
color=line_color)
z_line_xlist = []
z_line_ylist = []
for zz in self.grid_z:
z_line_xlist.extend([self.grid_east.min(),
self.grid_east.max()])
z_line_xlist.append(None)
z_line_ylist.extend([zz, zz])
z_line_ylist.append(None)
ax2.plot(z_line_xlist,
z_line_ylist,
lw=line_width,
color=line_color)
#--> plot stations
ax2.scatter(self.station_locations['east_c'],
[0]*self.station_locations.shape[0],
marker=station_marker,
c=marker_color,
s=marker_size)
if z_limits is None:
ax2.set_ylim(self.z_target_depth, -200)
else:
ax2.set_ylim(z_limits)
if east_limits is None:
ax1.set_xlim(self.station_locations['east'].min()-\
10*self.cell_size_east,
self.station_locations['east'].max()+\
10*self.cell_size_east)
else:
ax1.set_xlim(east_limits)
ax2.set_ylabel('Depth (m)', fontdict={'size':9, 'weight':'bold'})
ax2.set_xlabel('Easting (m)', fontdict={'size':9, 'weight':'bold'})
plt.show()
def convert_model_to_int(self):
"""
convert the resistivity model that is in ohm-m to integer values
corresponding to res_list
"""
self.res_model_int = np.ones_like(self.res_model)
#make a dictionary of values to write to file.
self.res_dict = dict([(res, ii)
for ii, res in
enumerate(sorted(self.res_list), 1)])
for ii, res in enumerate(self.res_list):
indexes = np.where(self.res_model == res)
self.res_model_int[indexes] = self.res_dict[res]
if ii == 0:
indexes = np.where(self.res_model <= res)
self.res_model_int[indexes] = self.res_dict[res]
elif ii == len(self.res_list)-1:
indexes = np.where(self.res_model >= res)
self.res_model_int[indexes] = self.res_dict[res]
else:
l_index = max([0, ii-1])
h_index = min([len(self.res_list)-1, ii+1])
indexes = np.where((self.res_model > self.res_list[l_index]) &
(self.res_model < self.res_list[h_index]))
self.res_model_int[indexes] = self.res_dict[res]
print('Converted resistivity model to integers.')
def write_initial_file(self, **kwargs):
"""
will write an initial file for wsinv3d.
Note that x is assumed to be S --> N, y is assumed to be W --> E and
z is positive downwards. This means that index [0, 0, 0] is the
southwest corner of the first layer. Therefore if you build a model
by hand the layer block will look as it should in map view.
Also, the xgrid, ygrid and zgrid are assumed to be the relative
distance between neighboring nodes. This is needed because wsinv3d
builds the model from the bottom SW corner assuming the cell width
from the init file.
Key Word Arguments:
----------------------
**nodes_north** : np.array(nx)
block dimensions (m) in the N-S direction.
**Note** that the code reads the grid assuming that
index=0 is the southern most point.
**nodes_east** : np.array(ny)
block dimensions (m) in the E-W direction.
**Note** that the code reads in the grid assuming that
index=0 is the western most point.
**nodes_z** : np.array(nz)
block dimensions (m) in the vertical direction.
This is positive downwards.
**save_path** : string
Path to where the initial file will be saved
to savepath/init3d
**res_list** : float or list
The start resistivity as a float or a list of
resistivities that correspond to the starting
resistivity model **res_model_int**.
This must be input if you input **res_model_int**
If res_list is None, then Nr = 0 and the real
resistivity values are used from **res_model**.
*default* is 100
**title** : string
Title that goes into the first line of savepath/init3d
**res_model** : np.array((nx,ny,nz))
Starting resistivity model in ohm-m (linear scale).
.. note:: again that the modeling code
assumes that the first row it reads in is the southern
most row and the first column it reads in is the
western most column. Similarly, the first plane it
reads in is the Earth's surface.
**res_model_int** : np.array((nx,ny,nz))
Starting resistivity model where each cell is allocated
an integer value that corresponds to the index value of
**res_list**.
.. note:: again that the modeling code
assumes that the first row it reads in is the southern
most row and the first column it reads in is the
western most column. Similarly, the first plane it
reads in is the Earth's surface.
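
A minimal usage sketch (the save path below is hypothetical):
:Example: ::
>>> wsmesh.make_mesh()
>>> wsmesh.write_initial_file(save_path=r"/home/ws3dinv/Inv1", res_list=100)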
"""
keys = ['nodes_east', 'nodes_north', 'nodes_z', 'title', 'res_list',
'res_model', 'res_model_int', 'save_path', 'initial_fn']
for key in keys:
try:
setattr(self, key, kwargs[key])
except KeyError:
if self.__dict__[key] is None:
pass
if self.initial_fn is None:
if self.save_path is None:
self.save_path = os.getcwd()
self.initial_fn = os.path.join(self.save_path, "WSInitialModel")
elif os.path.isdir(self.save_path) == True:
self.initial_fn = os.path.join(self.save_path, "WSInitialModel")
else:
self.initial_fn = self.save_path
self.save_path = os.path.dirname(self.save_path)
#check to see what resistivity in input
if self.res_list is None:
nr = 0
elif type(self.res_list) is not list and \
type(self.res_list) is not np.ndarray:
self.res_list = [self.res_list]
nr = len(self.res_list)
else:
nr = len(self.res_list)
#--> write file
ifid = open(self.initial_fn, 'w')
ifid.write('# {0}\n'.format(self.title.upper()))
ifid.write('{0} {1} {2} {3}\n'.format(self.nodes_north.shape[0],
self.nodes_east.shape[0],
self.nodes_z.shape[0],
nr))
#write S --> N node block
for ii, nnode in enumerate(self.nodes_north):
ifid.write('{0:>12.1f}'.format(abs(nnode)))
if ii != 0 and np.remainder(ii+1, 5) == 0:
ifid.write('\n')
elif ii == self.nodes_north.shape[0]-1:
ifid.write('\n')
#write W --> E node block
for jj, enode in enumerate(self.nodes_east):
ifid.write('{0:>12.1f}'.format(abs(enode)))
if jj != 0 and np.remainder(jj+1, 5) == 0:
ifid.write('\n')
elif jj == self.nodes_east.shape[0]-1:
ifid.write('\n')
#write top --> bottom node block
for kk, zz in enumerate(self.nodes_z):
ifid.write('{0:>12.1f}'.format(abs(zz)))
if kk != 0 and np.remainder(kk+1, 5) == 0:
ifid.write('\n')
elif kk == self.nodes_z.shape[0]-1:
ifid.write('\n')
#write the resistivity list
if nr > 0:
for ff in self.res_list:
ifid.write('{0:.1f} '.format(ff))
ifid.write('\n')
else:
pass
if self.res_model is None:
ifid.close()
else:
if nr > 0:
if self.res_model_int is None:
self.convert_model_to_int()
#need to flip the array such that the 1st index written is the
#northern most value
write_res_model = self.res_model_int[::-1, :, :]
#get similar layers
else:
write_res_model = self.res_model[::-1, :, :]
l1 = 0
layers = []
for zz in range(self.nodes_z.shape[0]-1):
if (write_res_model[:, :, zz] ==
write_res_model[:, :, zz+1]).all() == False:
layers.append((l1, zz))
l1 = zz+1
#need to add on the bottom layers
layers.append((l1, self.nodes_z.shape[0]-1))
#write out the layers from resmodel
for ll in layers:
ifid.write('{0} {1}\n'.format(ll[0]+1, ll[1]+1))
for nn in range(self.nodes_north.shape[0]):
for ee in range(self.nodes_east.shape[0]):
if nr > 0:
ifid.write('{0:>3.0f}'.format(
write_res_model[nn, ee, ll[0]]))
else:
ifid.write('{0:>8.1f}'.format(
write_res_model[nn, ee, ll[0]]))
ifid.write('\n')
ifid.close()
print('Wrote file to: {0}'.format(self.initial_fn))
def read_initial_file(self, initial_fn):
"""
read an initial file and return the pertinent information including
grid positions in coordinates relative to the center point (0,0) and
starting model.
Arguments:
----------
**initial_fn** : full path to initializing file.
Outputs:
--------
**nodes_north** : np.array(nx)
array of nodes in S --> N direction
**nodes_east** : np.array(ny)
array of nodes in the W --> E direction
**nodes_z** : np.array(nz)
array of nodes in vertical direction positive downwards
**res_model** : dictionary
dictionary of the starting model with keys as layers
**res_list** : list
list of resistivity values in the model
**title** : string
title string
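
A minimal usage sketch (the file path below is hypothetical):
:Example: ::
>>> wsmesh = WSMesh()
>>> wsmesh.read_initial_file(r"/home/ws3dinv/Inv1/WSInitialModel")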
"""
self.initial_fn = initial_fn
ifid = open(self.initial_fn, 'r')
ilines = ifid.readlines()
ifid.close()
self.title = ilines[0].strip()
#get size of dimensions, remembering that x is N-S, y is E-W, z is + down
nsize = ilines[1].strip().split()
n_north = int(nsize[0])
n_east = int(nsize[1])
n_z = int(nsize[2])
#initialize empty arrays to put things into
self.nodes_north = np.zeros(n_north)
self.nodes_east = np.zeros(n_east)
self.nodes_z = np.zeros(n_z)
self.res_model_int = np.zeros((n_north, n_east, n_z))
self.res_model = np.zeros((n_north, n_east, n_z))
#get the grid line locations
line_index = 2 #line number in file
count_n = 0 #number of north nodes found
while count_n < n_north:
iline = ilines[line_index].strip().split()
for north_node in iline:
self.nodes_north[count_n] = float(north_node)
count_n += 1
line_index += 1
count_e = 0 #number of east nodes found
while count_e < n_east:
iline = ilines[line_index].strip().split()
for east_node in iline:
self.nodes_east[count_e] = float(east_node)
count_e += 1
line_index += 1
count_z = 0 #number of vertical nodes
while count_z < n_z:
iline = ilines[line_index].strip().split()
for z_node in iline:
self.nodes_z[count_z] = float(z_node)
count_z += 1
line_index += 1
#put the grids into coordinates relative to the center of the grid
self.grid_north = self.nodes_north.copy()
self.grid_north[:int(n_north/2)] =\
-np.array([self.nodes_north[ii:int(n_north/2)].sum()
for ii in range(int(n_north/2))])
self.grid_north[int(n_north/2):] = \
np.array([self.nodes_north[int(n_north/2):ii+1].sum()
for ii in range(int(n_north/2), n_north)])-\
self.nodes_north[int(n_north/2)]
self.grid_east = self.nodes_east.copy()
self.grid_east[:int(n_east/2)] = \
-np.array([self.nodes_east[ii:int(n_east/2)].sum()
for ii in range(int(n_east/2))])
self.grid_east[int(n_east/2):] = \
np.array([self.nodes_east[int(n_east/2):ii+1].sum()
for ii in range(int(n_east/2),n_east)])-\
self.nodes_east[int(n_east/2)]
self.grid_z = np.array([self.nodes_z[:ii+1].sum() for ii in range(n_z)])
#get the resistivity values
self.res_list = [float(rr) for rr in ilines[line_index].strip().split()]
line_index += 1
#get model
try:
iline = ilines[line_index].strip().split()
except IndexError:
self.res_model[:, :, :] = self.res_list[0]
self.res_model_int[:, :, :] = 1
return
if len(iline) == 0 or len(iline) == 1:
self.res_model[:, :, :] = self.res_list[0]
self.res_model_int[:, :, :] = 1
return
else:
while line_index < len(ilines):
iline = ilines[line_index].strip().split()
if len(iline) == 2:
l1 = int(iline[0])-1
l2 = int(iline[1])
if l1 == l2:
l2 += 1
line_index += 1
count_n = 0
elif len(iline) == 0:
break
else:
count_e = 0
while count_e < n_east:
#be sure the index of res_list starts at 0 not 1 as
#in ws3dinv
self.res_model[count_n, count_e, l1:l2] =\
self.res_list[int(iline[count_e])-1]
self.res_model_int[count_n, count_e, l1:l2] =\
int(iline[count_e])
count_e += 1
count_n += 1
line_index += 1
# Need to be sure that the resistivity array matches
# with the grids, such that the first index is the
# furthest south, even though ws3dinv outputs as first
# index as furthest north.
self.res_model = self.res_model[::-1, :, :]
self.res_model_int = self.res_model_int[::-1, :, :]
#==============================================================================
# model class
#==============================================================================
class WSModel(object):
"""
Reads in model file and fills necessary attributes.
:Example: ::
>>> mfn = r"/home/ws3dinv/test_model.00"
>>> wsmodel = ws.WSModel(mfn)
>>> wsmodel.write_vtk_file(r"/home/ParaviewFiles")
======================= ===================================================
Attributes Description
======================= ===================================================
grid_east overall distance of grid nodes in east direction
grid_north overall distance of grid nodes in north direction
grid_z overall distance of grid nodes in z direction
iteration_number iteration number of the inversion
lagrange lagrange multiplier
model_fn full path to model file
nodes_east relative distance between nodes in east direction
nodes_north relative distance between nodes in north direction
nodes_z                 relative distance between nodes in z direction
res_model               resistivity model read from the model file
rms root mean squared error of data and model
======================= ===================================================
======================= ===================================================
Methods Description
======================= ===================================================
read_model_file read model file and fill attributes
write_vtk_file write a vtk structured grid file for resistivity
model
======================= ===================================================
"""
def __init__(self, model_fn=None):
self.model_fn = model_fn
self.iteration_number = None
self.rms = None
self.lagrange = None
self.res_model = None
self.nodes_north = None
self.nodes_east = None
self.nodes_z = None
self.grid_north = None
self.grid_east = None
self.grid_z = None
if self.model_fn is not None and os.path.isfile(self.model_fn) == True:
self.read_model_file()
def read_model_file(self):
"""
read in a model file as x-north, y-east, z-positive down
"""
mfid = open(self.model_fn, 'r')
mlines = mfid.readlines()
mfid.close()
#get info at the beginning of the file
info = mlines[0].strip().split()
self.iteration_number = int(info[2])
self.rms = float(info[5])
try:
self.lagrange = float(info[8])
except IndexError:
print('Did not get Lagrange Multiplier')
#get lengths of things
n_north, n_east, n_z, n_res = np.array(mlines[1].strip().split(),
dtype=int)
#make empty arrays to put stuff into
self.nodes_north = np.zeros(n_north)
self.nodes_east = np.zeros(n_east)
self.nodes_z = np.zeros(n_z)
self.res_model = np.zeros((n_north, n_east, n_z))
#get the grid line locations
line_index = 2 #line number in file
count_n = 0 #number of north nodes found
while count_n < n_north:
mline = mlines[line_index].strip().split()
for north_node in mline:
self.nodes_north[count_n] = float(north_node)
count_n += 1
line_index += 1
count_e = 0 #number of east nodes found
while count_e < n_east:
mline = mlines[line_index].strip().split()
for east_node in mline:
self.nodes_east[count_e] = float(east_node)
count_e += 1
line_index += 1
count_z = 0 #number of vertical nodes
while count_z < n_z:
mline = mlines[line_index].strip().split()
for z_node in mline:
self.nodes_z[count_z] = float(z_node)
count_z += 1
line_index += 1
#put the grids into coordinates relative to the center of the grid
self.grid_north = self.nodes_north.copy()
self.grid_north[:int(n_north/2)] =\
-np.array([self.nodes_north[ii:int(n_north/2)].sum()
for ii in range(int(n_north/2))])
self.grid_north[int(n_north/2):] = \
np.array([self.nodes_north[int(n_north/2):ii+1].sum()
for ii in range(int(n_north/2), n_north)])-\
self.nodes_north[int(n_north/2)]
self.grid_east = self.nodes_east.copy()
self.grid_east[:int(n_east/2)] = \
-np.array([self.nodes_east[ii:int(n_east/2)].sum()
for ii in range(int(n_east/2))])
self.grid_east[int(n_east/2):] = \
np.array([self.nodes_east[int(n_east/2):ii+1].sum()
for ii in range(int(n_east/2),n_east)])-\
self.nodes_east[int(n_east/2)]
self.grid_z = np.array([self.nodes_z[:ii+1].sum() for ii in range(n_z)])
#--> get resistivity values
#need to read in the north backwards so that the first index is
#southern most point
for kk in range(n_z):
for jj in range(n_east):
for ii in range(n_north):
self.res_model[(n_north-1)-ii, jj, kk] = \
float(mlines[line_index].strip())
line_index += 1
def write_vtk_file(self, save_fn):
"""
"""
if os.path.isdir(save_fn) == True:
save_fn = os.path.join(save_fn, 'VTKResistivity_Model')
save_fn = gridToVTK(save_fn,
self.grid_north,
self.grid_east,
self.grid_z,
cellData={'resistivity':self.res_model})
print('Wrote vtk file to {0}'.format(save_fn))
#==============================================================================
# Manipulate the model
#==============================================================================
class WSModelManipulator(object):
"""
will plot a model from wsinv3d or init file so the user can manipulate the
resistivity values relatively easily. At the moment only plotted
in map view.
:Example: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> initial_fn = r"/home/MT/ws3dinv/Inv1/WSInitialFile"
>>> mm = ws.WSModelManipulator(initial_fn=initial_fn)
=================== =======================================================
Buttons Description
=================== =======================================================
'=' increase depth to next vertical node (deeper)
'-' decrease depth to next vertical node (shallower)
'q' quit the plot, rewrites initial file when pressed
'a' copies the above horizontal layer to the present layer
'b'                 copies the horizontal layer below to the present layer
'u' undo previous change
=================== =======================================================
=================== =======================================================
Attributes Description
=================== =======================================================
ax1 matplotlib.axes instance for mesh plot of the model
ax2 matplotlib.axes instance of colorbar
cb matplotlib.colorbar instance for colorbar
cid_depth matplotlib.canvas.connect for depth
cmap matplotlib.colormap instance
cmax maximum value of resistivity for colorbar. (linear)
cmin minimum value of resistivity for colorbar (linear)
data_fn             full path to data file
depth_index integer value of depth slice for plotting
dpi resolution of figure in dots-per-inch
dscale depth scaling, computed internally
east_line_xlist list of east mesh lines for faster plotting
east_line_ylist list of east mesh lines for faster plotting
fdict dictionary of font properties
fig matplotlib.figure instance
fig_num number of figure instance
fig_size size of figure in inches
font_size size of font in points
grid_east location of east nodes in relative coordinates
grid_north location of north nodes in relative coordinates
grid_z location of vertical nodes in relative coordinates
initial_fn full path to initial file
m_height mean height of horizontal cells
m_width mean width of horizontal cells
map_scale [ 'm' | 'km' ] scale of map
mesh_east np.meshgrid of east, north
mesh_north np.meshgrid of east, north
mesh_plot matplotlib.axes.pcolormesh instance
model_fn full path to model file
new_initial_fn full path to new initial file
nodes_east spacing between east nodes
nodes_north spacing between north nodes
nodes_z spacing between vertical nodes
north_line_xlist list of coordinates of north nodes for faster plotting
north_line_ylist list of coordinates of north nodes for faster plotting
plot_yn [ 'y' | 'n' ] plot on instantiation
radio_res matplotlib.widget.radio instance for change resistivity
rect_selector matplotlib.widget.rect_selector
res np.ndarray(nx, ny, nz) for model in linear resistivity
res_copy copy of res for undo
res_dict dictionary of segmented resistivity values
res_list list of resistivity values for model linear scale
res_model np.ndarray(nx, ny, nz) of resistivity values from
res_list (linear scale)
res_model_int np.ndarray(nx, ny, nz) of integer values corresponding
to res_list for initial model
res_value           current resistivity value of radio_res
save_path path to save initial file to
station_east station locations in east direction
station_north station locations in north direction
xlimits limits of plot in e-w direction
ylimits limits of plot in n-s direction
=================== =======================================================
"""
def __init__(self, model_fn=None, initial_fn=None, data_fn=None, **kwargs):
self.model_fn = model_fn
self.initial_fn = initial_fn
self.data_fn = data_fn
self.new_initial_fn = None
self.initial_fn_basename = kwargs.pop('initial_fn_basename',
'WSInitialModel_mm')
if self.model_fn is not None:
self.save_path = os.path.dirname(self.model_fn)
elif self.initial_fn is not None:
self.save_path = os.path.dirname(self.initial_fn)
elif self.data_fn is not None:
self.save_path = os.path.dirname(self.data_fn)
else:
self.save_path = None
#grid nodes
self.nodes_east = None
self.nodes_north = None
self.nodes_z = None
#grid locations
self.grid_east = None
self.grid_north = None
self.grid_z = None
#resistivity model
self.res_model_int = None #model in ints
self.res_model = None #model in floats
self.res = None
#station locations in relative coordinates read from data file
self.station_east = None
self.station_north = None
#--> set map scale
self.map_scale = kwargs.pop('map_scale', 'km')
self.m_width = 100
self.m_height = 100
#--> scale the map coordinates
if self.map_scale=='km':
self.dscale = 1000.
if self.map_scale=='m':
self.dscale = 1.
#figure attributes
self.fig = None
self.ax1 = None
self.ax2 = None
self.cb = None
self.east_line_xlist = None
self.east_line_ylist = None
self.north_line_xlist = None
self.north_line_ylist = None
#make a default resistivity list to change values
self.res_dict = None
self.res_list = kwargs.pop('res_list', None)
if self.res_list is None:
self.set_res_list(np.array([.3, 1, 10, 50, 100, 500, 1000, 5000],
dtype=float))
else:
try:
if len(self.res_list) > 10:
print ('!! Warning -- ws3dinv can only deal with 10 '
'resistivity values for the initial model')
except TypeError:
self.res_list = [self.res_list]
self.set_res_list(self.res_list)
#read in model or initial file
self.read_file()
#set initial resistivity value
self.res_value = self.res_list[0]
#--> set map limits
self.xlimits = kwargs.pop('xlimits', None)
self.ylimits = kwargs.pop('ylimits', None)
self.font_size = kwargs.pop('font_size', 7)
self.fig_dpi = kwargs.pop('fig_dpi', 300)
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.cmap = kwargs.pop('cmap', cm.jet_r)
self.depth_index = kwargs.pop('depth_index', 0)
self.fdict = {'size':self.font_size+2, 'weight':'bold'}
#plot on initialization
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn=='y':
self.plot()
def set_res_list(self, res_list):
"""
on setting res_list also set the res_dict to correspond
"""
self.res_list = res_list
#make a dictionary of values to write to file.
self.res_dict = dict([(res, ii)
for ii, res in enumerate(self.res_list,1)])
if self.fig is not None:
plt.close()
self.plot()
#---read files-------------------------------------------------------------
def read_file(self):
"""
reads in initial file or model file and set attributes:
-res_model
-grid_north
-grid_east
-grid_z
-res_list if initial file
"""
att_names = ['nodes_north', 'nodes_east', 'nodes_z', 'grid_east',
'grid_north', 'grid_z', 'res_model', 'res_list']
#--> read model file
if self.model_fn is not None and self.initial_fn is None:
wsmodel = WSModel(self.model_fn)
wsmodel.read_model_file()
for name in att_names:
if hasattr(wsmodel, name):
value = getattr(wsmodel, name)
setattr(self, name, value)
#--> scale the resistivity values from the model into
# a segmented scale that corresponds to res_list
self.convert_res_to_model(self.res_model.copy())
#--> read initial file
elif self.initial_fn is not None and self.model_fn is None:
wsmesh = WSMesh()
wsmesh.read_initial_file(self.initial_fn)
for name in att_names:
if hasattr(wsmesh, name):
value = getattr(wsmesh, name)
setattr(self, name, value)
self.res_model_int = wsmesh.res_model_int
if len(wsmesh.res_list) == 1:
self.set_res_list([.3, 1, 10, 100, 1000])
else:
self.set_res_list(wsmesh.res_list)
#need to convert index values to resistivity values
rdict = dict([(ii,res) for ii,res in enumerate(self.res_list,1)])
for ii in range(len(self.res_list)):
self.res_model[np.where(self.res_model_int==ii+1)] = rdict[ii+1]
elif self.initial_fn is None and self.model_fn is None:
print('Need to input either an initial file or model file to plot')
else:
print('Input just initial file or model file not both.')
#--> read in data file if given
if self.data_fn is not None:
wsdata = WSData()
wsdata.read_data_file(self.data_fn)
#get station locations
self.station_east = wsdata.data['east']
self.station_north = wsdata.data['north']
#get cell block sizes
self.m_height = np.median(self.nodes_north[5:-5])/self.dscale
self.m_width = np.median(self.nodes_east[5:-5])/self.dscale
#make a copy of original in case there are unwanted changes
self.res_copy = self.res_model.copy()
#---plot model-------------------------------------------------------------
def plot(self):
"""
plots the model with:
-a radio dial for depth slice
-radio dial for resistivity value
"""
self.cmin = np.floor(np.log10(min(self.res_list)))
self.cmax = np.ceil(np.log10(max(self.res_list)))
#-->Plot properties
plt.rcParams['font.size'] = self.font_size
#need to add an extra row and column to east and north to make sure
#all is plotted see pcolor for details.
plot_east = np.append(self.grid_east, self.grid_east[-1]*1.25)/self.dscale
plot_north = np.append(self.grid_north, self.grid_north[-1]*1.25)/self.dscale
#make a mesh grid for plotting
#the 'ij' makes sure the resulting grid is in east, north
self.mesh_east, self.mesh_north = np.meshgrid(plot_east,
plot_north,
indexing='ij')
self.fig = plt.figure(self.fig_num, self.fig_size, dpi=self.fig_dpi)
plt.clf()
self.ax1 = self.fig.add_subplot(1, 1, 1, aspect='equal')
#transpose to make x--east and y--north
plot_res = np.log10(self.res_model[:,:,self.depth_index].T)
self.mesh_plot = self.ax1.pcolormesh(self.mesh_east,
self.mesh_north,
plot_res,
cmap=self.cmap,
vmin=self.cmin,
vmax=self.cmax)
#on plus or minus change depth slice
self.cid_depth = \
self.mesh_plot.figure.canvas.mpl_connect('key_press_event',
self._on_key_callback)
#plot the stations
if self.station_east is not None:
for ee, nn in zip(self.station_east, self.station_north):
self.ax1.text(ee/self.dscale, nn/self.dscale,
'*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':self.font_size-2,
'weight':'bold'})
#set axis properties
if self.xlimits is not None:
self.ax1.set_xlim(self.xlimits)
else:
self.ax1.set_xlim(xmin=self.grid_east.min()/self.dscale,
xmax=self.grid_east.max()/self.dscale)
if self.ylimits is not None:
self.ax1.set_ylim(self.ylimits)
else:
self.ax1.set_ylim(ymin=self.grid_north.min()/self.dscale,
ymax=self.grid_north.max()/self.dscale)
#self.ax1.xaxis.set_minor_locator(MultipleLocator(100*1./dscale))
#self.ax1.yaxis.set_minor_locator(MultipleLocator(100*1./dscale))
self.ax1.set_ylabel('Northing ('+self.map_scale+')',
fontdict=self.fdict)
self.ax1.set_xlabel('Easting ('+self.map_scale+')',
fontdict=self.fdict)
depth_title = self.grid_z[self.depth_index]/self.dscale
self.ax1.set_title('Depth = {:.3f} '.format(depth_title)+\
'('+self.map_scale+')',
fontdict=self.fdict)
#plot the grid if desired
self.east_line_xlist = []
self.east_line_ylist = []
for xx in self.grid_east:
self.east_line_xlist.extend([xx/self.dscale, xx/self.dscale])
self.east_line_xlist.append(None)
self.east_line_ylist.extend([self.grid_north.min()/self.dscale,
self.grid_north.max()/self.dscale])
self.east_line_ylist.append(None)
self.ax1.plot(self.east_line_xlist,
self.east_line_ylist,
lw=.25,
color='k')
self.north_line_xlist = []
self.north_line_ylist = []
for yy in self.grid_north:
self.north_line_xlist.extend([self.grid_east.min()/self.dscale,
self.grid_east.max()/self.dscale])
self.north_line_xlist.append(None)
self.north_line_ylist.extend([yy/self.dscale, yy/self.dscale])
self.north_line_ylist.append(None)
self.ax1.plot(self.north_line_xlist,
self.north_line_ylist,
lw=.25,
color='k')
#plot the colorbar
self.ax2 = mcb.make_axes(self.ax1, orientation='vertical', shrink=.35)
seg_cmap = cmap_discretize(self.cmap, len(self.res_list))
self.cb = mcb.ColorbarBase(self.ax2[0],cmap=seg_cmap,
norm=colors.Normalize(vmin=self.cmin,
vmax=self.cmax))
self.cb.set_label(r'Resistivity ($\Omega \cdot$m)',
fontdict={'size':self.font_size})
self.cb.set_ticks(np.arange(self.cmin, self.cmax+1))
self.cb.set_ticklabels([mtplottools.labeldict[cc]
for cc in np.arange(self.cmin, self.cmax+1)])
#make a resistivity radio button
resrb = self.fig.add_axes([.85,.1,.1,.2])
reslabels = ['{0:.4g}'.format(res) for res in self.res_list]
self.radio_res = widgets.RadioButtons(resrb, reslabels,
active=self.res_dict[self.res_value]-1)
#make a rectangular selector
self.rect_selector = widgets.RectangleSelector(self.ax1,
self.rect_onselect,
drawtype='box',
useblit=True)
plt.show()
#needs to go after show()
self.radio_res.on_clicked(self.set_res_value)
def redraw_plot(self):
"""
redraws the plot
"""
current_xlimits = self.ax1.get_xlim()
current_ylimits = self.ax1.get_ylim()
self.ax1.cla()
plot_res = np.log10(self.res_model[:,:,self.depth_index].T)
self.mesh_plot = self.ax1.pcolormesh(self.mesh_east,
self.mesh_north,
plot_res,
cmap=self.cmap,
vmin=self.cmin,
vmax=self.cmax)
#plot the stations
if self.station_east is not None:
for ee,nn in zip(self.station_east, self.station_north):
self.ax1.text(ee/self.dscale, nn/self.dscale,
'*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':self.font_size-2,
'weight':'bold'})
#set axis properties
if self.xlimits is not None:
self.ax1.set_xlim(self.xlimits)
else:
self.ax1.set_xlim(current_xlimits)
if self.ylimits is not None:
self.ax1.set_ylim(self.ylimits)
else:
self.ax1.set_ylim(current_ylimits)
self.ax1.set_ylabel('Northing ('+self.map_scale+')',
fontdict=self.fdict)
self.ax1.set_xlabel('Easting ('+self.map_scale+')',
fontdict=self.fdict)
depth_title = self.grid_z[self.depth_index]/self.dscale
self.ax1.set_title('Depth = {:.3f} '.format(depth_title)+\
'('+self.map_scale+')',
fontdict=self.fdict)
#plot finite element mesh
self.ax1.plot(self.east_line_xlist,
self.east_line_ylist,
lw=.25,
color='k')
self.ax1.plot(self.north_line_xlist,
self.north_line_ylist,
lw=.25,
color='k')
#be sure to redraw the canvas
self.fig.canvas.draw()
def set_res_value(self, label):
self.res_value = float(label)
print('set resistivity to ', label)
print(self.res_value)
def _on_key_callback(self,event):
"""
on pressing a key do something
"""
self.event_change_depth = event
#go down a layer on push of +/= keys
if self.event_change_depth.key == '=':
self.depth_index += 1
if self.depth_index>len(self.grid_z)-1:
self.depth_index = len(self.grid_z)-1
print('already at deepest depth')
print('Plotting Depth {0:.3f}'.format(self.grid_z[self.depth_index]/\
self.dscale)+'('+self.map_scale+')')
self.redraw_plot()
#go up a layer on push of - key
elif self.event_change_depth.key == '-':
self.depth_index -= 1
if self.depth_index < 0:
self.depth_index = 0
print('Plotting Depth {0:.3f} '.format(self.grid_z[self.depth_index]/\
self.dscale)+'('+self.map_scale+')')
self.redraw_plot()
#exit plot on press of q
elif self.event_change_depth.key == 'q':
self.event_change_depth.canvas.mpl_disconnect(self.cid_depth)
plt.close(self.event_change_depth.canvas.figure)
self.rewrite_initial_file()
#copy the layer above
elif self.event_change_depth.key == 'a':
try:
if self.depth_index == 0:
print('No layers above')
else:
self.res_model[:, :, self.depth_index] = \
self.res_model[:, :, self.depth_index-1]
except IndexError:
print('No layers above')
self.redraw_plot()
#copy the layer below
elif self.event_change_depth.key == 'b':
try:
self.res_model[:, :, self.depth_index] = \
self.res_model[:, :, self.depth_index+1]
except IndexError:
print('No more layers below')
self.redraw_plot()
#undo
elif self.event_change_depth.key == 'u':
if type(self.xchange) is int and type(self.ychange) is int:
self.res_model[self.ychange, self.xchange, self.depth_index] =\
self.res_copy[self.ychange, self.xchange, self.depth_index]
else:
for xx in self.xchange:
for yy in self.ychange:
self.res_model[yy, xx, self.depth_index] = \
self.res_copy[yy, xx, self.depth_index]
self.redraw_plot()
def change_model_res(self, xchange, ychange):
"""
change resistivity values of resistivity model
"""
if type(xchange) is int and type(ychange) is int:
self.res_model[ychange, xchange, self.depth_index] = self.res_value
else:
for xx in xchange:
for yy in ychange:
self.res_model[yy, xx, self.depth_index] = self.res_value
self.redraw_plot()
def rect_onselect(self, eclick, erelease):
"""
on selecting a rectangle change the colors to the resistivity values
"""
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
self.xchange = self._get_east_index(x1, x2)
self.ychange = self._get_north_index(y1, y2)
#reset values of resistivity
self.change_model_res(self.xchange, self.ychange)
def _get_east_index(self, x1, x2):
"""
get the index value of the points to be changed
"""
if x1 < x2:
xchange = np.where((self.grid_east/self.dscale >= x1) & \
(self.grid_east/self.dscale <= x2))[0]
if len(xchange) == 0:
xchange = np.where(self.grid_east/self.dscale >= x1)[0][0]-1
return [xchange]
if x1 > x2:
xchange = np.where((self.grid_east/self.dscale <= x1) & \
(self.grid_east/self.dscale >= x2))[0]
if len(xchange) == 0:
xchange = np.where(self.grid_east/self.dscale >= x2)[0][0]-1
return [xchange]
#check the edges to see if the selection should include the square
xchange = np.append(xchange, xchange[0]-1)
xchange.sort()
return xchange
def _get_north_index(self, y1, y2):
"""
get the index value of the points to be changed in north direction
need to flip the index because the plot is flipped
"""
if y1 < y2:
ychange = np.where((self.grid_north/self.dscale > y1) & \
(self.grid_north/self.dscale < y2))[0]
if len(ychange) == 0:
ychange = np.where(self.grid_north/self.dscale >= y1)[0][0]-1
return [ychange]
elif y1 > y2:
ychange = np.where((self.grid_north/self.dscale < y1) & \
(self.grid_north/self.dscale > y2))[0]
if len(ychange) == 0:
ychange = np.where(self.grid_north/self.dscale >= y2)[0][0]-1
return [ychange]
ychange -= 1
ychange = np.append(ychange, ychange[-1]+1)
return ychange
def convert_model_to_int(self):
"""
convert the resistivity model that is in ohm-m to integer values
corresponding to res_list
"""
self.res_model_int = np.ones_like(self.res_model)
for ii, res in enumerate(self.res_list):
indexes = np.where(self.res_model == res)
self.res_model_int[indexes] = self.res_dict[res]
if ii == 0:
indexes = np.where(self.res_model <= res)
self.res_model_int[indexes] = self.res_dict[res]
elif ii == len(self.res_list)-1:
indexes = np.where(self.res_model >= res)
self.res_model_int[indexes] = self.res_dict[res]
else:
l_index = max([0, ii-1])
h_index = min([len(self.res_list)-1, ii+1])
indexes = np.where((self.res_model > self.res_list[l_index]) &
(self.res_model < self.res_list[h_index]))
self.res_model_int[indexes] = self.res_dict[res]
def convert_res_to_model(self, res_array):
"""
converts an output model into an array of segmented values according
to res_list.
output is an array of segmented resistivity values in ohm-m (linear)
"""
#make values in model resistivity array a value in res_list
self.res_model = np.zeros_like(res_array)
for ii, res in enumerate(self.res_list):
indexes = np.where(res_array == res)
self.res_model[indexes] = res
if ii == 0:
indexes = np.where(res_array <= res)
self.res_model[indexes] = res
elif ii == len(self.res_list)-1:
indexes = np.where(res_array >= res)
self.res_model[indexes] = res
else:
l_index = max([0, ii-1])
h_index = min([len(self.res_list)-1, ii+1])
indexes = np.where((res_array > self.res_list[l_index]) &
(res_array < self.res_list[h_index]))
self.res_model[indexes] = res
def rewrite_initial_file(self, save_path=None):
"""
write an initial file for wsinv3d from the model created.
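
A minimal usage sketch (the file paths below are hypothetical):
:Example: ::
>>> mm = WSModelManipulator(initial_fn=r"/home/ws3dinv/WSInitialFile")
>>> mm.rewrite_initial_file(save_path=r"/home/ws3dinv/Inv2")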
"""
#need to flip the resistivity model so that the first index is the
#northern most block in N-S
#self.res_model = self.res_model[::-1, :, :]
if save_path is not None:
self.save_path = save_path
self.new_initial_fn = os.path.join(self.save_path,
self.initial_fn_basename)
wsmesh = WSMesh()
#pass attribute to wsmesh
att_names = ['nodes_north', 'nodes_east', 'nodes_z', 'grid_east',
'grid_north', 'grid_z', 'res_model', 'res_list',
'res_dict']
for name in att_names:
if hasattr(self, name):
value = getattr(self, name)
setattr(wsmesh, name, value)
wsmesh.write_initial_file(initial_fn=self.new_initial_fn)
def cmap_discretize(cmap, N):
"""Return a discrete colormap from the continuous colormap cmap.
cmap: colormap instance, e.g. cm.jet.
N: number of colors.
Example
x = resize(arange(100), (5,100))
djet = cmap_discretize(cm.jet, 5)
imshow(x, cmap=djet)
"""
colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))
colors_rgba = cmap(colors_i)
indices = np.linspace(0, 1., N+1)
cdict = {}
for ki,key in enumerate(('red','green','blue')):
cdict[key] = [(indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki])
for i in range(N+1)]
# Return colormap object.
return colors.LinearSegmentedColormap(cmap.name + "_%d"%N, cdict, 1024)
#==============================================================================
# response
#==============================================================================
class WSResponse(object):
"""
class to deal with .resp file output by ws3dinv
====================== ====================================================
Attributes Description
====================== ====================================================
n_z number of vertical layers
period_list list of periods inverted for
resp np.ndarray structured with keys:
* *station* --> station name
* *east* --> relative eastern location in
grid
* *north* --> relative northern location in
grid
* *z_resp* --> impedance tensor array
of the response with shape
(n_periods, 2, 2), dtype=complex
* *z_resp_err* --> response impedance tensor error
resp_fn full path to response file
station_east location of stations in east direction
station_fn full path to station file written by WSStation
station_names names of stations
station_north location of stations in north direction
units [ 'mv' | 'other' ] units of impedance tensor
wl_sites_fn full path to .sites file from Winglink
z_resp impedance tensors of response with shape
(n_stations, n_periods, 2, 2)
z_resp_err impedance tensors errors of response with shape
(n_stations, n_periods, 2, 2) (zeros)
====================== ====================================================
====================== ====================================================
Methods Description
====================== ====================================================
read_resp_file read response file and fill attributes
====================== ====================================================
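
A minimal usage sketch (the file paths below are hypothetical):
:Example: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> rfn = r"/home/MT/ws3dinv/Inv1/Test_resp.00"
>>> sfn = r"/home/MT/ws3dinv/Inv1/WSStationLocations.txt"
>>> wsresp = ws.WSResponse(rfn, station_fn=sfn)
>>> z = wsresp.z_resp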
"""
def __init__(self, resp_fn=None, station_fn=None, wl_station_fn=None):
self.resp_fn = resp_fn
self.station_fn = station_fn
self.wl_sites_fn = wl_station_fn
self.period_list = None
self.resp = None
self.n_z = None
self.station_east = None
self.station_north = None
self.station_name = None
self.z_resp = None
self.z_resp_err = None
self.units = 'mv'
self._zconv = 796.
if self.resp_fn is not None:
self.read_resp_file()
def read_resp_file(self, resp_fn=None, wl_sites_fn=None, station_fn=None):
"""
read in the response file
Arguments:
-----------
**resp_fn** : string
full path to data file
**sites_fn** : string
full path to sites file output by winglink. This is
to match the station name with station number.
**station_fn** : string
full path to station location file
Outputs:
--------
**resp** : structured np.ndarray
fills the attribute WSResponse.resp with values
**period_list** : np.ndarray()
fills the period list with values.
"""
if resp_fn is not None:
self.resp_fn = resp_fn
if wl_sites_fn is not None:
self.wl_sites_fn = wl_sites_fn
if station_fn is not None:
self.station_fn = station_fn
if not os.path.isfile(self.resp_fn):
raise WSInputError('Cannot find {0}, check path'.format(self.resp_fn))
dfid = open(self.resp_fn, 'r')
dlines = dfid.readlines()
#get size number of stations, number of frequencies,
# number of Z components
n_stations, n_periods, nz = np.array(dlines[0].strip().split(),
dtype='int')
nsstart = 2
self.n_z = nz
#make a structured array to keep things in for convenience
z_shape = (n_periods, 2, 2)
resp_dtype = [('station', '|S10'),
('east', float),
('north', float),
('z_resp', (complex, z_shape)),
('z_resp_err', (complex, z_shape))]
self.resp = np.zeros(n_stations, dtype=resp_dtype)
findlist = []
for ii, dline in enumerate(dlines[1:50], 1):
if dline.find('Station_Location: N-S') == 0:
findlist.append(ii)
elif dline.find('Station_Location: E-W') == 0:
findlist.append(ii)
elif dline.find('DATA_Period:') == 0:
findlist.append(ii)
ncol = len(dlines[nsstart].strip().split())
#get site names if entered a sites file
if self.wl_sites_fn is not None:
slist, station_list = wl.read_sites_file(self.wl_sites_fn)
self.resp['station'] = station_list
elif self.station_fn is not None:
stations = WSStation(self.station_fn)
stations.read_station_file()
self.resp['station'] = stations.names
else:
self.resp['station'] = np.arange(n_stations)
#get N-S locations
for ii, dline in enumerate(dlines[findlist[0]+1:findlist[1]],0):
dline = dline.strip().split()
for jj in range(ncol):
try:
self.resp['north'][ii*ncol+jj] = float(dline[jj])
except IndexError:
pass
except ValueError:
break
#get E-W locations
for ii, dline in enumerate(dlines[findlist[1]+1:findlist[2]],0):
dline = dline.strip().split()
for jj in range(ncol):
try:
self.resp['east'][ii*ncol+jj] = float(dline[jj])
except IndexError:
pass
except ValueError:
break
#make some empty array to put stuff into
self.period_list = np.zeros(n_periods)
#get resp
per = 0
for ii, dl in enumerate(dlines[findlist[2]:]):
if dl.lower().find('period') > 0:
st = 0
if dl.lower().find('data') == 0:
dkey = 'z_resp'
self.period_list[per] = float(dl.strip().split()[1])
per += 1
elif dl.lower().find('#iteration') >= 0:
break
else:
zline = np.array(dl.strip().split(), dtype=float)*self._zconv
self.resp[st][dkey][per-1,:] = np.array([[zline[0]-1j*zline[1],
zline[2]-1j*zline[3]],
[zline[4]-1j*zline[5],
zline[6]-1j*zline[7]]])
st += 1
self.station_east = self.resp['east']
self.station_north = self.resp['north']
self.station_name = self.resp['station']
self.z_resp = self.resp['z_resp']
self.z_resp_err = np.zeros_like(self.z_resp)
#==============================================================================
# WSError
#==============================================================================
class WSInputError(Exception):
pass
#==============================================================================
# plot response
#==============================================================================
class PlotResponse(object):
"""
plot data and response
:Example: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> dfn = r"/home/MT/ws3dinv/Inv1/WSDataFile.dat"
>>> rfn = r"/home/MT/ws3dinv/Inv1/Test_resp.00"
>>> sfn = r"/home/MT/ws3dinv/Inv1/WSStationLocations.txt"
>>> wsrp = ws.PlotResponse(data_fn=dfn, resp_fn=rfn, station_fn=sfn)
>>> # plot only the TE and TM modes
>>> wsrp.plot_component = 2
>>> wsrp.redraw_plot()
======================== ==================================================
Attributes Description
======================== ==================================================
color_mode [ 'color' | 'bw' ] color or black and white plots
cted color for data TE mode
ctem                     color for model TE mode
ctmd                     color for data TM mode
ctmm color for model TM mode
data_fn full path to data file
data_object WSResponse instance
e_capsize                cap size of error bars in points (*default* is 2)
e_capthick               cap thickness of error bars in points (*default*
is .5)
fig_dpi resolution of figure in dots-per-inch (300)
fig_list list of matplotlib.figure instances for plots
fig_size size of figure in inches (*default* is [6, 6])
font_size size of font for tick labels, axes labels are
font_size+2 (*default* is 7)
legend_border_axes_pad padding between legend box and axes
legend_border_pad padding between border of legend and symbols
legend_handle_text_pad padding between text labels and symbols of legend
legend_label_spacing padding between labels
legend_loc location of legend
legend_marker_scale scale of symbols in legend
lw line width response curves (*default* is .5)
ms size of markers (*default* is 1.5)
mted marker for data TE mode
mtem marker for data TM mode
mtmd marker for model TE mode
mtmm marker for model TM mode
phase_limits limits of phase
plot_component [ 2 | 4 ] 2 for TE and TM or 4 for all components
plot_style               [ 1 | 2 ] 1 to plot each mode in a separate
subplot and 2 to plot xx, xy and yx, yy in same
plots
plot_type [ '1' | list of station name ] '1' to plot all
stations in data file or input a list of station
names to plot if station_fn is input, otherwise
input a list of integers associated with the
index with in the data file, ie 2 for 2nd station
plot_z [ True | False ] *default* is True to plot
impedance, False for plotting resistivity and
phase
plot_yn [ 'n' | 'y' ] to plot on instantiation
res_limits limits of resistivity in linear scale
resp_fn full path to response file
resp_object WSResponse object for resp_fn, or list of
WSResponse objects if resp_fn is a list of
response files
station_fn full path to station file written by WSStation
subplot_bottom space between axes and bottom of figure
subplot_hspace space between subplots in vertical direction
subplot_left space between axes and left of figure
subplot_right space between axes and right of figure
subplot_top space between axes and top of figure
subplot_wspace space between subplots in horizontal direction
======================== ==================================================
"""
def __init__(self, data_fn=None, resp_fn=None, station_fn=None, **kwargs):
self.data_fn = data_fn
self.resp_fn = resp_fn
self.station_fn = station_fn
self.data_object = None
self.resp_object = []
self.color_mode = kwargs.pop('color_mode', 'color')
self.ms = kwargs.pop('ms', 1.5)
self.lw = kwargs.pop('lw', .5)
self.e_capthick = kwargs.pop('e_capthick', .5)
self.e_capsize = kwargs.pop('e_capsize', 2)
#color mode
if self.color_mode == 'color':
#color for data
self.cted = kwargs.pop('cted', (0, 0, 1))
self.ctmd = kwargs.pop('ctmd', (1, 0, 0))
self.mted = kwargs.pop('mted', 's')
self.mtmd = kwargs.pop('mtmd', 'o')
#color for model response
self.ctem = kwargs.pop('ctem', (0, .6, .3))
self.ctmm = kwargs.pop('ctmm', (.9, 0, .8))
self.mtem = kwargs.pop('mtem', '+')
self.mtmm = kwargs.pop('mtmm', '+')
#black and white mode
elif self.color_mode == 'bw':
#color for data
self.cted = kwargs.pop('cted', (0, 0, 0))
self.ctmd = kwargs.pop('ctmd', (0, 0, 0))
self.mted = kwargs.pop('mted', '*')
self.mtmd = kwargs.pop('mtmd', 'v')
#color for model response
self.ctem = kwargs.pop('ctem', (0.6, 0.6, 0.6))
self.ctmm = kwargs.pop('ctmm', (0.6, 0.6, 0.6))
self.mtem = kwargs.pop('mtem', '+')
self.mtmm = kwargs.pop('mtmm', 'x')
self.phase_limits = kwargs.pop('phase_limits', None)
self.res_limits = kwargs.pop('res_limits', None)
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.subplot_wspace = .2
self.subplot_hspace = .0
self.subplot_right = .98
self.subplot_left = .08
self.subplot_top = .93
self.subplot_bottom = .1
self.legend_loc = 'upper left'
self.legend_marker_scale = 1
self.legend_border_axes_pad = .01
self.legend_label_spacing = 0.07
self.legend_handle_text_pad = .2
self.legend_border_pad = .15
self.font_size = kwargs.pop('font_size', 6)
self.plot_type = kwargs.pop('plot_type', '1')
self.plot_style = kwargs.pop('plot_style', 1)
self.plot_component = kwargs.pop('plot_component', 4)
self.plot_yn = kwargs.pop('plot_yn', 'y')
self.plot_z = kwargs.pop('plot_z', True)
self.ylabel_pad = kwargs.pop('ylabel_pad', 1.25)
self.fig_list = []
if self.plot_yn == 'y':
self.plot()
def plot_errorbar(self, ax, period, data, error, color, marker):
"""
convenience function to make an error bar instance; returns the
container from ax.errorbar so the line handle can be reused in the legends
"""
errorbar_object = ax.errorbar(period,
data,
marker=marker,
ms=self.ms,
mfc='None',
mec=color,
ls=':',
yerr=error,
ecolor=color,
color=color,
picker=2,
lw=self.lw,
elinewidth=self.lw,
capsize=self.e_capsize,
capthick=self.e_capthick)
return errorbar_object
def plot(self):
"""
plot the data and, if a response file is given, the model response for the selected stations
"""
self.data_object = WSData()
self.data_object.read_data_file(self.data_fn,
station_fn=self.station_fn)
#get shape of impedance tensors
ns = self.data_object.data['station'].shape[0]
nf = len(self.data_object.period_list)
#read in response files
if self.resp_fn != None:
self.resp_object = []
if type(self.resp_fn) is not list:
self.resp_object = [WSResponse(self.resp_fn,
station_fn=self.station_fn)]
else:
for rfile in self.resp_fn:
self.resp_object.append(WSResponse(rfile,
station_fn=self.station_fn))
#get number of response files
nr = len(self.resp_object)
if type(self.plot_type) is list:
ns = len(self.plot_type)
#--> set default font size
plt.rcParams['font.size'] = self.font_size
fontdict = {'size':self.font_size+2, 'weight':'bold'}
if self.plot_z == True:
h_ratio = [1,1]
elif self.plot_z == False:
h_ratio = [2, 1.5]
gs = gridspec.GridSpec(2, 2, height_ratios=h_ratio, hspace=.1)
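#note: this GridSpec is not used directly; a new per-station GridSpec with the same height ratios is created inside the station loop below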
ax_list = []
line_list = []
label_list = []
if self.plot_type != '1':
pstation_list = []
if type(self.plot_type) is not list:
self.plot_type = [self.plot_type]
for ii, station in enumerate(self.data_object.data['station']):
if type(station) is not int:
for pstation in self.plot_type:
if station.find(str(pstation)) >= 0:
pstation_list.append(ii)
else:
for pstation in self.plot_type:
if station == int(pstation):
pstation_list.append(ii)
else:
pstation_list = np.arange(ns)
for jj in pstation_list:
data_z = self.data_object.z_data[jj]
data_z_err = self.data_object.z_data_err[jj]
period = self.data_object.period_list
station = self.data_object.station_names[jj]
print('Plotting: {0}'.format(station))
#replace the masked-point flag value with 0+0j (data) and 1+1j (error) so the nonzero masks below skip these points
data_z[np.where(data_z == 7.95204E5-7.95204E5j)] = 0.0+0.0j
data_z_err[np.where(data_z_err == 7.95204E5-7.95204E5j)] =\
1.0+1.0j
#convert to apparent resistivity and phase
z_object = mtz.Z(z_array=data_z, z_err_array=data_z_err,
freq=1./period)
rp = mtplottools.ResPhase(z_object)
#find locations where points have been masked
nzxx = np.where(rp.resxx!=0)[0]
nzxy = np.where(rp.resxy!=0)[0]
nzyx = np.where(rp.resyx!=0)[0]
nzyy = np.where(rp.resyy!=0)[0]
if self.resp_fn != None:
plotr = True
else:
plotr = False
#make figure
fig = plt.figure(station, self.fig_size, dpi=self.fig_dpi)
plt.clf()
fig.suptitle(str(station), fontdict=fontdict)
#set the grid of subplots
gs = gridspec.GridSpec(2, 4,
wspace=self.subplot_wspace,
left=self.subplot_left,
top=self.subplot_top,
bottom=self.subplot_bottom,
right=self.subplot_right,
hspace=self.subplot_hspace,
height_ratios=h_ratio)
#---------plot the apparent resistivity-----------------------------------
#plot each component in its own subplot
if self.plot_style == 1:
if self.plot_component == 2:
axrxy = fig.add_subplot(gs[0, 0:2])
axryx = fig.add_subplot(gs[0, 2:], sharex=axrxy)
axpxy = fig.add_subplot(gs[1, 0:2])
axpyx = fig.add_subplot(gs[1, 2:], sharex=axrxy)
if self.plot_z == False:
#plot resistivity
erxy = self.plot_errorbar(axrxy,
period[nzxy],
rp.resxy[nzxy],
rp.resxy_err[nzxy],
self.cted, self.mted)
eryx = self.plot_errorbar(axryx,
period[nzyx],
rp.resyx[nzyx],
rp.resyx_err[nzyx],
self.ctmd, self.mtmd)
#plot phase
erxy = self.plot_errorbar(axpxy,
period[nzxy],
rp.phasexy[nzxy],
rp.phasexy_err[nzxy],
self.cted, self.mted)
eryx = self.plot_errorbar(axpyx,
period[nzyx],
rp.phaseyx[nzyx],
rp.phaseyx_err[nzyx],
self.ctmd, self.mtmd)
elif self.plot_z == True:
#plot real
erxy = self.plot_errorbar(axrxy,
period[nzxy],
z_object.z[nzxy,0,1].real,
z_object.z_err[nzxy,0,1].real,
self.cted, self.mted)
eryx = self.plot_errorbar(axryx,
period[nzyx],
z_object.z[nzyx,1,0].real,
z_object.z_err[nzyx,1,0].real,
self.ctmd, self.mtmd)
#plot phase
erxy = self.plot_errorbar(axpxy,
period[nzxy],
z_object.z[nzxy,0,1].imag,
z_object.z_err[nzxy,0,1].imag,
self.cted, self.mted)
eryx = self.plot_errorbar(axpyx,
period[nzyx],
z_object.z[nzyx,1,0].imag,
z_object.z_err[nzyx,1,0].imag,
self.ctmd, self.mtmd)
ax_list = [axrxy, axryx, axpxy, axpyx]
line_list = [[erxy[0]], [eryx[0]]]
label_list = [['$Z_{xy}$'], ['$Z_{yx}$']]
elif self.plot_component == 4:
axrxx = fig.add_subplot(gs[0, 0])
axrxy = fig.add_subplot(gs[0, 1], sharex=axrxx)
axryx = fig.add_subplot(gs[0, 2], sharex=axrxx)
axryy = fig.add_subplot(gs[0, 3], sharex=axrxx)
axpxx = fig.add_subplot(gs[1, 0])
axpxy = fig.add_subplot(gs[1, 1], sharex=axrxx)
axpyx = fig.add_subplot(gs[1, 2], sharex=axrxx)
axpyy = fig.add_subplot(gs[1, 3], sharex=axrxx)
if self.plot_z == False:
#plot resistivity
erxx= self.plot_errorbar(axrxx,
period[nzxx],
rp.resxx[nzxx],
rp.resxx_err[nzxx],
self.cted, self.mted)
erxy = self.plot_errorbar(axrxy,
period[nzxy],
rp.resxy[nzxy],
rp.resxy_err[nzxy],
self.cted, self.mted)
eryx = self.plot_errorbar(axryx,
period[nzyx],
rp.resyx[nzyx],
rp.resyx_err[nzyx],
self.ctmd, self.mtmd)
eryy = self.plot_errorbar(axryy,
period[nzyy],
rp.resyy[nzyy],
rp.resyy_err[nzyy],
self.ctmd, self.mtmd)
#plot phase
erxx= self.plot_errorbar(axpxx,
period[nzxx],
rp.phasexx[nzxx],
rp.phasexx_err[nzxx],
self.cted, self.mted)
erxy = self.plot_errorbar(axpxy,
period[nzxy],
rp.phasexy[nzxy],
rp.phasexy_err[nzxy],
self.cted, self.mted)
eryx = self.plot_errorbar(axpyx,
period[nzyx],
rp.phaseyx[nzyx],
rp.phaseyx_err[nzyx],
self.ctmd, self.mtmd)
eryy = self.plot_errorbar(axpyy,
period[nzyy],
rp.phaseyy[nzyy],
rp.phaseyy_err[nzyy],
self.ctmd, self.mtmd)
elif self.plot_z == True:
#plot real
erxx = self.plot_errorbar(axrxx,
period[nzxx],
z_object.z[nzxx,0,0].real,
z_object.z_err[nzxx,0,0].real,
self.cted, self.mted)
erxy = self.plot_errorbar(axrxy,
period[nzxy],
z_object.z[nzxy,0,1].real,
z_object.z_err[nzxy,0,1].real,
self.cted, self.mted)
eryx = self.plot_errorbar(axryx,
period[nzyx],
z_object.z[nzyx,1,0].real,
z_object.z_err[nzyx,1,0].real,
self.ctmd, self.mtmd)
eryy = self.plot_errorbar(axryy,
period[nzyy],
z_object.z[nzyy,1,1].real,
z_object.z_err[nzyy,1,1].real,
self.ctmd, self.mtmd)
#plot phase
erxx = self.plot_errorbar(axpxx,
period[nzxx],
z_object.z[nzxx,0,0].imag,
z_object.z_err[nzxx,0,0].imag,
self.cted, self.mted)
erxy = self.plot_errorbar(axpxy,
period[nzxy],
z_object.z[nzxy,0,1].imag,
z_object.z_err[nzxy,0,1].imag,
self.cted, self.mted)
eryx = self.plot_errorbar(axpyx,
period[nzyx],
z_object.z[nzyx,1,0].imag,
z_object.z_err[nzyx,1,0].imag,
self.ctmd, self.mtmd)
eryy = self.plot_errorbar(axpyy,
period[nzyy],
z_object.z[nzyy,1,1].imag,
z_object.z_err[nzyy,1,1].imag,
self.ctmd, self.mtmd)
ax_list = [axrxx, axrxy, axryx, axryy,
axpxx, axpxy, axpyx, axpyy]
line_list = [[erxx[0]], [erxy[0]], [eryx[0]], [eryy[0]]]
label_list = [['$Z_{xx}$'], ['$Z_{xy}$'],
['$Z_{yx}$'], ['$Z_{yy}$']]
#set axis properties
for aa, ax in enumerate(ax_list):
ax.tick_params(axis='y', pad=self.ylabel_pad)
if len(ax_list) == 4:
if aa < 2:
plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == False:
ax.set_yscale('log', nonposy='clip')
if self.res_limits is not None:
ax.set_ylim(self.res_limits)
else:
ax.set_ylim(self.phase_limits)
ax.set_xlabel('Period (s)', fontdict=fontdict)
#set axes labels
if aa == 0:
if self.plot_z == False:
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Re[Z (m/s)]',
fontdict=fontdict)
elif aa == 2:
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Im[Z (m/s)]',
fontdict=fontdict)
# else:
# plt.setp(ax.yaxis.get_ticklabels(), visible=False)
elif len(ax_list) == 8:
if aa < 4:
plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == False:
ax.set_yscale('log', nonposy='clip')
if self.res_limits is not None:
ax.set_ylim(self.res_limits)
else:
ax.set_ylim(self.phase_limits)
ax.set_xlabel('Period (s)', fontdict=fontdict)
#set axes labels
if aa == 0:
if self.plot_z == False:
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Re[Z (m/s)]',
fontdict=fontdict)
elif aa == 4:
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Im[Z (m/s)]',
fontdict=fontdict)
# else:
# plt.setp(ax.yaxis.get_ticklabels(), visible=False)
ax.set_xscale('log', nonposx='clip')
ax.set_xlim(xmin=10**(np.floor(np.log10(period[0]))) * 1.01,
xmax=10**(np.ceil(np.log10(period[-1]))) * .99)
ax.grid(True, alpha=.25)
# plot xy and yx together and xx, yy together
elif self.plot_style == 2:
if self.plot_component == 2:
axrxy = fig.add_subplot(gs[0, 0:])
axpxy = fig.add_subplot(gs[1, 0:], sharex=axrxy)
if self.plot_z == False:
#plot resistivity
erxy = self.plot_errorbar(axrxy,
period[nzxy],
rp.resxy[nzxy],
rp.resxy_err[nzxy],
self.cted, self.mted)
eryx = self.plot_errorbar(axrxy,
period[nzyx],
rp.resyx[nzyx],
rp.resyx_err[nzyx],
self.ctmd, self.mtmd)
#plot phase
erxy = self.plot_errorbar(axpxy,
period[nzxy],
rp.phasexy[nzxy],
rp.phasexy_err[nzxy],
self.cted, self.mted)
eryx = self.plot_errorbar(axpxy,
period[nzyx],
rp.phaseyx[nzyx],
rp.phaseyx_err[nzyx],
self.ctmd, self.mtmd)
elif self.plot_z == True:
#plot real
erxy = self.plot_errorbar(axrxy,
period[nzxy],
z_object.z[nzxy,0,1].real,
z_object.z_err[nzxy,0,1].real,
self.cted, self.mted)
eryx = self.plot_errorbar(axrxy,
period[nzyx],
z_object.z[nzyx,1,0].real,
z_object.z_err[nzyx,1,0].real,
self.ctmd, self.mtmd)
#plot phase
erxy = self.plot_errorbar(axpxy,
period[nzxy],
z_object.z[nzxy,0,1].imag,
z_object.z_err[nzxy,0,1].imag,
self.cted, self.mted)
eryx = self.plot_errorbar(axpxy,
period[nzyx],
z_object.z[nzyx,1,0].imag,
z_object.z_err[nzyx,1,0].imag,
self.ctmd, self.mtmd)
ax_list = [axrxy, axpxy]
line_list = [erxy[0], eryx[0]]
label_list = ['$Z_{xy}$', '$Z_{yx}$']
elif self.plot_component == 4:
axrxy = fig.add_subplot(gs[0, 0:2])
axpxy = fig.add_subplot(gs[1, 0:2], sharex=axrxy)
axrxx = fig.add_subplot(gs[0, 2:], sharex=axrxy)
axpxx = fig.add_subplot(gs[1, 2:], sharex=axrxy)
if self.plot_z == False:
#plot resistivity
erxx= self.plot_errorbar(axrxx,
period[nzxx],
rp.resxx[nzxx],
rp.resxx_err[nzxx],
self.cted, self.mted)
erxy = self.plot_errorbar(axrxy,
period[nzxy],
rp.resxy[nzxy],
rp.resxy_err[nzxy],
self.cted, self.mted)
eryx = self.plot_errorbar(axrxy,
period[nzyx],
rp.resyx[nzyx],
rp.resyx_err[nzyx],
self.ctmd, self.mtmd)
eryy = self.plot_errorbar(axrxx,
period[nzyy],
rp.resyy[nzyy],
rp.resyy_err[nzyy],
self.ctmd, self.mtmd)
#plot phase
erxx= self.plot_errorbar(axpxx,
period[nzxx],
rp.phasexx[nzxx],
rp.phasexx_err[nzxx],
self.cted, self.mted)
erxy = self.plot_errorbar(axpxy,
period[nzxy],
rp.phasexy[nzxy],
rp.phasexy_err[nzxy],
self.cted, self.mted)
eryx = self.plot_errorbar(axpxy,
period[nzyx],
rp.phaseyx[nzyx],
rp.phaseyx_err[nzyx],
self.ctmd, self.mtmd)
eryy = self.plot_errorbar(axpxx,
period[nzyy],
rp.phaseyy[nzyy],
rp.phaseyy_err[nzyy],
self.ctmd, self.mtmd)
elif self.plot_z == True:
#plot real
erxx = self.plot_errorbar(axrxx,
period[nzxx],
z_object.z[nzxx,0,0].real,
z_object.z_err[nzxx,0,0].real,
self.cted, self.mted)
erxy = self.plot_errorbar(axrxy,
period[nzxy],
z_object.z[nzxy,0,1].real,
z_object.z_err[nzxy,0,1].real,
self.cted, self.mted)
eryx = self.plot_errorbar(axrxy,
period[nzyx],
z_object.z[nzyx,1,0].real,
z_object.z_err[nzyx,1,0].real,
self.ctmd, self.mtmd)
eryy = self.plot_errorbar(axrxx,
period[nzyy],
z_object.z[nzyy,1,1].real,
z_object.z_err[nzyy,1,1].real,
self.ctmd, self.mtmd)
#plot phase
erxx = self.plot_errorbar(axpxx,
period[nzxx],
z_object.z[nzxx,0,0].imag,
z_object.z_err[nzxx,0,0].imag,
self.cted, self.mted)
erxy = self.plot_errorbar(axpxy,
period[nzxy],
z_object.z[nzxy,0,1].imag,
z_object.z_err[nzxy,0,1].imag,
self.cted, self.mted)
eryx = self.plot_errorbar(axpxy,
period[nzyx],
z_object.z[nzyx,1,0].imag,
z_object.z_err[nzyx,1,0].imag,
self.ctmd, self.mtmd)
eryy = self.plot_errorbar(axpxx,
period[nzyy],
z_object.z[nzyy,1,1].imag,
z_object.z_err[nzyy,1,1].imag,
self.ctmd, self.mtmd)
ax_list = [axrxy, axrxx, axpxy, axpxx]
line_list = [[erxy[0], eryx[0]], [erxx[0], eryy[0]]]
label_list = [['$Z_{xy}$', '$Z_{yx}$'],
['$Z_{xx}$', '$Z_{yy}$']]
#set axis properties
for aa, ax in enumerate(ax_list):
ax.tick_params(axis='y', pad=self.ylabel_pad)
if len(ax_list) == 2:
ax.set_xlabel('Period (s)', fontdict=fontdict)
if aa == 0:
plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == False:
ax.set_yscale('log', nonposy='clip')
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Re[Impedance (m/s)]',
fontdict=fontdict)
if self.res_limits is not None:
ax.set_ylim(self.res_limits)
else:
ax.set_ylim(self.phase_limits)
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Im[Impedance (m/s)]',
fontdict=fontdict)
elif len(ax_list) == 4:
if aa < 2:
plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == False:
ax.set_yscale('log', nonposy='clip')
if self.res_limits is not None:
ax.set_ylim(self.res_limits)
else:
ax.set_ylim(self.phase_limits)
ax.set_xlabel('Period (s)', fontdict=fontdict)
if aa == 0:
if self.plot_z == False:
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Re[Impedance (m/s)]',
fontdict=fontdict)
elif aa == 2:
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Im[Impedance (m/s)]',
fontdict=fontdict)
# else:
# plt.setp(ax.yaxis.get_ticklabels(), visible=False)
ax.set_xscale('log', nonposx='clip')
ax.set_xlim(xmin=10**(np.floor(np.log10(period[0]))) * 1.01,
xmax=10**(np.ceil(np.log10(period[-1]))) * .99)
ax.grid(True, alpha=.25)
if plotr == True:
for rr in range(nr):
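#shade the curve colors by the response index rr so curves from multiple response files can be told apart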
if self.color_mode == 'color':
cxy = (0,.4+float(rr)/(3*nr),0)
cyx = (.7+float(rr)/(4*nr),.13,.63-float(rr)/(4*nr))
elif self.color_mode == 'bw':
cxy = (1-1.25/(rr+2.),1-1.25/(rr+2.),1-1.25/(rr+2.))
cyx = (1-1.25/(rr+2.),1-1.25/(rr+2.),1-1.25/(rr+2.))
resp_z = self.resp_object[rr].z_resp[jj]
resp_z_err = (data_z-resp_z)/(data_z_err)
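#normalized residual (data - model) / data_error; its standard deviation is printed below as an RMS-style misfit, overall and per component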
resp_z_object = mtz.Z(z_array=resp_z,
z_err_array=resp_z_err,
freq=1./period)
rrp = mtplottools.ResPhase(resp_z_object)
rms = resp_z_err.std()
rms_xx = resp_z_err[:, 0, 0].std()
rms_xy = resp_z_err[:, 0, 1].std()
rms_yx = resp_z_err[:, 1, 0].std()
rms_yy = resp_z_err[:, 1, 1].std()
print(' --- response {0} ---'.format(rr))
print(' RMS = {:.2f}'.format(rms))
print(' RMS_xx = {:.2f}'.format(rms_xx))
print(' RMS_xy = {:.2f}'.format(rms_xy))
print(' RMS_yx = {:.2f}'.format(rms_yx))
print(' RMS_yy = {:.2f}'.format(rms_yy))
if self.plot_style == 1:
if self.plot_component == 2:
if self.plot_z == False:
#plot resistivity
rerxy = self.plot_errorbar(axrxy,
period[nzxy],
rrp.resxy[nzxy],
None,
cxy, self.mted)
reryx = self.plot_errorbar(axryx,
period[nzyx],
rrp.resyx[nzyx],
None,
cyx, self.mtmd)
#plot phase
rerxy = self.plot_errorbar(axpxy,
period[nzxy],
rrp.phasexy[nzxy],
None,
cxy, self.mted)
reryx = self.plot_errorbar(axpyx,
period[nzyx],
rrp.phaseyx[nzyx],
None,
cyx, self.mtmd)
elif self.plot_z == True:
#plot real
rerxy = self.plot_errorbar(axrxy,
period[nzxy],
resp_z[nzxy,0,1].real,
None,
cxy, self.mted)
reryx = self.plot_errorbar(axryx,
period[nzyx],
resp_z[nzyx,1,0].real,
None,
cyx, self.mtmd)
#plot phase
rerxy = self.plot_errorbar(axpxy,
period[nzxy],
resp_z[nzxy,0,1].imag,
None,
cxy, self.mted)
reryx = self.plot_errorbar(axpyx,
period[nzyx],
resp_z[nzyx,1,0].imag,
None,
cyx, self.mtmd)
line_list[0] += [rerxy[0]]
line_list[1] += [reryx[0]]
label_list[0] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy)]
label_list[1] += ['$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
elif self.plot_component == 4:
if self.plot_z == False:
#plot resistivity
rerxx= self.plot_errorbar(axrxx,
period[nzxx],
rrp.resxx[nzxx],
None,
cxy, self.mted)
rerxy = self.plot_errorbar(axrxy,
period[nzxy],
rrp.resxy[nzxy],
None,
cxy, self.mted)
reryx = self.plot_errorbar(axryx,
period[nzyx],
rrp.resyx[nzyx],
None,
cyx, self.mtmd)
reryy = self.plot_errorbar(axryy,
period[nzyy],
rrp.resyy[nzyy],
None,
cyx, self.mtmd)
#plot phase
rerxx= self.plot_errorbar(axpxx,
period[nzxx],
rrp.phasexx[nzxx],
None,
cxy, self.mted)
rerxy = self.plot_errorbar(axpxy,
period[nzxy],
rrp.phasexy[nzxy],
None,
cxy, self.mted)
reryx = self.plot_errorbar(axpyx,
period[nzyx],
rrp.phaseyx[nzyx],
None,
cyx, self.mtmd)
reryy = self.plot_errorbar(axpyy,
period[nzyy],
rrp.phaseyy[nzyy],
None,
cyx, self.mtmd)
elif self.plot_z == True:
#plot real
rerxx = self.plot_errorbar(axrxx,
period[nzxx],
resp_z[nzxx,0,0].real,
None,
cxy, self.mted)
rerxy = self.plot_errorbar(axrxy,
period[nzxy],
resp_z[nzxy,0,1].real,
None,
cxy, self.mted)
reryx = self.plot_errorbar(axryx,
period[nzyx],
resp_z[nzyx,1,0].real,
None,
cyx, self.mtmd)
reryy = self.plot_errorbar(axryy,
period[nzyy],
resp_z[nzyy,1,1].real,
None,
cyx, self.mtmd)
#plot phase
rerxx = self.plot_errorbar(axpxx,
period[nzxx],
resp_z[nzxx,0,0].imag,
None,
cxy, self.mted)
rerxy = self.plot_errorbar(axpxy,
period[nzxy],
resp_z[nzxy,0,1].imag,
None,
cxy, self.mted)
reryx = self.plot_errorbar(axpyx,
period[nzyx],
resp_z[nzyx,1,0].imag,
None,
cyx, self.mtmd)
reryy = self.plot_errorbar(axpyy,
period[nzyy],
resp_z[nzyy,1,1].imag,
None,
cyx, self.mtmd)
line_list[0] += [rerxx[0]]
line_list[1] += [rerxy[0]]
line_list[2] += [reryx[0]]
line_list[3] += [reryy[0]]
label_list[0] += ['$Z^m_{xx}$ '+
'rms={0:.2f}'.format(rms_xx)]
label_list[1] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy)]
label_list[2] += ['$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
label_list[3] += ['$Z^m_{yy}$ '+
'rms={0:.2f}'.format(rms_yy)]
elif self.plot_style == 2:
if self.plot_component == 2:
if self.plot_z == False:
#plot resistivity
rerxy = self.plot_errorbar(axrxy,
period[nzxy],
rrp.resxy[nzxy],
None,
cxy, self.mted)
reryx = self.plot_errorbar(axrxy,
period[nzyx],
rrp.resyx[nzyx],
None,
cyx, self.mtmd)
#plot phase
rerxy = self.plot_errorbar(axpxy,
period[nzxy],
rrp.phasexy[nzxy],
None,
cxy, self.mted)
reryx = self.plot_errorbar(axpxy,
period[nzyx],
rrp.phaseyx[nzyx],
None,
cyx, self.mtmd)
elif self.plot_z == True:
#plot real
rerxy = self.plot_errorbar(axrxy,
period[nzxy],
resp_z[nzxy,0,1].real,
None,
cxy, self.mted)
reryx = self.plot_errorbar(axrxy,
period[nzyx],
resp_z[nzyx,1,0].real,
None,
cyx, self.mtmd)
#plot phase
rerxy = self.plot_errorbar(axpxy,
period[nzxy],
resp_z[nzxy,0,1].imag,
None,
cxy, self.mted)
reryx = self.plot_errorbar(axpxy,
period[nzyx],
resp_z[nzyx,1,0].imag,
None,
cyx, self.mtmd)
line_list += [rerxy[0], reryx[0]]
label_list += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy),
'$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
elif self.plot_component == 4:
if self.plot_z == False:
#plot resistivity
rerxx= self.plot_errorbar(axrxx,
period[nzxx],
rrp.resxx[nzxx],
None,
cxy, self.mted)
rerxy = self.plot_errorbar(axrxy,
period[nzxy],
rrp.resxy[nzxy],
None,
cxy, self.mted)
reryx = self.plot_errorbar(axrxy,
period[nzyx],
rrp.resyx[nzyx],
None,
cyx, self.mtmd)
reryy = self.plot_errorbar(axrxx,
period[nzyy],
rrp.resyy[nzyy],
None,
cyx, self.mtmd)
#plot phase
rerxx= self.plot_errorbar(axpxx,
period[nzxx],
rrp.phasexx[nzxx],
None,
cxy, self.mted)
rerxy = self.plot_errorbar(axpxy,
period[nzxy],
rrp.phasexy[nzxy],
None,
cxy, self.mted)
reryx = self.plot_errorbar(axpxy,
period[nzyx],
rrp.phaseyx[nzyx],
None,
cyx, self.mtmd)
reryy = self.plot_errorbar(axpxx,
period[nzyy],
rrp.phaseyy[nzyy],
None,
cyx, self.mtmd)
elif self.plot_z == True:
#plot real
rerxx = self.plot_errorbar(axrxx,
period[nzxx],
resp_z[nzxx,0,0].real,
None,
cxy, self.mted)
rerxy = self.plot_errorbar(axrxy,
period[nzxy],
resp_z[nzxy,0,1].real,
None,
cxy, self.mted)
reryx = self.plot_errorbar(axrxy,
period[nzyx],
resp_z[nzyx,1,0].real,
None,
cyx, self.mtmd)
reryy = self.plot_errorbar(axrxx,
period[nzyy],
resp_z[nzyy,1,1].real,
None,
cyx, self.mtmd)
#plot phase
rerxx = self.plot_errorbar(axpxx,
period[nzxx],
resp_z[nzxx,0,0].imag,
None,
cxy, self.mted)
rerxy = self.plot_errorbar(axpxy,
period[nzxy],
resp_z[nzxy,0,1].imag,
None,
cxy, self.mted)
reryx = self.plot_errorbar(axpxy,
period[nzyx],
resp_z[nzyx,1,0].imag,
None,
cyx, self.mtmd)
reryy = self.plot_errorbar(axpxx,
period[nzyy],
resp_z[nzyy,1,1].imag,
None,
cyx, self.mtmd)
line_list[0] += [rerxy[0], reryx[0]]
line_list[1] += [rerxx[0], reryy[0]]
label_list[0] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy),
'$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
label_list[1] += ['$Z^m_{xx}$ '+
'rms={0:.2f}'.format(rms_xx),
'$Z^m_{yy}$ '+
'rms={0:.2f}'.format(rms_yy)]
#make legends
if self.plot_style == 1:
for aa, ax in enumerate(ax_list[0:self.plot_component]):
ax.legend(line_list[aa],
label_list[aa],
loc=self.legend_loc,
markerscale=self.legend_marker_scale,
borderaxespad=self.legend_border_axes_pad,
labelspacing=self.legend_label_spacing,
handletextpad=self.legend_handle_text_pad,
borderpad=self.legend_border_pad,
prop={'size':max([self.font_size/nr, 5])})
if self.plot_style == 2:
if self.plot_component == 2:
axrxy.legend(line_list,
label_list,
loc=self.legend_loc,
markerscale=self.legend_marker_scale,
borderaxespad=self.legend_border_axes_pad,
labelspacing=self.legend_label_spacing,
handletextpad=self.legend_handle_text_pad,
borderpad=self.legend_border_pad,
prop={'size':max([self.font_size/nr, 5])})
else:
for aa, ax in enumerate(ax_list[0:self.plot_component//2]):
ax.legend(line_list[aa],
label_list[aa],
loc=self.legend_loc,
markerscale=self.legend_marker_scale,
borderaxespad=self.legend_border_axes_pad,
labelspacing=self.legend_label_spacing,
handletextpad=self.legend_handle_text_pad,
borderpad=self.legend_border_pad,
prop={'size':max([self.font_size/nr, 5])})
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
for fig in self.fig_list:
plt.close(fig)
self.plot()
def save_figure(self, save_fn, file_format='pdf', orientation='portrait',
fig_dpi=None, close_fig='y'):
"""
save_plot will save the figure to save_fn.
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
save_fn/station_name_PhaseTensor.file_format
* full path -> file will be saved to the given
path. If you use this option then the format
will be assumed to be provided by the path
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
*default* is portrait
**fig_dpi** : int
The resolution in dots-per-inch the file will be
saved. If None then the dpi will be that at
which the figure was made. I don't think that
it can be larger than dpi of the figure.
**close_fig** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotPseudoSection()
>>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
"""
if fig_dpi == None:
fig_dpi = self.fig_dpi
if os.path.isdir(save_fn) == False:
file_format = save_fn[-3:]
self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
else:
save_fn = os.path.join(save_fn, '_L2.'+
file_format)
self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
if close_fig == 'y':
plt.clf()
plt.close(self.fig)
else:
pass
self.fig_fn = save_fn
print('Saved figure to: '+self.fig_fn)
def update_plot(self):
"""
update any parameters that were changed using the built-in draw from
canvas.
Use this if you change any of the .fig or axes properties
:Example: ::
>>> # to change the grid lines to only be on the major ticks
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotAllResponses()
>>> [ax.grid(True, which='major') for ax in [ps1.axrte,ps1.axtep]]
>>> ps1.update_plot()
"""
self.fig.canvas.draw()
def __str__(self):
"""
rewrite the string builtin to give a useful message
"""
return ("Plots data vs model response computed by WS3DINV")
#==============================================================================
# plot depth slices
#==============================================================================
class PlotDepthSlice(object):
"""
Plots depth slices of resistivity model
:Example: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> mfn = r"/home/MT/ws3dinv/Inv1/Test_model.00"
>>> sfn = r"/home/MT/ws3dinv/Inv1/WSStationLocations.txt"
>>> # plot just the first layer to check the formatting
>>> pds = ws.PlotDepthSlice(model_fn=mfn, station_fn=sfn,
>>> ... depth_index=0, save_plots='n')
>>> #move color bar up
>>> pds.cb_location
(0.64500000000000002, 0.14999999999999997, 0.3, 0.025)
>>> pds.cb_location = (.645, .175, .3, .025)
>>> pds.redraw_plot()
>>> #looks good now plot all depth slices and save them to a folder
>>> pds.save_path = r"/home/MT/ws3dinv/Inv1/DepthSlices"
>>> pds.depth_index = None
>>> pds.save_plots = 'y'
>>> pds.redraw_plot()
======================= ===================================================
Attributes Description
======================= ===================================================
cb_location location of color bar (x, y, width, height)
*default* is None, automatically locates
cb_orientation [ 'vertical' | 'horizontal' ]
*default* is horizontal
cb_pad padding between axes and colorbar
*default* is None
cb_shrink percentage to shrink colorbar by
*default* is None
climits (min, max) of resistivity color on log scale
*default* is (0, 4)
cmap name of color map *default* is 'jet_r'
data_fn full path to data file
depth_index integer value of depth slice index, shallowest
layer is 0
dscale scaling parameter depending on map_scale
ew_limits (min, max) plot limits in e-w direction in
map_scale units. *default* is None, sets viewing
area to the station area
fig_aspect aspect ratio of plot. *default* is 1
fig_dpi resolution of figure in dots-per-inch. *default* is
300
fig_list list of matplotlib.figure instances for each
depth slice
fig_size [width, height] in inches of figure size
*default* is [6, 6]
font_size size of ticklabel font in points, labels are
font_size+2. *default* is 8
grid_east relative location of grid nodes in e-w direction
in map_scale units
grid_north relative location of grid nodes in n-s direction
in map_scale units
grid_z relative location of grid nodes in z direction
in map_scale units
initial_fn full path to initial file
map_scale [ 'km' | 'm' ] distance units of map. *default* is
km
mesh_east np.meshgrid(grid_east, grid_north, indexing='ij')
mesh_north np.meshgrid(grid_east, grid_north, indexing='ij')
model_fn full path to model file
nodes_east relative distance between nodes in e-w direction
in map_scale units
nodes_north relative distance between nodes in n-s direction
in map_scale units
nodes_z relative distance between nodes in z direction
in map_scale units
ns_limits (min, max) plot limits in n-s direction in
map_scale units. *default* is None, sets viewing
area to the station area
plot_grid [ 'y' | 'n' ] 'y' to plot mesh grid lines.
*default* is 'n'
plot_yn [ 'y' | 'n' ] 'y' to plot on instantiation
res_model np.ndarray(n_north, n_east, n_vertical) of
model resistivity values in linear scale
save_path path to save figures to
save_plots [ 'y' | 'n' ] 'y' to save depth slices to save_path
station_east location of stations in east direction in
map_scale units
station_fn full path to station locations file
station_names station names
station_north location of station in north direction in
map_scale units
subplot_bottom distance between axes and bottom of figure window
subplot_left distance between axes and left of figure window
subplot_right distance between axes and right of figure window
subplot_top distance between axes and top of figure window
title title of plot, *default* is depth of slice
xminorticks location of xminorticks
yminorticks location of yminorticks
======================= ===================================================
"""
def __init__(self, model_fn=None, data_fn=None, station_fn=None,
initial_fn=None, **kwargs):
self.model_fn = model_fn
self.data_fn = data_fn
self.station_fn = station_fn
self.initial_fn = initial_fn
self.save_path = kwargs.pop('save_path', None)
if self.model_fn is not None and self.save_path is None:
self.save_path = os.path.dirname(self.model_fn)
elif self.initial_fn is not None and self.save_path is None:
self.save_path = os.path.dirname(self.initial_fn)
if self.save_path is not None:
if not os.path.exists(self.save_path):
os.mkdir(self.save_path)
self.save_plots = kwargs.pop('save_plots', 'y')
self.depth_index = kwargs.pop('depth_index', None)
self.map_scale = kwargs.pop('map_scale', 'km')
#make map scale
if self.map_scale=='km':
self.dscale=1000.
elif self.map_scale=='m':
self.dscale=1.
self.ew_limits = kwargs.pop('ew_limits', None)
self.ns_limits = kwargs.pop('ns_limits', None)
self.plot_grid = kwargs.pop('plot_grid', 'n')
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.fig_aspect = kwargs.pop('fig_aspect', 1)
self.title = kwargs.pop('title', 'on')
self.fig_list = []
self.xminorticks = kwargs.pop('xminorticks', 1000)
self.yminorticks = kwargs.pop('yminorticks', 1000)
self.climits = kwargs.pop('climits', (0,4))
self.cmap = kwargs.pop('cmap', 'jet_r')
self.font_size = kwargs.pop('font_size', 8)
self.cb_shrink = kwargs.pop('cb_shrink', .8)
self.cb_pad = kwargs.pop('cb_pad', .01)
self.cb_orientation = kwargs.pop('cb_orientation', 'horizontal')
self.cb_location = kwargs.pop('cb_location', None)
self.subplot_right = .99
self.subplot_left = .085
self.subplot_top = .92
self.subplot_bottom = .1
self.res_model = None
self.grid_east = None
self.grid_north = None
self.grid_z = None
self.nodes_east = None
self.nodes_north = None
self.nodes_z = None
self.mesh_east = None
self.mesh_north = None
self.station_east = None
self.station_north = None
self.station_names = None
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn == 'y':
self.plot()
def read_files(self):
"""
read in the files to get appropriate information
"""
#--> read in model file
if self.model_fn is not None:
if os.path.isfile(self.model_fn) == True:
wsmodel = WSModel(self.model_fn)
self.res_model = wsmodel.res_model
self.grid_east = wsmodel.grid_east/self.dscale
self.grid_north = wsmodel.grid_north/self.dscale
self.grid_z = wsmodel.grid_z/self.dscale
self.nodes_east = wsmodel.nodes_east/self.dscale
self.nodes_north = wsmodel.nodes_north/self.dscale
self.nodes_z = wsmodel.nodes_z/self.dscale
else:
raise mtex.MTpyError_file_handling(
'{0} does not exist, check path'.format(self.model_fn))
#--> read in data file to get station locations
if self.data_fn is not None:
if os.path.isfile(self.data_fn) == True:
wsdata = WSData()
wsdata.read_data_file(self.data_fn)
self.station_east = wsdata.data['east']/self.dscale
self.station_north = wsdata.data['north']/self.dscale
self.station_names = wsdata.data['station']
else:
print('Could not find data file {0}'.format(self.data_fn))
#--> read in station file
if self.station_fn is not None:
if os.path.isfile(self.station_fn) == True:
wsstations = WSStation(self.station_fn)
wsstations.read_station_file()
self.station_east = wsstations.east/self.dscale
self.station_north = wsstations.north/self.dscale
self.station_names = wsstations.names
else:
print('Could not find station file {0}'.format(self.station_fn))
#--> read in initial file
if self.initial_fn is not None:
if os.path.isfile(self.initial_fn) == True:
wsmesh = WSMesh()
wsmesh.read_initial_file(self.initial_fn)
self.grid_east = wsmesh.grid_east/self.dscale
self.grid_north = wsmesh.grid_north/self.dscale
self.grid_z = wsmesh.grid_z/self.dscale
self.nodes_east = wsmesh.nodes_east/self.dscale
self.nodes_north = wsmesh.nodes_north/self.dscale
self.nodes_z = wsmesh.nodes_z/self.dscale
#need to convert index values to resistivity values
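#rdict maps the 1-based block index values of the initial model to the resistivities in res_list; note this assumes self.res_model has already been allocated (e.g. read from a model file)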
rdict = dict([(ii,res) for ii,res in enumerate(wsmesh.res_list,1)])
for ii in range(len(wsmesh.res_list)):
self.res_model[np.where(wsmesh.res_model==ii+1)] = \
rdict[ii+1]
else:
raise mtex.MTpyError_file_handling(
'{0} does not exist, check path'.format(self.initial_fn))
if self.initial_fn is None and self.model_fn is None:
raise mtex.MTpyError_inputarguments('Need to input either a model'
' file or initial file.')
def plot(self):
"""
plot depth slices
"""
#--> get information from files
self.read_files()
fdict = {'size':self.font_size+2, 'weight':'bold'}
cblabeldict={-2:'$10^{-2}$',-1:'$10^{-1}$',0:'$10^{0}$',1:'$10^{1}$',
2:'$10^{2}$',3:'$10^{3}$',4:'$10^{4}$',5:'$10^{5}$',
6:'$10^{6}$',7:'$10^{7}$',8:'$10^{8}$'}
#create an list of depth slices to plot
if self.depth_index == None:
zrange = list(range(self.grid_z.shape[0]))
elif type(self.depth_index) is int:
zrange = [self.depth_index]
elif type(self.depth_index) is list or \
type(self.depth_index) is np.ndarray:
zrange = self.depth_index
#set the limits of the plot
if self.ew_limits == None:
if self.station_east is not None:
xlimits = (np.floor(self.station_east.min()),
np.ceil(self.station_east.max()))
else:
xlimits = (self.grid_east[5], self.grid_east[-5])
else:
xlimits = self.ew_limits
if self.ns_limits == None:
if self.station_north is not None:
ylimits = (np.floor(self.station_north.min()),
np.ceil(self.station_north.max()))
else:
ylimits = (self.grid_north[5], self.grid_north[-5])
else:
ylimits = self.ns_limits
#make a mesh grid of north and east
self.mesh_east, self.mesh_north = np.meshgrid(self.grid_east,
self.grid_north,
indexing='ij')
plt.rcParams['font.size'] = self.font_size
#--> plot depths into individual figures
for ii in zrange:
depth = '{0:.3f} ({1})'.format(self.grid_z[ii],
self.map_scale)
fig = plt.figure(depth, figsize=self.fig_size, dpi=self.fig_dpi)
plt.clf()
ax1 = fig.add_subplot(1, 1, 1, aspect=self.fig_aspect)
plot_res = np.log10(self.res_model[:, :, ii].T)
mesh_plot = ax1.pcolormesh(self.mesh_east,
self.mesh_north,
plot_res,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
#plot the stations
if self.station_east is not None:
for ee, nn in zip(self.station_east, self.station_north):
ax1.text(ee, nn, '*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':5, 'weight':'bold'})
#set axis properties
ax1.set_xlim(xlimits)
ax1.set_ylim(ylimits)
ax1.xaxis.set_minor_locator(MultipleLocator(self.xminorticks/self.dscale))
ax1.yaxis.set_minor_locator(MultipleLocator(self.yminorticks/self.dscale))
ax1.set_ylabel('Northing ('+self.map_scale+')',fontdict=fdict)
ax1.set_xlabel('Easting ('+self.map_scale+')',fontdict=fdict)
ax1.set_title('Depth = {0}'.format(depth), fontdict=fdict)
#plot the grid if desired
if self.plot_grid == 'y':
east_line_xlist = []
east_line_ylist = []
for xx in self.grid_east:
east_line_xlist.extend([xx, xx])
east_line_xlist.append(None)
east_line_ylist.extend([self.grid_north.min(),
self.grid_north.max()])
east_line_ylist.append(None)
ax1.plot(east_line_xlist,
east_line_ylist,
lw=.25,
color='k')
north_line_xlist = []
north_line_ylist = []
for yy in self.grid_north:
north_line_xlist.extend([self.grid_east.min(),
self.grid_east.max()])
north_line_xlist.append(None)
north_line_ylist.extend([yy, yy])
north_line_ylist.append(None)
ax1.plot(north_line_xlist,
north_line_ylist,
lw=.25,
color='k')
#plot the colorbar
if self.cb_location is None:
if self.cb_orientation == 'horizontal':
self.cb_location = (ax1.axes.figbox.bounds[3]-.225,
ax1.axes.figbox.bounds[1]+.05,.3,.025)
elif self.cb_orientation == 'vertical':
self.cb_location = ((ax1.axes.figbox.bounds[2]-.15,
ax1.axes.figbox.bounds[3]-.21,.025,.3))
ax2 = fig.add_axes(self.cb_location)
cb = mcb.ColorbarBase(ax2,
cmap=self.cmap,
norm=Normalize(vmin=self.climits[0],
vmax=self.climits[1]),
orientation=self.cb_orientation)
if self.cb_orientation == 'horizontal':
cb.ax.xaxis.set_label_position('top')
cb.ax.xaxis.set_label_coords(.5,1.3)
elif self.cb_orientation == 'vertical':
cb.ax.yaxis.set_label_position('right')
cb.ax.yaxis.set_label_coords(1.25,.5)
cb.ax.yaxis.tick_left()
cb.ax.tick_params(axis='y',direction='in')
cb.set_label('Resistivity ($\Omega \cdot$m)',
fontdict={'size':self.font_size+1})
cb.set_ticks(np.arange(self.climits[0],self.climits[1]+1))
cb.set_ticklabels([cblabeldict[cc]
for cc in np.arange(self.climits[0],
self.climits[1]+1)])
self.fig_list.append(fig)
#--> save plots to a common folder
if self.save_plots == 'y':
fig.savefig(os.path.join(self.save_path,
"Depth_{}_{:.4f}.png".format(ii, self.grid_z[ii])),
dpi=self.fig_dpi, bbox_inches='tight')
fig.clear()
plt.close()
else:
pass
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
for fig in self.fig_list:
plt.close(fig)
self.plot()
def update_plot(self, fig):
"""
update any parameters that were changed using the built-in draw from
canvas.
Use this if you change any of the .fig or axes properties
:Example: ::
>>> # to change the grid lines to only be on the major ticks
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotAllResponses()
>>> [ax.grid(True, which='major') for ax in [ps1.axrte,ps1.axtep]]
>>> ps1.update_plot()
"""
fig.canvas.draw()
def __str__(self):
"""
rewrite the string builtin to give a useful message
"""
return ("Plots depth slices of model from WS3DINV")
#==============================================================================
# plot phase tensors
#==============================================================================
class PlotPTMaps(mtplottools.MTEllipse):
"""
Plot phase tensor maps including residual pt if response file is input.
:Plot only data for one period: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> dfn = r"/home/MT/ws3dinv/Inv1/WSDataFile.dat"
>>> ptm = ws.PlotPTMaps(data_fn=dfn, plot_period_list=[0])
:Plot data and model response: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> dfn = r"/home/MT/ws3dinv/Inv1/WSDataFile.dat"
>>> rfn = r"/home/MT/ws3dinv/Inv1/Test_resp.00"
>>> mfn = r"/home/MT/ws3dinv/Inv1/Test_model.00"
>>> ptm = ws.PlotPTMaps(data_fn=dfn, resp_fn=rfn, model_fn=mfn,
>>> ... plot_period_list=[0])
>>> # adjust colorbar
>>> ptm.cb_res_pad = 1.25
>>> ptm.redraw_plot()
========================== ================================================
Attributes Description
========================== ================================================
cb_pt_pad percentage from top of axes to place pt
color bar. *default* is .90
cb_res_pad percentage from bottom of axes to place
resistivity color bar. *default* is 1.22
cb_residual_tick_step tick step for residual pt. *default* is 3
cb_tick_step tick step for phase tensor color bar,
*default* is 45
data np.ndarray(n_station, n_periods, 2, 2)
impedance tensors for station data
data_fn full path to data file
dscale scaling parameter depending on map_scale
ellipse_cmap color map for pt ellipses. *default* is
mt_bl2gr2rd
ellipse_colorby [ 'skew' | 'skew_seg' | 'phimin' | 'phimax'|
'phidet' | 'ellipticity' ] parameter to color
ellipses by. *default* is 'phimin'
ellipse_range (min, max, step) min and max of colormap, need
to input step if plotting skew_seg
ellipse_size relative size of ellipses in map_scale
ew_limits limits of plot in e-w direction in map_scale
units. *default* is None, scales to station
area
fig_aspect aspect of figure. *default* is 1
fig_dpi resolution in dots-per-inch. *default* is 300
fig_list list of matplotlib.figure instances for each
figure plotted.
fig_size [width, height] in inches of figure window
*default* is [6, 6]
font_size font size of ticklabels, axes labels are
font_size+2. *default* is 7
grid_east relative location of grid nodes in e-w direction
in map_scale units
grid_north relative location of grid nodes in n-s direction
in map_scale units
grid_z relative location of grid nodes in z direction
in map_scale units
initial_fn full path to initial file
map_scale [ 'km' | 'm' ] distance units of map.
*default* is km
mesh_east np.meshgrid(grid_east, grid_north, indexing='ij')
mesh_north np.meshgrid(grid_east, grid_north, indexing='ij')
model_fn full path to model file
nodes_east relative distance between nodes in e-w direction
in map_scale units
nodes_north relative distance between nodes in n-s direction
in map_scale units
nodes_z relative distance between nodes in z direction
in map_scale units
ns_limits (min, max) limits of plot in n-s direction
*default* is None, viewing area is station area
pad_east padding from extreme stations in east direction
pad_north padding from extreme stations in north direction
period_list list of periods from data
plot_grid [ 'y' | 'n' ] 'y' to plot grid lines
*default* is 'n'
plot_period_list list of period index values to plot
*default* is None
plot_yn ['y' | 'n' ] 'y' to plot on instantiation
*default* is 'y'
res_cmap colormap for resistivity values.
*default* is 'jet_r'
res_limits (min, max) resistivity limits in log scale
*default* is (0, 4)
res_model np.ndarray(n_north, n_east, n_vertical) of
model resistivity values in linear scale
residual_cmap color map for pt residuals.
*default* is 'mt_wh2or'
resp np.ndarray(n_stations, n_periods, 2, 2)
impedance tensors for model response
resp_fn full path to response file
save_path directory to save figures to
save_plots [ 'y' | 'n' ] 'y' to save plots to save_path
station_east location of stations in east direction in
map_scale units
station_fn full path to station locations file
station_names station names
station_north location of station in north direction in
map_scale units
subplot_bottom distance between axes and bottom of figure window
subplot_left distance between axes and left of figure window
subplot_right distance between axes and right of figure window
subplot_top distance between axes and top of figure window
title title of plot, *default* is depth of slice
xminorticks location of xminorticks
yminorticks location of yminorticks
========================== ================================================
"""
def __init__(self, data_fn=None, resp_fn=None, station_fn=None,
model_fn=None, initial_fn=None, **kwargs):
self.model_fn = model_fn
self.data_fn = data_fn
self.station_fn = station_fn
self.resp_fn = resp_fn
self.initial_fn = initial_fn
self.save_path = kwargs.pop('save_path', None)
if self.model_fn is not None and self.save_path is None:
self.save_path = os.path.dirname(self.model_fn)
elif self.initial_fn is not None and self.save_path is None:
self.save_path = os.path.dirname(self.initial_fn)
if self.save_path is not None:
if not os.path.exists(self.save_path):
os.mkdir(self.save_path)
self.save_plots = kwargs.pop('save_plots', 'y')
self.plot_period_list = kwargs.pop('plot_period_list', None)
self.map_scale = kwargs.pop('map_scale', 'km')
#make map scale
if self.map_scale=='km':
self.dscale=1000.
elif self.map_scale=='m':
self.dscale=1.
self.ew_limits = kwargs.pop('ew_limits', None)
self.ns_limits = kwargs.pop('ns_limits', None)
self.pad_east = kwargs.pop('pad_east', 2)
self.pad_north = kwargs.pop('pad_north', 2)
self.plot_grid = kwargs.pop('plot_grid', 'n')
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.fig_aspect = kwargs.pop('fig_aspect', 1)
self.title = kwargs.pop('title', 'on')
self.fig_list = []
self.xminorticks = kwargs.pop('xminorticks', 1000)
self.yminorticks = kwargs.pop('yminorticks', 1000)
self.residual_cmap = kwargs.pop('residual_cmap', 'mt_wh2or')
self.font_size = kwargs.pop('font_size', 7)
self.cb_tick_step = kwargs.pop('cb_tick_step', 45)
self.cb_residual_tick_step = kwargs.pop('cb_residual_tick_step', 3)
self.cb_pt_pad = kwargs.pop('cb_pt_pad', .90)
self.cb_res_pad = kwargs.pop('cb_res_pad', 1.22)
self.res_limits = kwargs.pop('res_limits', (0,4))
self.res_cmap = kwargs.pop('res_cmap', 'jet_r')
#--> set the ellipse properties -------------------
self._ellipse_dict = kwargs.pop('ellipse_dict', {})
self._read_ellipse_dict()
self.subplot_right = .99
self.subplot_left = .085
self.subplot_top = .92
self.subplot_bottom = .1
self.subplot_hspace = .2
self.subplot_wspace = .05
self.res_model = None
self.grid_east = None
self.grid_north = None
self.grid_z = None
self.nodes_east = None
self.nodes_north = None
self.nodes_z = None
self.mesh_east = None
self.mesh_north = None
self.station_east = None
self.station_north = None
self.station_names = None
self.data = None
self.resp = None
self.period_list = None
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn == 'y':
self.plot()
def _get_pt(self):
"""
get phase tensors
"""
#--> read in data file
if self.data_fn is None:
raise mtex.MTpyError_inputarguments('Need to input a data file')
wsdata = WSData()
wsdata.read_data_file(self.data_fn, station_fn=self.station_fn)
self.data = wsdata.z_data
self.period_list = wsdata.period_list
self.station_east = wsdata.station_east/self.dscale
self.station_north = wsdata.station_north/self.dscale
self.station_names = wsdata.station_names
if self.plot_period_list is None:
self.plot_period_list = self.period_list
else:
if type(self.plot_period_list) is list:
#check if entries are index values or actual periods
if type(self.plot_period_list[0]) is int:
self.plot_period_list = [self.period_list[ii]
for ii in self.plot_period_list]
else:
pass
elif type(self.plot_period_list) is int:
self.plot_period_list = self.period_list[self.plot_period_list]
#--> read model file
if self.model_fn is not None:
wsmodel = WSModel(self.model_fn)
self.res_model = wsmodel.res_model
self.grid_east = wsmodel.grid_east/self.dscale
self.grid_north = wsmodel.grid_north/self.dscale
self.grid_z = wsmodel.grid_z/self.dscale
self.mesh_east, self.mesh_north = np.meshgrid(self.grid_east,
self.grid_north,
indexing='ij')
#--> read response file
if self.resp_fn is not None:
wsresp = WSResponse(self.resp_fn)
self.resp = wsresp.z_resp
def plot(self):
"""
plot phase tensor maps for data and/or response; each figure shows one
period. If a response is input, a third column is added showing the
residual phase tensor, which highlights where the model does not fit the
data well. Distances are plotted in map_scale units and resistivity in
Ohm-m. The inputs listed below are legacy notes; the corresponding
settings are now attributes of the object rather than arguments to this
method.
Inputs:
data_fn = full path to data file
resp_fn = full path to response file, if none just plots data
sites_fn = full path to sites file
periodlst = indices of periods you want to plot
esize = size of ellipses as:
0 = phase tensor ellipse
1 = phase tensor residual
2 = resistivity tensor ellipse
3 = resistivity tensor residual
ecolor = 'phimin' for coloring with phimin or 'beta' for beta coloring
colormm = list of min and max coloring for plot, list as follows:
0 = phase tensor min and max for ecolor in degrees
1 = phase tensor residual min and max [0,1]
2 = resistivity tensor coloring as resistivity on log scale
3 = resistivity tensor residual coloring as resistivity on
linear scale
xpad = padding of map from stations at extremities (km)
units = 'mv' to convert to Ohm-m
dpi = dots per inch of figure
"""
self._get_pt()
plt.rcParams['font.size'] = self.font_size
plt.rcParams['figure.subplot.left'] = self.subplot_left
plt.rcParams['figure.subplot.right'] = self.subplot_right
plt.rcParams['figure.subplot.bottom'] = self.subplot_bottom
plt.rcParams['figure.subplot.top'] = self.subplot_top
gs = gridspec.GridSpec(1, 3, hspace=self.subplot_hspace,
wspace=self.subplot_wspace)
font_dict = {'size':self.font_size+2, 'weight':'bold'}
n_stations = self.data.shape[0]
#set some local parameters
ckmin = float(self.ellipse_range[0])
ckmax = float(self.ellipse_range[1])
try:
ckstep = float(self.ellipse_range[2])
except IndexError:
if self.ellipse_cmap == 'mt_seg_bl2wh2rd':
raise ValueError('Need to input range as (min, max, step)')
else:
ckstep = 3
nseg = float((ckmax-ckmin)/(2*ckstep))
if self.ew_limits == None:
if self.station_east is not None:
self.ew_limits = (np.floor(self.station_east.min())-
self.pad_east,
np.ceil(self.station_east.max())+
self.pad_east)
else:
self.ew_limits = (self.grid_east[5], self.grid_east[-5])
if self.ns_limits == None:
if self.station_north is not None:
self.ns_limits = (np.floor(self.station_north.min())-
self.pad_north,
np.ceil(self.station_north.max())+
self.pad_north)
else:
self.ns_limits = (self.grid_north[5], self.grid_north[-5])
for ff, per in enumerate(self.plot_period_list):
print('Plotting Period: {0:.5g}'.format(per))
fig = plt.figure('{0:.5g}'.format(per), figsize=self.fig_size,
dpi=self.fig_dpi)
fig.clf()
if self.resp_fn is not None:
axd = fig.add_subplot(gs[0, 0], aspect='equal')
axm = fig.add_subplot(gs[0, 1], aspect='equal')
axr = fig.add_subplot(gs[0, 2], aspect='equal')
ax_list = [axd, axm, axr]
else:
axd = fig.add_subplot(gs[0, :], aspect='equal')
ax_list = [axd]
#plot the resistivity model below the phase tensors, using the depth slice closest to the skin depth for this period
if self.model_fn is not None:
approx_depth, d_index = estimate_skin_depth(self.res_model,
self.grid_z,
per,
dscale=self.dscale)
for ax in ax_list:
plot_res = np.log10(self.res_model[:, :, d_index].T)
ax.pcolormesh(self.mesh_east,
self.mesh_north,
plot_res,
cmap=self.res_cmap,
vmin=self.res_limits[0],
vmax=self.res_limits[1])
#--> get phase tensors
pt = mtpt.PhaseTensor(z_array=self.data[:, ff],
freq=np.repeat(per, n_stations))
if self.resp is not None:
mpt = mtpt.PhaseTensor(z_array=self.resp[:, ff],
freq=np.repeat(per, n_stations))
rpt = mtpt.ResidualPhaseTensor(pt_object1=pt, pt_object2=mpt)
rpt = rpt.residual_pt
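#color residual ellipses by the geometric mean of |phimin| and |phimax| of the residual phase tensor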
rcarray = np.sqrt(abs(rpt.phimin[0]*rpt.phimax[0]))
rcmin = np.floor(rcarray.min())
rcmax = np.floor(rcarray.max())
#--> get color array
if self.ellipse_cmap == 'mt_seg_bl2wh2rd':
bounds = np.arange(ckmin, ckmax+ckstep, ckstep)
nseg = float((ckmax-ckmin)/(2*ckstep))
#get the properties to color the ellipses by
if self.ellipse_colorby == 'phiminang' or \
self.ellipse_colorby == 'phimin':
colorarray = pt.phimin[0]
if self.resp is not None:
mcarray = mpt.phimin[0]
elif self.ellipse_colorby == 'phidet':
colorarray = np.sqrt(abs(pt.det[0]))*(180/np.pi)
if self.resp is not None:
mcarray = np.sqrt(abs(mpt.det[0]))*(180/np.pi)
elif self.ellipse_colorby == 'skew' or\
self.ellipse_colorby == 'skew_seg':
colorarray = pt.beta[0]
if self.resp is not None:
mcarray = mpt.beta[0]
elif self.ellipse_colorby == 'ellipticity':
colorarray = pt.ellipticity[0]
if self.resp is not None:
mcarray = mpt.ellipticity[0]
else:
raise NameError(self.ellipse_colorby+' is not supported')
#--> plot phase tensor ellipses for each stations
for jj in range(n_stations):
#-----------plot data phase tensors---------------
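#ellipse axes are scaled by each station's phimin/phimax relative to the maximum phimax over all stations at this period, so the largest ellipse spans ellipse_size map units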
eheight = pt.phimin[0][jj]/pt.phimax[0].max()*self.ellipse_size
ewidth = pt.phimax[0][jj]/pt.phimax[0].max()*self.ellipse_size
ellipse = Ellipse((self.station_east[jj],
self.station_north[jj]),
width=ewidth,
height=eheight,
angle=90-pt.azimuth[0][jj])
#get ellipse color
if self.ellipse_cmap.find('seg')>0:
ellipse.set_facecolor(mtcl.get_plot_color(colorarray[jj],
self.ellipse_colorby,
self.ellipse_cmap,
ckmin,
ckmax,
bounds=bounds))
else:
ellipse.set_facecolor(mtcl.get_plot_color(colorarray[jj],
self.ellipse_colorby,
self.ellipse_cmap,
ckmin,
ckmax))
axd.add_artist(ellipse)
if self.resp is not None:
#-----------plot response phase tensors---------------
eheight = mpt.phimin[0][jj]/mpt.phimax[0].max()*\
self.ellipse_size
ewidth = mpt.phimax[0][jj]/mpt.phimax[0].max()*\
self.ellipse_size
ellipsem = Ellipse((self.station_east[jj],
self.station_north[jj]),
width=ewidth,
height=eheight,
angle=90-mpt.azimuth[0][jj])
#get ellipse color
if self.ellipse_cmap.find('seg')>0:
ellipsem.set_facecolor(mtcl.get_plot_color(mcarray[jj],
self.ellipse_colorby,
self.ellipse_cmap,
ckmin,
ckmax,
bounds=bounds))
else:
ellipsem.set_facecolor(mtcl.get_plot_color(mcarray[jj],
self.ellipse_colorby,
self.ellipse_cmap,
ckmin,
ckmax))
axm.add_artist(ellipsem)
#-----------plot residual phase tensors---------------
eheight = rpt.phimin[0][jj]/rpt.phimax[0].max()*\
self.ellipse_size
ewidth = rpt.phimax[0][jj]/rpt.phimax[0].max()*\
self.ellipse_size
ellipser = Ellipse((self.station_east[jj],
self.station_north[jj]),
width=ewidth,
height=eheight,
angle=rpt.azimuth[0][jj])
#get ellipse color
if self.ellipse_cmap.find('seg')>0:
ellipser.set_facecolor(mtcl.get_plot_color(rcarray[jj],
self.ellipse_colorby,
self.residual_cmap,
rcmin,
rcmax,
bounds=bounds))
else:
ellipser.set_facecolor(mtcl.get_plot_color(rcarray[jj],
self.ellipse_colorby,
self.residual_cmap,
rcmin,
rcmax))
axr.add_artist(ellipser)
#--> set axes properties
# data
axd.set_xlim(self.ew_limits)
axd.set_ylim(self.ns_limits)
axd.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=font_dict)
axd.set_ylabel('Northing ({0})'.format(self.map_scale),
fontdict=font_dict)
#make a colorbar for phase tensors
#bb = axd.axes.get_position().bounds
bb = axd.get_position().bounds
y1 = .25*(2+(self.ns_limits[1]-self.ns_limits[0])/
(self.ew_limits[1]-self.ew_limits[0]))
cb_location = (3.35*bb[2]/5+bb[0],
y1*self.cb_pt_pad, .295*bb[2], .02)
cbaxd = fig.add_axes(cb_location)
cbd = mcb.ColorbarBase(cbaxd,
cmap=mtcl.cmapdict[self.ellipse_cmap],
norm=Normalize(vmin=ckmin,
vmax=ckmax),
orientation='horizontal')
cbd.ax.xaxis.set_label_position('top')
cbd.ax.xaxis.set_label_coords(.5, 1.75)
cbd.set_label(mtplottools.ckdict[self.ellipse_colorby])
cbd.set_ticks(np.arange(ckmin, ckmax+self.cb_tick_step,
self.cb_tick_step))
axd.text(self.ew_limits[0]*.95,
self.ns_limits[1]*.95,
'Data',
horizontalalignment='left',
verticalalignment='top',
bbox={'facecolor':'white'},
fontdict={'size':self.font_size+1})
#Model and residual
if self.resp is not None:
for aa, ax in enumerate([axm, axr]):
ax.set_xlim(self.ew_limits)
ax.set_ylim(self.ns_limits)
ax.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=font_dict)
plt.setp(ax.yaxis.get_ticklabels(), visible=False)
#make a colorbar ontop of axis
bb = ax.axes.get_position().bounds
y1 = .25*(2+(self.ns_limits[1]-self.ns_limits[0])/
(self.ew_limits[1]-self.ew_limits[0]))
cb_location = (3.35*bb[2]/5+bb[0],
y1*self.cb_pt_pad, .295*bb[2], .02)
cbax = fig.add_axes(cb_location)
if aa == 0:
cb = mcb.ColorbarBase(cbax,
cmap=mtcl.cmapdict[self.ellipse_cmap],
norm=Normalize(vmin=ckmin,
vmax=ckmax),
orientation='horizontal')
cb.ax.xaxis.set_label_position('top')
cb.ax.xaxis.set_label_coords(.5, 1.75)
cb.set_label(mtplottools.ckdict[self.ellipse_colorby])
cb.set_ticks(np.arange(ckmin, ckmax+self.cb_tick_step,
self.cb_tick_step))
ax.text(self.ew_limits[0]*.95,
self.ns_limits[1]*.95,
'Model',
horizontalalignment='left',
verticalalignment='top',
bbox={'facecolor':'white'},
fontdict={'size':self.font_size+1})
else:
cb = mcb.ColorbarBase(cbax,
cmap=mtcl.cmapdict[self.residual_cmap],
norm=Normalize(vmin=rcmin,
vmax=rcmax),
orientation='horizontal')
cb.ax.xaxis.set_label_position('top')
cb.ax.xaxis.set_label_coords(.5, 1.75)
cb.set_label(r"$\sqrt{\Phi_{min} \Phi_{max}}$")
cb_ticks = np.arange(rcmin,
rcmax+self.cb_residual_tick_step,
self.cb_residual_tick_step)
cb.set_ticks(cb_ticks)
ax.text(self.ew_limits[0]*.95,
self.ns_limits[1]*.95,
'Residual',
horizontalalignment='left',
verticalalignment='top',
bbox={'facecolor':'white'},
fontdict={'size':self.font_size+1})
if self.model_fn is not None:
for ax in ax_list:
ax.tick_params(direction='out')
bb = ax.axes.get_position().bounds
y1 = .25*(2-(self.ns_limits[1]-self.ns_limits[0])/
(self.ew_limits[1]-self.ew_limits[0]))
cb_position = (3.0*bb[2]/5+bb[0],
y1*self.cb_res_pad, .35*bb[2], .02)
cbax = fig.add_axes(cb_position)
cb = mcb.ColorbarBase(cbax,
cmap=self.res_cmap,
norm=Normalize(vmin=self.res_limits[0],
vmax=self.res_limits[1]),
orientation='horizontal')
cb.ax.xaxis.set_label_position('top')
cb.ax.xaxis.set_label_coords(.5, 1.5)
cb.set_label('Resistivity ($\Omega \cdot$m)')
cb_ticks = np.arange(np.floor(self.res_limits[0]),
np.ceil(self.res_limits[1]+1), 1)
cb.set_ticks(cb_ticks)
cb.set_ticklabels([mtplottools.labeldict[ctk] for ctk in cb_ticks])
plt.show()
self.fig_list.append(fig)
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
for fig in self.fig_list:
plt.close(fig)
self.plot()
def save_figure(self, save_path=None, fig_dpi=None, file_format='pdf',
orientation='landscape', close_fig='y'):
"""
save_figure will save the figure to save_fn.
Arguments:
-----------
        **save_path** : string
                        full path to save figure to, can be input as
                          * directory path -> the directory path to save to
                            in which the file will be saved as
                            save_path/PT_DepthSlice_<period>s.file_format
                          * full path -> file will be saved to the given
path. If you use this option then the format
will be assumed to be provided by the path
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
                            *default* is landscape
**fig_dpi** : int
The resolution in dots-per-inch the file will be
saved. If None then the dpi will be that at
which the figure was made. I don't think that
it can be larger than dpi of the figure.
        **close_fig** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotPseudoSection()
>>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
"""
if fig_dpi == None:
fig_dpi = self.fig_dpi
if os.path.isdir(save_path) == False:
try:
os.mkdir(save_path)
except:
raise IOError('Need to input a correct directory path')
for fig in self.fig_list:
per = fig.canvas.get_window_title()
save_fn = os.path.join(save_path, 'PT_DepthSlice_{0}s.{1}'.format(
per, file_format))
fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
if close_fig == 'y':
plt.close(fig)
else:
pass
self.fig_fn = save_fn
print('Saved figure to: '+self.fig_fn)
#==============================================================================
# ESTIMATE SKIN DEPTH FOR MODEL
#==============================================================================
def estimate_skin_depth(res_model, grid_z, period, dscale=1000):
"""
estimate the skin depth from the resistivity model assuming that
delta_skin ~ 500 * sqrt(rho_a*T)
Arguments:
-----------
**resmodel** : np.ndarray (n_north, n_east, n_z)
array of resistivity values for model grid
**grid_z** : np.ndarray (n_z)
array of depth layers in m or km, be sure to change
dscale accordingly
**period** : float
period in seconds to estimate a skin depth for
**dscale** : [1000 | 1]
scaling value to scale depth estimation to meters (1) or
kilometers (1000)
Outputs:
---------
**depth** : float
estimated skin depth in units according to dscale
**depth_index** : int
index value of grid_z that corresponds to the
estimated skin depth.
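    :Example: ::
        >>> # illustrative call only; the arrays would come from a WSModel
        >>> # instance and the model path below is a placeholder
        >>> wsmodel = WSModel(r"/home/MT/ws3dinv/Inv1/Test_model.00")
        >>> d, d_index = estimate_skin_depth(wsmodel.res_model, wsmodel.grid_z, 10., dscale=1)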
"""
if dscale == 1000:
ms = 'km'
ds = .5
if dscale == 1:
ms = 'm'
ds = 500.
    #find the apparent resistivity of each depth slice within the station area
apparent_res_xy = np.array([res_model[6:-6, 6:-6, 0:ii+1].mean()
for ii in range(grid_z.shape[0])])
#calculate the period for each skin depth
skin_depth_period = np.array([(zz/ds)**2*(1/rho_a)
for zz, rho_a in zip(grid_z, apparent_res_xy)])
#match the period
try:
period_index = np.where(skin_depth_period >= period)[0][0]
except IndexError:
period_index = len(skin_depth_period)-1
#get the depth slice
depth = grid_z[period_index]
print('-'*60)
print(' input period {0:.6g} (s)'.format(period))
print(' estimated skin depth period {0:.6g} (s)'.format(
skin_depth_period[period_index]))
    print('    estimated apparent resistivity {0:.0f} (Ohm-m)'.format(
apparent_res_xy[period_index].mean()))
print(' estimated depth {0:.6g} ({1})'.format(depth, ms))
print(' index {0}'.format(period_index))
print('-'*60)
return depth, period_index
#==============================================================================
# plot slices
#==============================================================================
class PlotSlices(object):
"""
plot all slices and be able to scroll through the model
:Example: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> mfn = r"/home/MT/ws3dinv/Inv1/Test_model.00"
>>> sfn = r"/home/MT/ws3dinv/Inv1/WSStationLocations.txt"
>>> # plot just first layer to check the formating
>>> pds = ws.PlotSlices(model_fn=mfn, station_fn=sfn)
======================= ===================================================
Buttons Description
======================= ===================================================
'e' moves n-s slice east by one model block
'w' moves n-s slice west by one model block
'n' moves e-w slice north by one model block
'm' moves e-w slice south by one model block
'd' moves depth slice down by one model block
'u' moves depth slice up by one model block
======================= ===================================================
======================= ===================================================
Attributes Description
======================= ===================================================
ax_en matplotlib.axes instance for depth slice map view
ax_ez matplotlib.axes instance for e-w slice
ax_map matplotlib.axes instance for location map
ax_nz matplotlib.axes instance for n-s slice
climits (min , max) color limits on resistivity in log
scale. *default* is (0, 4)
    cmap                    name of color map for resistivity.
*default* is 'jet_r'
data_fn full path to data file name
dscale scaling parameter depending on map_scale
east_line_xlist list of line nodes of east grid for faster plotting
east_line_ylist list of line nodes of east grid for faster plotting
ew_limits (min, max) limits of e-w in map_scale units
*default* is None and scales to station area
fig matplotlib.figure instance for figure
fig_aspect aspect ratio of plots. *default* is 1
fig_dpi resolution of figure in dots-per-inch
*default* is 300
fig_num figure instance number
fig_size [width, height] of figure window.
*default* is [6,6]
font_dict dictionary of font keywords, internally created
    font_size               size of ticklabels in points, axes labels are
font_size+2. *default* is 7
grid_east relative location of grid nodes in e-w direction
in map_scale units
grid_north relative location of grid nodes in n-s direction
in map_scale units
grid_z relative location of grid nodes in z direction
in map_scale units
index_east index value of grid_east being plotted
index_north index value of grid_north being plotted
index_vertical index value of grid_z being plotted
initial_fn full path to initial file
key_press matplotlib.canvas.connect instance
map_scale [ 'm' | 'km' ] scale of map. *default* is km
mesh_east np.meshgrid(grid_east, grid_north)[0]
mesh_en_east np.meshgrid(grid_east, grid_north)[0]
mesh_en_north np.meshgrid(grid_east, grid_north)[1]
mesh_ez_east np.meshgrid(grid_east, grid_z)[0]
mesh_ez_vertical np.meshgrid(grid_east, grid_z)[1]
mesh_north np.meshgrid(grid_east, grid_north)[1]
mesh_nz_north np.meshgrid(grid_north, grid_z)[0]
mesh_nz_vertical np.meshgrid(grid_north, grid_z)[1]
model_fn full path to model file
ms size of station markers in points. *default* is 2
    nodes_east              relative distance between nodes in e-w direction
in map_scale units
    nodes_north             relative distance between nodes in n-s direction
in map_scale units
    nodes_z                 relative distance between nodes in z direction
in map_scale units
north_line_xlist list of line nodes north grid for faster plotting
north_line_ylist list of line nodes north grid for faster plotting
ns_limits (min, max) limits of plots in n-s direction
                            *default* is None, set viewing area to station area
plot_yn [ 'y' | 'n' ] 'y' to plot on instantiation
*default* is 'y'
res_model np.ndarray(n_north, n_east, n_vertical) of
model resistivity values in linear scale
station_color color of station marker. *default* is black
station_dict_east location of stations for each east grid row
station_dict_north location of stations for each north grid row
station_east location of stations in east direction
station_fn full path to station file
station_font_color color of station label
station_font_pad padding between station marker and label
station_font_rotation angle of station label
station_font_size font size of station label
station_font_weight weight of font for station label
station_id [min, max] index values for station labels
station_marker station marker
station_names name of stations
station_north location of stations in north direction
subplot_bottom distance between axes and bottom of figure window
subplot_hspace distance between subplots in vertical direction
subplot_left distance between axes and left of figure window
subplot_right distance between axes and right of figure window
subplot_top distance between axes and top of figure window
subplot_wspace distance between subplots in horizontal direction
title title of plot
z_limits (min, max) limits in vertical direction,
======================= ===================================================
"""
def __init__(self, model_fn, data_fn=None, station_fn=None,
initial_fn=None, **kwargs):
self.model_fn = model_fn
self.data_fn = data_fn
self.station_fn = station_fn
self.initial_fn = initial_fn
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.fig_aspect = kwargs.pop('fig_aspect', 1)
self.title = kwargs.pop('title', 'on')
self.font_size = kwargs.pop('font_size', 7)
self.subplot_wspace = .20
self.subplot_hspace = .30
self.subplot_right = .98
self.subplot_left = .08
self.subplot_top = .97
self.subplot_bottom = .1
self.index_vertical = kwargs.pop('index_vertical', 0)
self.index_east = kwargs.pop('index_east', 0)
self.index_north = kwargs.pop('index_north', 0)
self.cmap = kwargs.pop('cmap', 'jet_r')
self.climits = kwargs.pop('climits', (0, 4))
self.map_scale = kwargs.pop('map_scale', 'km')
#make map scale
if self.map_scale=='km':
self.dscale=1000.
elif self.map_scale=='m':
self.dscale=1.
self.ew_limits = kwargs.pop('ew_limits', None)
self.ns_limits = kwargs.pop('ns_limits', None)
self.z_limits = kwargs.pop('z_limits', None)
self.res_model = None
self.grid_east = None
self.grid_north = None
self.grid_z = None
self.nodes_east = None
self.nodes_north = None
self.nodes_z = None
self.mesh_east = None
self.mesh_north = None
self.station_east = None
self.station_north = None
self.station_names = None
self.station_id = kwargs.pop('station_id', None)
self.station_font_size = kwargs.pop('station_font_size', 8)
self.station_font_pad = kwargs.pop('station_font_pad', 1.0)
self.station_font_weight = kwargs.pop('station_font_weight', 'bold')
self.station_font_rotation = kwargs.pop('station_font_rotation', 60)
self.station_font_color = kwargs.pop('station_font_color', 'k')
self.station_marker = kwargs.pop('station_marker',
r"$\blacktriangledown$")
self.station_color = kwargs.pop('station_color', 'k')
self.ms = kwargs.pop('ms', 10)
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn == 'y':
self.plot()
def read_files(self):
"""
read in the files to get appropriate information
"""
#--> read in model file
if self.model_fn is not None:
if os.path.isfile(self.model_fn) == True:
wsmodel = WSModel(self.model_fn)
self.res_model = wsmodel.res_model
self.grid_east = wsmodel.grid_east/self.dscale
self.grid_north = wsmodel.grid_north/self.dscale
self.grid_z = wsmodel.grid_z/self.dscale
self.nodes_east = wsmodel.nodes_east/self.dscale
self.nodes_north = wsmodel.nodes_north/self.dscale
self.nodes_z = wsmodel.nodes_z/self.dscale
else:
raise mtex.MTpyError_file_handling(
'{0} does not exist, check path'.format(self.model_fn))
#--> read in data file to get station locations
if self.data_fn is not None:
if os.path.isfile(self.data_fn) == True:
wsdata = WSData()
wsdata.read_data_file(self.data_fn)
self.station_east = wsdata.data['east']/self.dscale
self.station_north = wsdata.data['north']/self.dscale
self.station_names = wsdata.data['station']
else:
print('Could not find data file {0}'.format(self.data_fn))
#--> read in station file
if self.station_fn is not None:
if os.path.isfile(self.station_fn) == True:
wsstations = WSStation(self.station_fn)
wsstations.read_station_file()
self.station_east = wsstations.east/self.dscale
self.station_north = wsstations.north/self.dscale
self.station_names = wsstations.names
else:
print('Could not find station file {0}'.format(self.station_fn))
#--> read in initial file
if self.initial_fn is not None:
if os.path.isfile(self.initial_fn) == True:
wsmesh = WSMesh()
wsmesh.read_initial_file(self.initial_fn)
self.grid_east = wsmesh.grid_east/self.dscale
self.grid_north = wsmesh.grid_north/self.dscale
self.grid_z = wsmesh.grid_z/self.dscale
self.nodes_east = wsmesh.nodes_east/self.dscale
self.nodes_north = wsmesh.nodes_north/self.dscale
self.nodes_z = wsmesh.nodes_z/self.dscale
#need to convert index values to resistivity values
rdict = dict([(ii,res) for ii,res in enumerate(wsmesh.res_list,1)])
for ii in range(len(wsmesh.res_list)):
self.res_model[np.where(wsmesh.res_model==ii+1)] = \
rdict[ii+1]
else:
raise mtex.MTpyError_file_handling(
'{0} does not exist, check path'.format(self.initial_fn))
if self.initial_fn is None and self.model_fn is None:
raise mtex.MTpyError_inputarguments('Need to input either a model'
' file or initial file.')
def plot(self):
"""
plot:
east vs. vertical,
north vs. vertical,
east vs. north
"""
self.read_files()
self.get_station_grid_locations()
self.font_dict = {'size':self.font_size+2, 'weight':'bold'}
#set the limits of the plot
if self.ew_limits == None:
if self.station_east is not None:
self.ew_limits = (np.floor(self.station_east.min()),
np.ceil(self.station_east.max()))
else:
self.ew_limits = (self.grid_east[5], self.grid_east[-5])
if self.ns_limits == None:
if self.station_north is not None:
self.ns_limits = (np.floor(self.station_north.min()),
np.ceil(self.station_north.max()))
else:
self.ns_limits = (self.grid_north[5], self.grid_north[-5])
if self.z_limits == None:
self.z_limits = (self.grid_z[0]-5000/self.dscale,
self.grid_z[-5])
self.fig = plt.figure(self.fig_num, figsize=self.fig_size,
dpi=self.fig_dpi)
plt.clf()
gs = gridspec.GridSpec(2, 2,
wspace=self.subplot_wspace,
left=self.subplot_left,
top=self.subplot_top,
bottom=self.subplot_bottom,
right=self.subplot_right,
hspace=self.subplot_hspace)
#make subplots
self.ax_ez = self.fig.add_subplot(gs[0, 0], aspect=self.fig_aspect)
self.ax_nz = self.fig.add_subplot(gs[1, 1], aspect=self.fig_aspect)
self.ax_en = self.fig.add_subplot(gs[1, 0], aspect=self.fig_aspect)
self.ax_map = self.fig.add_subplot(gs[0, 1])
#make grid meshes being sure the indexing is correct
self.mesh_ez_east, self.mesh_ez_vertical = np.meshgrid(self.grid_east,
self.grid_z,
indexing='ij')
self.mesh_nz_north, self.mesh_nz_vertical = np.meshgrid(self.grid_north,
self.grid_z,
indexing='ij')
self.mesh_en_east, self.mesh_en_north = np.meshgrid(self.grid_east,
self.grid_north,
indexing='ij')
#--> plot east vs vertical
self._update_ax_ez()
#--> plot north vs vertical
self._update_ax_nz()
#--> plot east vs north
self._update_ax_en()
#--> plot the grid as a map view
self._update_map()
#plot color bar
cbx = mcb.make_axes(self.ax_map, fraction=.15, shrink=.75, pad = .1)
cb = mcb.ColorbarBase(cbx[0],
cmap=self.cmap,
norm=Normalize(vmin=self.climits[0],
vmax=self.climits[1]))
cb.ax.yaxis.set_label_position('right')
cb.ax.yaxis.set_label_coords(1.25,.5)
cb.ax.yaxis.tick_left()
cb.ax.tick_params(axis='y',direction='in')
cb.set_label('Resistivity ($\Omega \cdot$m)',
fontdict={'size':self.font_size+1})
cb.set_ticks(np.arange(np.ceil(self.climits[0]),
np.floor(self.climits[1]+1)))
cblabeldict={-2:'$10^{-3}$',-1:'$10^{-1}$',0:'$10^{0}$',1:'$10^{1}$',
2:'$10^{2}$',3:'$10^{3}$',4:'$10^{4}$',5:'$10^{5}$',
6:'$10^{6}$',7:'$10^{7}$',8:'$10^{8}$'}
cb.set_ticklabels([cblabeldict[cc]
for cc in np.arange(np.ceil(self.climits[0]),
np.floor(self.climits[1]+1))])
plt.show()
self.key_press = self.fig.canvas.mpl_connect('key_press_event',
self.on_key_press)
def on_key_press(self, event):
"""
on a key press change the slices
"""
key_press = event.key
if key_press == 'n':
if self.index_north == self.grid_north.shape[0]:
print('Already at northern most grid cell')
else:
self.index_north += 1
if self.index_north > self.grid_north.shape[0]:
self.index_north = self.grid_north.shape[0]
self._update_ax_ez()
self._update_map()
if key_press == 'm':
if self.index_north == 0:
print('Already at southern most grid cell')
else:
self.index_north -= 1
if self.index_north < 0:
self.index_north = 0
self._update_ax_ez()
self._update_map()
if key_press == 'e':
if self.index_east == self.grid_east.shape[0]:
print('Already at eastern most grid cell')
else:
self.index_east += 1
if self.index_east > self.grid_east.shape[0]:
self.index_east = self.grid_east.shape[0]
self._update_ax_nz()
self._update_map()
if key_press == 'w':
if self.index_east == 0:
print('Already at western most grid cell')
else:
self.index_east -= 1
if self.index_east < 0:
self.index_east = 0
self._update_ax_nz()
self._update_map()
if key_press == 'd':
if self.index_vertical == self.grid_z.shape[0]:
print('Already at deepest grid cell')
else:
self.index_vertical += 1
if self.index_vertical > self.grid_z.shape[0]:
self.index_vertical = self.grid_z.shape[0]
self._update_ax_en()
print('Depth = {0:.5g} ({1})'.format(self.grid_z[self.index_vertical],
self.map_scale))
if key_press == 'u':
if self.index_vertical == 0:
print('Already at surface grid cell')
else:
self.index_vertical -= 1
if self.index_vertical < 0:
self.index_vertical = 0
self._update_ax_en()
                print('Depth = {0:.5g} ({1})'.format(self.grid_z[self.index_vertical],
self.map_scale))
def _update_ax_ez(self):
"""
update east vs vertical plot
"""
self.ax_ez.cla()
plot_ez = np.log10(self.res_model[self.index_north, :, :])
self.ax_ez.pcolormesh(self.mesh_ez_east,
self.mesh_ez_vertical,
plot_ez,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
#plot stations
for sx in self.station_dict_north[self.grid_north[self.index_north]]:
self.ax_ez.text(sx,
0,
self.station_marker,
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size':self.ms,
'color':self.station_color})
self.ax_ez.set_xlim(self.ew_limits)
self.ax_ez.set_ylim(self.z_limits[1], self.z_limits[0])
self.ax_ez.set_ylabel('Depth ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_ez.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.fig.canvas.draw()
self._update_map()
def _update_ax_nz(self):
"""
        update north vs vertical plot
"""
self.ax_nz.cla()
plot_nz = np.log10(self.res_model[:, self.index_east, :])
self.ax_nz.pcolormesh(self.mesh_nz_north,
self.mesh_nz_vertical,
plot_nz,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
#plot stations
for sy in self.station_dict_east[self.grid_east[self.index_east]]:
self.ax_nz.text(sy,
0,
self.station_marker,
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size':self.ms,
'color':self.station_color})
self.ax_nz.set_xlim(self.ns_limits)
self.ax_nz.set_ylim(self.z_limits[1], self.z_limits[0])
self.ax_nz.set_xlabel('Northing ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_nz.set_ylabel('Depth ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.fig.canvas.draw()
self._update_map()
def _update_ax_en(self):
"""
        update east vs north (depth slice) plot
"""
self.ax_en.cla()
plot_en = np.log10(self.res_model[:, :, self.index_vertical].T)
self.ax_en.pcolormesh(self.mesh_en_east,
self.mesh_en_north,
plot_en,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
self.ax_en.set_xlim(self.ew_limits)
self.ax_en.set_ylim(self.ns_limits)
self.ax_en.set_ylabel('Northing ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_en.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=self.font_dict)
#--> plot the stations
if self.station_east is not None:
for ee, nn in zip(self.station_east, self.station_north):
self.ax_en.text(ee, nn, '*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':5, 'weight':'bold'})
self.fig.canvas.draw()
self._update_map()
def _update_map(self):
self.ax_map.cla()
self.east_line_xlist = []
self.east_line_ylist = []
for xx in self.grid_east:
self.east_line_xlist.extend([xx, xx])
self.east_line_xlist.append(None)
self.east_line_ylist.extend([self.grid_north.min(),
self.grid_north.max()])
self.east_line_ylist.append(None)
self.ax_map.plot(self.east_line_xlist,
self.east_line_ylist,
lw=.25,
color='k')
self.north_line_xlist = []
self.north_line_ylist = []
for yy in self.grid_north:
self.north_line_xlist.extend([self.grid_east.min(),
self.grid_east.max()])
self.north_line_xlist.append(None)
self.north_line_ylist.extend([yy, yy])
self.north_line_ylist.append(None)
self.ax_map.plot(self.north_line_xlist,
self.north_line_ylist,
lw=.25,
color='k')
#--> e-w indication line
self.ax_map.plot([self.grid_east.min(),
self.grid_east.max()],
[self.grid_north[self.index_north],
self.grid_north[self.index_north]],
lw=1,
color='g')
#--> e-w indication line
self.ax_map.plot([self.grid_east[self.index_east],
self.grid_east[self.index_east]],
[self.grid_north.min(),
self.grid_north.max()],
lw=1,
color='b')
#--> plot the stations
if self.station_east is not None:
for ee, nn in zip(self.station_east, self.station_north):
self.ax_map.text(ee, nn, '*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':5, 'weight':'bold'})
self.ax_map.set_xlim(self.ew_limits)
self.ax_map.set_ylim(self.ns_limits)
self.ax_map.set_ylabel('Northing ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_map.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=self.font_dict)
#plot stations
self.ax_map.text(self.ew_limits[0]*.95, self.ns_limits[1]*.95,
'{0:.5g} ({1})'.format(self.grid_z[self.index_vertical],
self.map_scale),
horizontalalignment='left',
verticalalignment='top',
bbox={'facecolor': 'white'},
fontdict=self.font_dict)
self.fig.canvas.draw()
def get_station_grid_locations(self):
"""
get the grid line on which a station resides for plotting
"""
self.station_dict_east = dict([(gx, []) for gx in self.grid_east])
self.station_dict_north = dict([(gy, []) for gy in self.grid_north])
if self.station_east is not None:
for ss, sx in enumerate(self.station_east):
gx = np.where(self.grid_east <= sx)[0][-1]
self.station_dict_east[self.grid_east[gx]].append(self.station_north[ss])
for ss, sy in enumerate(self.station_north):
gy = np.where(self.grid_north <= sy)[0][-1]
self.station_dict_north[self.grid_north[gy]].append(self.station_east[ss])
else:
return
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
plt.close(self.fig)
self.plot()
def save_figure(self, save_fn=None, fig_dpi=None, file_format='pdf',
orientation='landscape', close_fig='y'):
"""
save_figure will save the figure to save_fn.
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
                          save_fn/_E<index_east>_N<index_north>_Z<index_vertical>.file_format
                        * full path -> file will be saved to the given
path. If you use this option then the format
will be assumed to be provided by the path
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
                            *default* is landscape
**fig_dpi** : int
The resolution in dots-per-inch the file will be
saved. If None then the dpi will be that at
which the figure was made. I don't think that
it can be larger than dpi of the figure.
**close_plot** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotPseudoSection()
>>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
"""
if fig_dpi == None:
fig_dpi = self.fig_dpi
if os.path.isdir(save_fn) == False:
file_format = save_fn[-3:]
self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
else:
save_fn = os.path.join(save_fn, '_E{0}_N{1}_Z{2}.{3}'.format(
self.index_east, self.index_north,
self.index_vertical, file_format))
self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
if close_fig == 'y':
plt.clf()
plt.close(self.fig)
else:
pass
self.fig_fn = save_fn
print('Saved figure to: '+self.fig_fn)
#==============================================================================
# STARTUP FILES
#==============================================================================
class WSStartup(object):
"""
read and write startup files
:Example: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> dfn = r"/home/MT/ws3dinv/Inv1/WSDataFile.dat"
>>> ifn = r"/home/MT/ws3dinv/Inv1/init3d"
>>> sws = ws.WSStartup(data_fn=dfn, initial_fn=ifn)
=================== =======================================================
Attributes Description
=================== =======================================================
apriori_fn full path to *a priori* model file
*default* is 'default'
control_fn full path to model index control file
*default* is 'default'
data_fn full path to data file
error_tol error tolerance level
*default* is 'default'
initial_fn full path to initial model file
lagrange starting lagrange multiplier
*default* is 'default'
max_iter max number of iterations
*default* is 10
model_ls model length scale
*default* is 5 0.3 0.3 0.3
output_stem output file name stem
*default* is 'ws3dinv'
save_path directory to save file to
startup_fn full path to startup file
static_fn full path to statics file
*default* is 'default'
target_rms target rms
*default* is 1.0
=================== =======================================================
"""
def __init__(self, data_fn=None, initial_fn=None, **kwargs):
self.data_fn = data_fn
self.initial_fn = initial_fn
self.output_stem = kwargs.pop('output_stem', 'ws3dinv')
self.apriori_fn = kwargs.pop('apriori_fn', 'default')
self.model_ls = kwargs.pop('model_ls', [5, 0.3, 0.3, 0.3])
self.target_rms = kwargs.pop('target_rms', 1.0)
self.control_fn = kwargs.pop('control_fn', 'default')
self.max_iter = kwargs.pop('max_iter', 10)
self.error_tol = kwargs.pop('error_tol', 'default')
self.static_fn = kwargs.pop('static_fn', 'default')
self.lagrange = kwargs.pop('lagrange', 'default')
self.save_path = kwargs.pop('save_path', None)
self.startup_fn = kwargs.pop('startup_fn', None)
self._startup_keys = ['data_file',
'output_file',
'initial_model_file',
'prior_model_file',
'control_model_index',
'target_rms',
'max_no_iteration',
'model_length_scale',
'lagrange_info',
'error_tol_level',
'static_file']
def write_startup_file(self):
"""
makes a startup file for WSINV3D.
"""
if self.data_fn is None:
raise IOError('Need to input data file name')
if self.initial_fn is None:
raise IOError('Need to input initial model file name')
#create the output filename
if self.save_path == None and self.data_fn != None:
self.startup_fn = os.path.join(os.path.dirname(self.data_fn),
'startup')
elif os.path.isdir(self.save_path) == True:
self.startup_fn = os.path.join(self.save_path, 'startup')
else:
self.startup_fn = self.save_path
slines = []
if os.path.dirname(self.startup_fn) == os.path.dirname(self.data_fn):
slines.append('{0:<20}{1}\n'.format('DATA_FILE',
os.path.basename(self.data_fn)))
if len(os.path.basename(self.data_fn)) > 70:
print('Data file is too long, going to get an error at runtime')
else:
slines.append('{0:<20}{1}\n'.format('DATA_FILE',self.data_fn))
if len(self.data_fn) > 70:
print('Data file is too long, going to get an error at runtime')
slines.append('{0:<20}{1}\n'.format('OUTPUT_FILE', self.output_stem))
if os.path.dirname(self.startup_fn) == os.path.dirname(self.initial_fn):
slines.append('{0:<20}{1}\n'.format('INITIAL_MODEL_FILE',
os.path.basename(self.initial_fn)))
else:
slines.append('{0:<20}{1}\n'.format('INITIAL_MODEL_FILE',
self.initial_fn))
slines.append('{0:<20}{1}\n'.format('PRIOR_MODEL_FILE',
self.apriori_fn))
slines.append('{0:<20}{1}\n'.format('CONTROL_MODEL_INDEX ',
self.control_fn))
slines.append('{0:<20}{1}\n'.format('TARGET_RMS', self.target_rms))
slines.append('{0:<20}{1}\n'.format('MAX_NO_ITERATION',
self.max_iter))
slines.append('{0:<20}{1:.0f} {2:.1f} {3:.1f} {4:.1f}\n'.format(
'MODEL_LENGTH_SCALE',
self.model_ls[0],
self.model_ls[1],
self.model_ls[2],
self.model_ls[3]))
slines.append('{0:<20}{1} \n'.format('LAGRANGE_INFO', self.lagrange))
slines.append('{0:<20}{1} \n'.format('ERROR_TOL_LEVEL',
self.error_tol))
slines.append('{0:<20}{1} \n'.format('STATIC_FILE', self.static_fn))
sfid = file(self.startup_fn, 'w')
sfid.write(''.join(slines))
sfid.close()
print('Wrote startup file to: {0}'.format(self.startup_fn))
def read_startup_file(self, startup_fn=None):
"""
read startup file fills attributes
"""
if startup_fn is not None:
self.startup_fn = startup_fn
if self.startup_fn is None:
raise IOError('Need to input startup file name')
self.save_path = os.path.dirname(self.startup_fn)
sfid = file(self.startup_fn, 'r')
slines = sfid.readlines()
sfid.close()
slines = [ss.strip().split()[1:] for ss in slines]
self.data_fn = slines[0][0].strip()
if self.data_fn.find(os.path.sep) == -1:
self.data_fn = os.path.join(self.save_path, self.data_fn)
self.output_stem = slines[1][0].strip()
self.initial_fn = slines[2][0].strip()
if self.initial_fn.find(os.path.sep) == -1:
self.initial_fn = os.path.join(self.save_path, self.initial_fn)
self.apriori_fn = slines[3][0].strip()
self.control_fn = slines[4][0].strip()
self.target_rms = float(slines[5][0].strip())
self.max_iter = int(slines[6][0].strip())
try:
self.model_ls = [int(slines[7][0]), float(slines[7][1]),
float(slines[7][2]), float(slines[7][3])]
except ValueError:
self.model_ls = slines[7][0]
self.lagrange = slines[8][0].strip()
self.error_tol = slines[9][0].strip()
try:
self.static_fn = slines[10][0].strip()
except IndexError:
print('Did not find static_fn')
#==============================================================================
# WRITE A VTK FILE TO IMAGE IN PARAVIEW OR MAYAVI
#==============================================================================
def write_vtk_res_model(res_model, grid_north, grid_east, grid_z, save_fn):
"""
Write a vtk file for resistivity as a structured grid
to be read into paraview or mayavi
**Doesn't work properly under windows**
adds extension automatically
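    :Example: ::
        >>> # illustrative only; the arrays come from a WSModel instance and
        >>> # the paths are placeholders
        >>> wsmodel = WSModel(r"/home/MT/ws3dinv/Inv1/Test_model.00")
        >>> sv_fn = write_vtk_res_model(wsmodel.res_model, wsmodel.grid_north, wsmodel.grid_east, wsmodel.grid_z, r"/home/MT/ws3dinv/Inv1")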
"""
if os.path.isdir(save_fn) == True:
save_fn = os.path.join(save_fn, 'VTKResistivity_Model')
save_fn = gridToVTK(save_fn, grid_north, grid_east, grid_z,
cellData={'resistivity':res_model})
return save_fn
def write_vtk_stations(station_north, station_east, save_fn, station_z=None):
"""
Write a vtk file as points to be read into paraview or mayavi
**Doesn't work properly under windows**
adds extension automatically
"""
if os.path.isdir(save_fn) == True:
save_fn = os.path.join(save_fn, 'VTKStations')
if station_z is None:
station_z = np.zeros_like(station_north)
pointsToVTK(save_fn, station_north, station_east, station_z,
cellData={'value':np.ones_like(station_north)})
return save_fn
def write_vtk_files(model_fn, station_fn, save_path):
"""
writes vtk files
"""
wsstation = WSStation(station_fn=station_fn)
wsstation.read_station_file()
wsstation.write_vtk_file(save_path)
wsmodel = WSModel(model_fn)
wsmodel.write_vtk_file(save_path)
def computeMemoryUsage(nx, ny, nz, n_stations, n_zelements, n_period):
"""
compute the memory usage of a model
Arguments:
----------
**nx** : int
number of cells in N-S direction
**ny** : int
number of cells in E-W direction
**nz** : int
number of cells in vertical direction including air layers (7)
**n_stations** : int
number of stations
**n_zelements** : int
                      number of impedance tensor elements, either 4 or 8
**n_period** : int
number of periods to invert for
Returns:
--------
**mem_req** : float
                      approximate memory usage in GB
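    :Example: ::
        >>> # rough, illustrative numbers; nz includes the air layers
        >>> mem_gb = computeMemoryUsage(50, 50, 37, 20, 8, 15)
        >>> print('approximately {0:.1f} GB'.format(mem_gb))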
"""
mem_req = 1.2*(8*(n_stations*n_period*n_zelements)**2+
8*(nx*ny*nz*n_stations*n_period*n_zelements))
return mem_req*1E-9
| gpl-3.0 |
krez13/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
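Example (illustrative usage)::
    >>> import numpy as np
    >>> from sklearn.covariance import EmpiricalCovariance
    >>> X = np.random.RandomState(0).randn(200, 3)
    >>> estimator = EmpiricalCovariance().fit(X)
    >>> estimator.covariance_.shape
    (3, 3)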
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
| bsd-3-clause |
alfonsokim/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/finance.py | 69 | 20558 | """
A collection of modules for collecting, analyzing and plotting
financial data. User contributions welcome!
"""
#from __future__ import division
import os, time, warnings
from urllib import urlopen
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
try: import datetime
except ImportError:
raise ImportError('The finance module requires datetime support (python2.3)')
import numpy as np
from matplotlib import verbose, get_configdir
from dates import date2num
from matplotlib.cbook import Bunch
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.colors import colorConverter
from lines import Line2D, TICKLEFT, TICKRIGHT
from patches import Rectangle
from matplotlib.transforms import Affine2D
configdir = get_configdir()
cachedir = os.path.join(configdir, 'finance.cache')
def parse_yahoo_historical(fh, asobject=False, adjusted=True):
"""
Parse the historical data in file handle fh from yahoo finance and return
results as a list of
d, open, close, high, low, volume
    where d is a floating point representation of date, as returned by date2num
    if adjusted=True, use adjusted prices
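    Ex (illustrative; the file handle normally comes from fetch_historical_yahoo):
        fh = fetch_historical_yahoo('^GSPC', d1, d2)
        sp = parse_yahoo_historical(fh, asobject=True, adjusted=True)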
"""
results = []
lines = fh.readlines()
datefmt = None
for line in lines[1:]:
vals = line.split(',')
if len(vals)!=7: continue
datestr = vals[0]
if datefmt is None:
try:
datefmt = '%Y-%m-%d'
dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
except ValueError:
datefmt = '%d-%b-%y' # Old Yahoo--cached file?
dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
d = date2num(dt)
open, high, low, close = [float(val) for val in vals[1:5]]
volume = int(vals[5])
if adjusted:
aclose = float(vals[6])
m = aclose/close
open *= m
high *= m
low *= m
close = aclose
results.append((d, open, close, high, low, volume))
results.reverse()
if asobject:
if len(results)==0: return None
else:
date, open, close, high, low, volume = map(np.asarray, zip(*results))
return Bunch(date=date, open=open, close=close, high=high, low=low, volume=volume)
else:
return results
def fetch_historical_yahoo(ticker, date1, date2, cachename=None):
"""
Fetch historical data for ticker between date1 and date2. date1 and
date2 are datetime instances
Ex:
fh = fetch_historical_yahoo('^GSPC', d1, d2)
cachename is the name of the local file cache. If None, will
default to the md5 hash or the url (which incorporates the ticker
and date range)
a file handle is returned
"""
ticker = ticker.upper()
d1 = (date1.month-1, date1.day, date1.year)
d2 = (date2.month-1, date2.day, date2.year)
urlFmt = 'http://table.finance.yahoo.com/table.csv?a=%d&b=%d&c=%d&d=%d&e=%d&f=%d&s=%s&y=0&g=d&ignore=.csv'
url = urlFmt % (d1[0], d1[1], d1[2],
d2[0], d2[1], d2[2], ticker)
if cachename is None:
cachename = os.path.join(cachedir, md5(url).hexdigest())
if os.path.exists(cachename):
fh = file(cachename)
verbose.report('Using cachefile %s for %s'%(cachename, ticker))
else:
if not os.path.isdir(cachedir): os.mkdir(cachedir)
fh = file(cachename, 'w')
fh.write(urlopen(url).read())
fh.close()
verbose.report('Saved %s data to cache file %s'%(ticker, cachename))
fh = file(cachename, 'r')
return fh
def quotes_historical_yahoo(ticker, date1, date2, asobject=False, adjusted=True, cachename=None):
"""
Get historical data for ticker between date1 and date2. date1 and
date2 are datetime instances
results are a list of tuples
(d, open, close, high, low, volume)
    where d is a floating point representation of date, as returned by date2num
if asobject is True, the return val is an object with attrs date,
open, close, high, low, volume, which are equal length arrays
    if adjusted=True, use adjusted prices
Ex:
sp = f.quotes_historical_yahoo('^GSPC', d1, d2, asobject=True, adjusted=True)
returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
[n,bins,patches] = hist(returns, 100)
mu = mean(returns)
sigma = std(returns)
x = normpdf(bins, mu, sigma)
plot(bins, x, color='red', lw=2)
cachename is the name of the local file cache. If None, will
default to the md5 hash or the url (which incorporates the ticker
and date range)
"""
fh = fetch_historical_yahoo(ticker, date1, date2, cachename)
try: ret = parse_yahoo_historical(fh, asobject, adjusted)
except IOError, exc:
warnings.warn('urlopen() failure\n' + url + '\n' + exc.strerror[1])
return None
return ret
def plot_day_summary(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""
quotes is a list of (time, open, close, high, low, ...) tuples
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
time must be in float date format - see date2num
ax : an Axes instance to plot to
ticksize : open/close tick marker in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
lines = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open : color = colorup
else : color = colordown
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color=color,
antialiased=False, # no need to antialias vert lines
)
oline = Line2D(
xdata=(t, t), ydata=(open, open),
color=color,
antialiased=False,
marker=TICKLEFT,
markersize=ticksize,
)
cline = Line2D(
xdata=(t, t), ydata=(close, close),
color=color,
antialiased=False,
markersize=ticksize,
marker=TICKRIGHT)
lines.extend((vline, oline, cline))
ax.add_line(vline)
ax.add_line(oline)
ax.add_line(cline)
ax.autoscale_view()
return lines
def candlestick(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
quotes is a list of (time, open, close, high, low, ...) tuples.
As long as the first 5 elements of the tuples are these values,
the tuple can be as long as you want (eg it may store volume).
time must be in float days format - see date2num
Plot the time, open, close, high, low as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
ax : an Axes instance to plot to
width : fraction of a day for the rectangle width
colorup : the color of the rectangle where close >= open
colordown : the color of the rectangle where close < open
alpha : the rectangle alpha level
return value is lines, patches where lines is a list of lines
added and patches is a list of the rectangle patches added
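    Ex (illustrative sketch; ticker and dates are placeholders):
        import datetime
        d1 = datetime.date(2004, 1, 1)
        d2 = datetime.date(2004, 6, 1)
        quotes = quotes_historical_yahoo('INTC', d1, d2)
        fig = figure()
        ax = fig.add_subplot(111)
        candlestick(ax, quotes, width=0.6)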
"""
OFFSET = width/2.0
lines = []
patches = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open :
color = colorup
lower = open
height = close-open
else :
color = colordown
lower = close
height = open-close
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color='k',
linewidth=0.5,
antialiased=True,
)
rect = Rectangle(
xy = (t-OFFSET, lower),
width = width,
height = height,
facecolor = color,
edgecolor = color,
)
rect.set_alpha(alpha)
lines.append(vline)
patches.append(rect)
ax.add_line(vline)
ax.add_patch(rect)
ax.autoscale_view()
return lines, patches
def plot_day_summary2(ax, opens, closes, highs, lows, ticksize=4,
colorup='k', colordown='r',
):
"""
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
ax : an Axes instance to plot to
ticksize : size of open and close ticks in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
# the ticks will be from ticksize to 0 in points at the origin and
# we'll translate these to the i, close location
openSegments = [ ((-ticksize, 0), (0, 0)) ]
# the ticks will be from 0 to ticksize in points at the origin and
# we'll translate these to the i, close location
closeSegments = [ ((0, 0), (ticksize, 0)) ]
offsetsOpen = [ (i, open) for i, open in zip(xrange(len(opens)), opens) if open != -1 ]
offsetsClose = [ (i, close) for i, close in zip(xrange(len(closes)), closes) if close != -1 ]
scale = ax.figure.dpi * (1.0/72.0)
tickTransform = Affine2D().scale(scale, 0.0)
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,1
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,1
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(rangeSegments)==len(offsetsOpen))
assert(len(offsetsOpen)==len(offsetsClose))
assert(len(offsetsClose)==len(colors))
useAA = 0, # use tuple here
lw = 1, # and here
rangeCollection = LineCollection(rangeSegments,
colors = colors,
linewidths = lw,
antialiaseds = useAA,
)
openCollection = LineCollection(openSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsOpen,
transOffset = ax.transData,
)
openCollection.set_transform(tickTransform)
closeCollection = LineCollection(closeSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsClose,
transOffset = ax.transData,
)
closeCollection.set_transform(tickTransform)
minpy, maxx = (0, len(rangeSegments))
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(openCollection)
ax.add_collection(closeCollection)
return rangeCollection, openCollection, closeCollection
def candlestick2(ax, opens, closes, highs, lows, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""
Represent the open, close as a bar line and high low range as a
vertical line.
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
return value is lineCollection, barCollection
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
delta = width/2.
barVerts = [ ( (i-delta, open), (i-delta, close), (i+delta, close), (i+delta, open) ) for i, open, close in zip(xrange(len(opens)), opens, closes) if open != -1 and close!=-1 ]
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(barVerts)==len(rangeSegments))
useAA = 0, # use tuple here
lw = 0.5, # and here
rangeCollection = LineCollection(rangeSegments,
colors = ( (0,0,0,1), ),
linewidths = lw,
antialiaseds = useAA,
)
barCollection = PolyCollection(barVerts,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
)
minx, maxx = 0, len(rangeSegments)
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
ax.add_collection(rangeCollection)
return rangeCollection, barCollection
def volume_overlay(ax, opens, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The opens and closes
are used to determine the color of the bar. -1 is missing. If a
value is missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
delta = width/2.
bars = [ ( (i-delta, 0), (i-delta, v), (i+delta, v), (i+delta, 0)) for i, v in enumerate(volumes) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = (0,),
linewidths = (0.5,),
)
corners = (0, 0), (len(bars), max(volumes))
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
return barCollection
def volume_overlay2(ax, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The closes are used to
determine the color of the bar. -1 is missing. If a value is
missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
nb: first point is not displayed - it is used only for choosing the
right color
"""
return volume_overlay(ax,closes[:-1],closes[1:],volumes[1:],colorup,colordown,width,alpha)
def volume_overlay3(ax, quotes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. quotes is a list of (d,
open, close, high, low, volume) and close-open is used to
determine the color of the bar
kwarg
width : the bar width in points
colorup : the color of the lines where close1 >= close0
colordown : the color of the lines where close1 < close0
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
dates, opens, closes, highs, lows, volumes = zip(*quotes)
colors = [colord[close1>=close0] for close0, close1 in zip(closes[:-1], closes[1:]) if close0!=-1 and close1 !=-1]
colors.insert(0,colord[closes[0]>=opens[0]])
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, volume), (right, volume), (right, 0)) for d, open, close, high, low, volume in quotes]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
dates = [d for d, open, close, high, low, volume in quotes]
offsetsBars = [(d, 0) for d in dates]
useAA = 0, # use tuple here
lw = 0.5, # and here
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (min(dates), max(dates))
miny = 0
maxy = max([volume for d, open, close, high, low, volume in quotes])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
#print 'datalim', ax.dataLim.get_bounds()
#print 'viewlim', ax.viewLim.get_bounds()
ax.add_collection(barCollection)
ax.autoscale_view()
return barCollection
def index_bar(ax, vals,
facecolor='b', edgecolor='l',
width=4, alpha=1.0, ):
"""
Add a bar collection graph with height vals (-1 is missing).
ax : an Axes instance to plot to
width : the bar width in points
alpha : bar transparency
"""
facecolors = (colorConverter.to_rgba(facecolor, alpha),)
edgecolors = (colorConverter.to_rgba(edgecolor, alpha),)
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, v), (right, v), (right, 0)) for v in vals if v != -1 ]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
offsetsBars = [ (i, 0) for i,v in enumerate(vals) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = facecolors,
edgecolors = edgecolors,
antialiaseds = (0,),
linewidths = (0.5,),
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (0, len(offsetsBars))
miny = 0
maxy = max([v for v in vals if v!=-1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
return barCollection
| agpl-3.0 |
WaltXon/pytroleum | pytroleum/pricing.py | 1 | 5608 | from collections import OrderedDict
import datetime
import calendar
import pandas as pd
from functions import adjust_date_to_EOM
from config import config
def pricing(price_type="strip", output_excel_file=""):
date_series = pd.date_range(
config["production_start_date"], periods=config["max_life_months"], freq="M"
)
df_time_series = pd.DataFrame(index=date_series)
df_time_series["date"] = df_time_series.index.format()
if price_type == "flat":
prices = pd.DataFrame(
index=df_time_series.index, columns=["price_oil", "price_gas", "price_ngl"]
)
prices["price_oil"] = config["price_oil_flat"]
prices["price_gas"] = config["price_gas_flat"]
prices["price_ngl"] = config["price_ngl_flat"]
elif price_type == "strip":
working_oil = []
for k, v in config["price_oil_strip"].iteritems():
working_oil.append((adjust_date_to_EOM(k), v))
working_gas = []
for k, v in config["price_gas_strip"].iteritems():
working_gas.append((adjust_date_to_EOM(k), v))
working_ngl = []
for k, v in config["price_ngl_strip"].iteritems():
working_ngl.append((adjust_date_to_EOM(k), v))
df_oil = pd.DataFrame(
[x[1] for x in working_oil],
index=[x[0] for x in working_oil],
columns=["price_oil"],
)
df_gas = pd.DataFrame(
[x[1] for x in working_gas],
index=[x[0] for x in working_gas],
columns=["price_gas"],
)
df_ngl = pd.DataFrame(
[x[1] for x in working_ngl],
index=[x[0] for x in working_ngl],
columns=["price_ngl"],
)
if df_ngl.empty == False:
prices = pd.concat([df_oil, df_gas, df_ngl, df_time_series], axis=1)
else:
prices = pd.concat([df_oil, df_gas, df_time_series], axis=1)
prices["price_ngl"] = 0.0
idx_last_oil_price = prices["price_oil"][
pd.notnull(prices["price_oil"])
].idxmax()
last_oil_price = prices.ix[idx_last_oil_price, "price_oil"]
idx_last_gas_price = prices["price_gas"][
pd.notnull(prices["price_gas"])
].idxmax()
last_gas_price = prices.ix[idx_last_gas_price, "price_gas"]
idx_last_ngl_price = prices["price_ngl"][
pd.notnull(prices["price_ngl"])
].idxmax()
last_ngl_price = prices.ix[idx_last_ngl_price, "price_ngl"]
prices = prices.fillna(0)
last_oil = [
last_oil_price,
]
last_gas = [
last_gas_price,
]
last_ngl = [
last_ngl_price,
]
for idx, row in prices.iterrows():
if row["price_oil"] == 0.0:
new_oil_price = last_oil[-1] * config["after_strip_escalator_oil"]
if new_oil_price >= config["price_oil_max"]:
prices.ix[idx, "price_oil"] = config["price_oil_max"]
last_oil.append(config["price_oil_max"])
else:
prices.ix[idx, "price_oil"] = new_oil_price
last_oil.append(new_oil_price)
if row["price_gas"] == 0.0:
new_gas_price = last_gas[-1] * config["after_strip_escalator_gas"]
if new_gas_price >= config["price_gas_max"]:
prices.ix[idx, "price_gas"] = config["price_gas_max"]
last_gas.append(config["price_gas_max"])
else:
prices.ix[idx, "price_gas"] = new_gas_price
last_gas.append(new_gas_price)
if row["price_ngl"] == 0.0:
new_ngl_price = last_gas[-1] * config["after_strip_escalator_ngl"]
if new_ngl_price >= config["price_ngl_max"]:
prices.ix[idx, "price_ngl"] = config["price_ngl_max"]
last_ngl.append(config["price_ngl_max"])
else:
prices.ix[idx, "price_ngl"] = new_ngl_price
last_ngl.append(new_ngl_price)
else:
print("pricing module, price_type unknown")
prices.to_excel(output_excel_file)
return prices
def dump_prices(oil_file, gas_file):
oil = pd.DataFrame(
[x[1] for x in config["price_oil"].items()],
index=[x[0] for x in config["price_oil"].items()],
)
oil.to_excel(oil_file)
    gas = pd.DataFrame(
        [x[1] for x in config["price_gas"].items()],
        index=[x[0] for x in config["price_gas"].items()],
    )
    gas.to_excel(gas_file)
def update_oil_prices(oil_file, gas_file):
oil = pd.read_excel(oil_file)
oil_working = oil.to_dict()
oil_new = {}
for k, v in oil_working[0].iteritems():
if type(k) == datetime.datetime:
oil_new[datetime.datetime.strftime(k, "%m/%d/%Y")] = round(v, 2)
elif type(k) == unicode:
oil_new[str(k)] = round(v, 2)
else:
oil_new[k] = round(v, 2)
# config['price_oil']=oil_new
def update_gas_prices(oil_file, gas_file):
gas = pd.read_excel(gas_file)
    gas_working = gas.to_dict()
gas_new = {}
for k, v in gas_working[0].iteritems():
if type(k) == datetime.datetime:
gas_new[datetime.datetime.strftime(k, "%m/%d/%Y")] = round(v, 2)
elif type(k) == unicode:
gas_new[str(k)] = round(v, 2)
else:
gas_new[k] = round(v, 2)
    # config['price_gas']=gas_new
def update_prices():
update_oil_prices()
update_gas_prices()
| mit |
MPIBGC-TEE/CompartmentalSystems | prototypes/newOdeInterface/test.py | 1 | 3815 |
from concurrencytest import ConcurrentTestSuite, fork_for_tests
import sys
import unittest
import plotly.graph_objs as go
from plotly.offline import plot
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import numpy as np
from sympy import Symbol,Matrix, symbols, sin, Piecewise, DiracDelta, Function
from CompartmentalSystems.helpers_reservoir import factor_out_from_matrix, parse_input_function, melt, MH_sampling, stride, is_compartmental, func_subs, numerical_function_from_expression,pe
from CompartmentalSystems.start_distributions import \
start_age_moments_from_empty_spinup, \
start_age_moments_from_steady_state, \
start_age_moments_from_zero_initial_content, \
compute_fixedpoint_numerically, \
start_age_distributions_from_steady_state, \
start_age_distributions_from_empty_spinup, \
start_age_distributions_from_zero_initial_content
from CompartmentalSystems.smooth_reservoir_model import SmoothReservoirModel
from CompartmentalSystems.smooth_model_run import SmoothModelRun
from testinfrastructure.InDirTest import InDirTest
C_0, C_1 = symbols('C_0 C_1')
state_vector = [C_0, C_1]
t = Symbol('t')
f_expr = Function('f')(t)
input_fluxes = {0: C_0*f_expr+2, 1: 2}
output_fluxes = {0: C_0, 1: C_1}
internal_fluxes = {(0,1):0.5*C_0**3}
srm = SmoothReservoirModel(state_vector, t, input_fluxes, output_fluxes, internal_fluxes)
parameter_set={}
def f_func( t_val):
return np.sin(t_val)+1.0
func_set = {f_expr: f_func}
t_min = 0
t_max = 2*np.pi
n_steps=11
times = np.linspace(t_min,t_max,n_steps)
# create a model run that starts with all pools empty
smr = SmoothModelRun(srm, parameter_set=parameter_set, start_values=np.zeros(srm.nr_pools), times=times,func_set=func_set)
# choose a t_0 somewhere in the times
t0_index = int(n_steps/2)
t0 = times[t0_index]
a_dens_func_t0,pool_contents=start_age_distributions_from_empty_spinup(srm,t_max=t0,parameter_set=parameter_set,func_set=func_set)
pe('pool_contents',locals())
# construct a function p that takes an age array "ages" as argument
# and gives back a three-dimensional ndarray (ages x times x pools)
# from the a array-valued function representing the start age density
p=smr.pool_age_densities_func(start_age_distributions_from_zero_initial_content(srm))
# for this particular example we are only interrested in ages that are smaller than t_max
# the particular choice ages=times means that t_0_ind is the same in both arrays
ages=times
t0_age_index=t0_index
pool_dens_data=p(ages)
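# pool_dens_data has shape (n_ages, n_times, n_pools); because ages=times above,
# t0_index addresses t0 on both the age axis and the time axis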
n =0
fig=smr.plot_3d_density_plotly("pool {0}".format(n),pool_dens_data[:,:,n],ages)
# plot the computed start age density for t0 on top
trace_on_surface = go.Scatter3d(
x=np.array([-t0 for a in ages]),
y=np.array([a for a in ages]),
z=np.array([a_dens_func_t0(a)[n] for a in ages]),
mode = 'lines',
line=dict(
color='#FF0000',
width=15
)
#,
#showlegend = legend_on_surface
)
#smr.add_equilibrium_surface_plotly(fig)
fig.add_scatter3d(
x=np.array([-t0 for a in ages]),
y=np.array([a for a in ages]),
z=np.array([a_dens_func_t0(a)[n] for a in ages]),
mode = 'lines',
line=dict(
color='#FF0000',
width=15
)
)
#plot(fig,filename="test_{0}.html".format(n),auto_open=False)
plot(fig,filename="test_{0}.html".format(n))
# make sure that the values for the model run at t0 coincide with the values computed by the function returned by the function under test
res_data=np.array([a_dens_func_t0(a)[n] for a in ages])
ref_data=pool_dens_data[:,t0_index,n]
assert np.allclose(res_data, ref_data, rtol=1e-3)
# make sure that the density is zero for all values of age bigger than t0
assert np.all(res_data[t0_age_index:] == 0)
| mit |
rrohan/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 93 | 2471 | # Author: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_array_equal(xi.data, X_csr[idx].data)
assert_array_equal(xi.indices, X_csr[idx].indices)
assert_array_equal(xi.indptr, X_csr[idx].indptr)
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_array_equal(xi.data, X_csr[idx].data)
assert_array_equal(xi.indices, X_csr[idx].indices)
assert_array_equal(xi.indptr, X_csr[idx].indptr)
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
| bsd-3-clause |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/backend_bases.py | 6 | 111196 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
:class:`ShowBase`
The base class for the Show class of each interactive backend;
the 'show' callable is then set to Show.__call__, inherited from
ShowBase.
:class:`ToolContainerBase`
The base class for the Toolbar class of each interactive backend.
:class:`StatusbarBase`
The base class for the messaging area.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from contextlib import contextmanager
import six
from six.moves import xrange
import os
import sys
import warnings
import time
import io
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
#import matplotlib.path as path
from matplotlib import rcParams
from matplotlib import is_interactive
from matplotlib import get_backend
from matplotlib._pylab_helpers import Gcf
from matplotlib import lines
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
import matplotlib.tight_bbox as tight_bbox
import matplotlib.textpath as textpath
from matplotlib.path import Path
from matplotlib.cbook import mplDeprecation, warn_deprecated
import matplotlib.backend_tools as tools
try:
from importlib import import_module
except:
# simple python 2.6 implementation (no relative imports)
def import_module(name):
__import__(name)
return sys.modules[name]
try:
from PIL import Image
_has_pil = True
del Image
except ImportError:
_has_pil = False
_default_filetypes = {
'ps': 'Postscript',
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'pgf': 'PGF code for LaTeX',
'png': 'Portable Network Graphics',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'
}
_default_backends = {
'ps': 'matplotlib.backends.backend_ps',
'eps': 'matplotlib.backends.backend_ps',
'pdf': 'matplotlib.backends.backend_pdf',
'pgf': 'matplotlib.backends.backend_pgf',
'png': 'matplotlib.backends.backend_agg',
'raw': 'matplotlib.backends.backend_agg',
'rgba': 'matplotlib.backends.backend_agg',
'svg': 'matplotlib.backends.backend_svg',
'svgz': 'matplotlib.backends.backend_svg',
}
def register_backend(format, backend, description=None):
"""
Register a backend for saving to a given file format.
format : str
File extension
backend : module string or canvas class
Backend for handling file output
description : str, optional
Description of the file type. Defaults to an empty string
"""
if description is None:
description = ''
_default_backends[format] = backend
_default_filetypes[format] = description
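# A minimal usage sketch of register_backend (mirroring the jpg/tiff
# registrations done in FigureCanvasBase further below; the values shown are
# illustrative only):
#
#     register_backend('jpg', 'matplotlib.backends.backend_agg',
#                      'Joint Photographic Experts Group')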
def get_registered_canvas_class(format):
"""
Return the registered default canvas for given file format.
Handles deferred import of required backend.
"""
if format not in _default_backends:
return None
backend_class = _default_backends[format]
if cbook.is_string_like(backend_class):
backend_class = import_module(backend_class).FigureCanvas
_default_backends[format] = backend_class
return backend_class
class ShowBase(object):
"""
Simple base class to generate a show() callable in backends.
Subclass must override mainloop() method.
"""
def __call__(self, block=None):
"""
Show all figures. If *block* is not None, then
it is a boolean that overrides all other factors
determining whether show blocks by calling mainloop().
The other factors are:
it does not block if run inside ipython's "%pylab" mode
it does not block in interactive mode.
"""
managers = Gcf.get_all_fig_managers()
if not managers:
return
for manager in managers:
manager.show()
if block is not None:
if block:
self.mainloop()
return
else:
return
# Hack: determine at runtime whether we are
# inside ipython in pylab mode.
from matplotlib import pyplot
try:
ipython_pylab = not pyplot.show._needmain
# IPython versions >= 0.10 tack the _needmain
# attribute onto pyplot.show, and always set
# it to False, when in %pylab mode.
ipython_pylab = ipython_pylab and get_backend() != 'WebAgg'
# TODO: The above is a hack to get the WebAgg backend
# working with ipython's `%pylab` mode until proper
# integration is implemented.
except AttributeError:
ipython_pylab = False
# Leave the following as a separate step in case we
# want to control this behavior with an rcParam.
if ipython_pylab:
return
if not is_interactive() or get_backend() == 'WebAgg':
self.mainloop()
def mainloop(self):
pass
class RendererBase(object):
"""An abstract base class to handle drawing/rendering operations.
The following methods must be implemented in the backend for full
functionality (though just implementing :meth:`draw_path` alone would
give a highly capable backend):
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_gouraud_triangle`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_text`
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
def __init__(self):
self._texmanager = None
self._text2path = textpath.TextToPath()
def open_group(self, s, gid=None):
"""
Open a grouping element with label *s*. If *gid* is given, use
*gid* as the id of the group. Is only currently used by
:mod:`~matplotlib.backends.backend_svg`.
"""
pass
def close_group(self, s):
"""
Close a grouping element with label *s*
Is only currently used by :mod:`~matplotlib.backends.backend_svg`
"""
pass
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path,
trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
*gc*
the :class:`GraphicsContextBase` instance
*marker_trans*
is an affine transform applied to the marker.
*trans*
is an affine transform applied to the path.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
"""
for vertices, codes in path.iter_segments(trans, simplify=False):
if len(vertices):
x, y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans +
transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
"""
Draws a collection of paths selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before being
applied. *offset_position* may be either "screen" or "data"
depending on the space that the offsets are in.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
:meth:`draw_path`. Some backends may want to override this in
order to render each set of path data only once, and then
reference that path multiple times with the different offsets,
colors, styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transforms.Affine2D(transform)))
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_ids, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
path, transform = path_id
transform = transforms.Affine2D(
transform.get_matrix()).translate(xo, yo)
self.draw_path(gc0, path, transform, rgbFace)
def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
coordinates, offsets, offsetTrans, facecolors,
antialiased, edgecolors):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if edgecolors is None:
edgecolors = facecolors
linewidths = np.array([gc.get_linewidth()], np.float_)
return self.draw_path_collection(
gc, master_transform, paths, [], offsets, offsetTrans, facecolors,
edgecolors, linewidths, [], [antialiased], [None], 'screen')
def draw_gouraud_triangle(self, gc, points, colors, transform):
"""
Draw a Gouraud-shaded triangle.
*points* is a 3x2 array of (x, y) points for the triangle.
*colors* is a 3x4 array of RGBA colors for each point of the
triangle.
*transform* is an affine transform to apply to the points.
"""
raise NotImplementedError
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
transform):
"""
Draws a series of Gouraud triangles.
*points* is an Nx3x2 array of (x, y) points for the triangles.
*colors* is a Nx3x4 array of RGBA colors for each point of the
triangles.
*transform* is an affine transform to apply to the points.
"""
transform = transform.frozen()
for tri, col in zip(triangles_array, colors_array):
self.draw_gouraud_triangle(gc, tri, col, transform)
def _iter_collection_raw_paths(self, master_transform, paths,
all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = Affine2D(all_transforms[i % Ntransforms])
yield path, transform + master_transform
def _iter_collection_uses_per_path(self, paths, all_transforms,
offsets, facecolors, edgecolors):
"""
Compute how many times each raw path object returned by
_iter_collection_raw_paths would be used when calling
_iter_collection. This is intended for the backend to decide
on the tradeoff between using the paths in-line and storing
them once and reusing. Rounds up in case the number of uses
is not the same for every path.
"""
Npaths = len(paths)
if Npaths == 0 or (len(facecolors) == 0 and len(edgecolors) == 0):
return 0
Npath_ids = max(Npaths, len(all_transforms))
N = max(Npath_ids, len(offsets))
return (N + Npath_ids - 1) // Npath_ids
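# Worked example (illustrative numbers): with 3 paths, no per-path transforms
# and 7 offsets, Npath_ids = 3 and N = 7, so this returns (7 + 3 - 1) // 3 == 3,
# i.e. each stored raw path would be reused at most three times.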
def _iter_collection(self, gc, master_transform, all_transforms,
path_ids, offsets, offsetTrans, facecolors,
edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
a space-efficient :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as that passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Ntransforms = len(all_transforms)
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc0 = self.new_gc()
gc0.copy_properties(gc)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc0.set_linewidth(0.0)
xo, yo = 0, 0
for i in xrange(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if offset_position == 'data':
if Ntransforms:
transform = (
Affine2D(all_transforms[i % Ntransforms]) +
master_transform)
else:
transform = master_transform
xo, yo = transform.transform_point((xo, yo))
xp, yp = transform.transform_point((0, 0))
xo = -(xp - xo)
yo = -(yp - yo)
if not (np.isfinite(xo) and np.isfinite(yo)):
continue
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
if Nlinewidths:
gc0.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc0.set_dashes(*linestyles[i % Nlinestyles])
fg = edgecolors[i % Nedgecolors]
if len(fg) == 4:
if fg[3] == 0.0:
gc0.set_linewidth(0)
else:
gc0.set_foreground(fg)
else:
gc0.set_foreground(fg)
if rgbFace is not None and len(rgbFace) == 4:
if rgbFace[3] == 0:
rgbFace = None
gc0.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc0.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc0, rgbFace
gc0.restore()
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, gc, x, y, im, transform=None):
"""
Draw an RGBA image.
*gc*
a :class:`GraphicsContextBase` instance with clipping information.
*x*
the distance in physical units (i.e., dots or pixels) from the left
hand side of the canvas.
*y*
the distance in physical units (i.e., dots or pixels) from the
bottom side of the canvas.
*im*
An NxMx4 array of RGBA pixels (of dtype uint8).
*transform*
If and only if the concrete backend is written such that
:meth:`option_scale_image` returns ``True``, an affine
transformation *may* be passed to :meth:`draw_image`. It takes the
form of a :class:`~matplotlib.transforms.Affine2DBase` instance.
The translation vector of the transformation is given in physical
units (i.e., dots or pixels). Note that the transformation does not
override `x` and `y`, and has to be applied *before* translating
the result by `x` and `y` (this can be accomplished by adding `x`
and `y` to the translation vector defined by `transform`).
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
override this method for renderers that do not necessarily always
want to rescale and composite raster images. (like SVG, PDF, or PS)
"""
return False
def option_scale_image(self):
"""
override this method for renderers that support arbitrary affine
transformations in :meth:`draw_image` (most vector backends).
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
"""
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
"""
Draw the text instance
*gc*
the :class:`GraphicsContextBase` instance
*x*
the x location of the text in display coords
*y*
the y location of the text baseline in display coords
*s*
the text string
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*angle*
the rotation angle in degrees
*mtext*
a :class:`matplotlib.text.Text` instance
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be plotted along with
your text.
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)
def _get_text_path_transform(self, x, y, s, prop, angle, ismath):
"""
return the text path and transform
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
text2path = self._text2path
fontsize = self.points_to_pixels(prop.get_size_in_points())
if ismath == "TeX":
verts, codes = text2path.get_text_path(prop, s, ismath=False,
usetex=True)
else:
verts, codes = text2path.get_text_path(prop, s, ismath=ismath,
usetex=False)
path = Path(verts, codes)
angle = angle / 180. * np.pi  # convert degrees to radians
if self.flipy():
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, self.height - y)
else:
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, y)
return path, transform
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
"""
draw the text by converting them to paths using textpath module.
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
path, transform = self._get_text_path_transform(
x, y, s, prop, angle, ismath)
color = gc.get_rgb()
gc.set_linewidth(0.0)
self.draw_path(gc, path, transform, rgbFace=color)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
if ismath == 'TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self._text2path.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
dpi = self.points_to_pixels(72)
if ismath:
dims = self._text2path.mathtext_parser.parse(s, dpi, prop)
return dims[0:3] # return width, height, descent
flags = self._text2path._get_hinting_flag()
font = self._text2path._get_font(prop)
size = prop.get_size_in_points()
font.set_size(size, dpi)
# the width and height of unrotated string
font.set_text(s, 0.0, flags=flags)
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def flipy(self):
"""
Return True if small y values are at the top of the canvas for this
renderer. Used for drawing text (:mod:`matplotlib.text`) and images
(:mod:`matplotlib.image`) only
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
*points*
a float or a numpy array of float
return points converted to pixels
You need to override this function (unless your backend
doesn't have a dpi, e.g., postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
"""
return points
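# For reference, a raster backend such as Agg typically overrides this as
# roughly ``points * self.dpi / 72.0``, so a 12-point width at 100 dpi maps to
# about 16.7 pixels; this base implementation returns the input unchanged.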
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
"""
Used in MixedModeRenderer. Switch to the raster renderer.
"""
pass
def stop_rasterizing(self):
"""
Used in MixedModeRenderer. Switch back to the vector renderer
and draw the contents of the raster renderer as an image on
the vector renderer.
"""
pass
def start_filter(self):
"""
Used in AggRenderer. Switch to a temporary renderer for image
filtering effects.
"""
pass
def stop_filter(self, filter_func):
"""
Used in AggRenderer. Switch back to the original renderer.
The contents of the temporary renderer is processed with the
*filter_func* and is drawn on the original renderer as an
image.
"""
pass
class GraphicsContextBase(object):
"""
An abstract base class that provides color, line styles, etc...
"""
def __init__(self):
self._alpha = 1.0
self._forced_alpha = False # if True, _alpha overrides A from RGBA
self._antialiased = 1 # use 0,1 not True, False for extension code
self._capstyle = 'butt'
self._cliprect = None
self._clippath = None
self._dashes = None, None
self._joinstyle = 'round'
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0, 1.0)
self._hatch = None
self._hatch_color = colors.to_rgba(rcParams['hatch.color'])
self._hatch_linewidth = rcParams['hatch.linewidth']
self._url = None
self._gid = None
self._snap = None
self._sketch = None
def copy_properties(self, gc):
'Copy properties from gc to self'
self._alpha = gc._alpha
self._forced_alpha = gc._forced_alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._hatch = gc._hatch
self._url = gc._url
self._gid = gc._gid
self._snap = gc._snap
self._sketch = gc._sketch
def restore(self):
"""
Restore the graphics context from the stack - needed only
for backends that save graphics contexts on a stack
"""
pass
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on
all backends
"""
return self._alpha
def get_antialiased(self):
"Return true if the object should try to do antialiased rendering"
return self._antialiased
def get_capstyle(self):
"""
Return the capstyle as a string in ('butt', 'round', 'projecting')
"""
return self._capstyle
def get_clip_rectangle(self):
"""
Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox`
instance
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a :class:`~matplotlib.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def get_dashes(self):
"""
Return the dash information as an offset dashlist tuple.
The dash list is an even-sized list that gives the ink on, ink
off in pixels.
See p. 107 of the PostScript `BLUEBOOK
<https://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
for more info.
Default value is None
"""
return self._dashes
def get_forced_alpha(self):
"""
Return whether the value given by get_alpha() should be used to
override any other alpha-channel values.
"""
return self._forced_alpha
def get_joinstyle(self):
"""
Return the line join style as one of ('miter', 'round', 'bevel')
"""
return self._joinstyle
def get_linestyle(self, style):
"""
Return the linestyle: one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
return self._linestyle
def get_linewidth(self):
"""
Return the line width in points as a scalar
"""
return self._linewidth
def get_rgb(self):
"""
returns a tuple of three or four floats from 0-1.
"""
return self._rgb
def get_url(self):
"""
returns a url if one is set, None otherwise
"""
return self._url
def get_gid(self):
"""
Return the object identifier if one is set, None otherwise.
"""
return self._gid
def get_snap(self):
"""
returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on all backends.
If ``alpha=None`` (the default), the alpha components of the
foreground and fill colors will be used to set their respective
transparencies (where applicable); otherwise, ``alpha`` will override
them.
"""
if alpha is not None:
self._alpha = alpha
self._forced_alpha = True
else:
self._alpha = 1.0
self._forced_alpha = False
self.set_foreground(self._rgb, isRGBA=True)
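# Usage sketch: gc.set_alpha(0.5) forces 50% opacity regardless of the alpha
# channel of the foreground/fill colors, while gc.set_alpha(None) restores the
# per-color alpha handling (see set_foreground below).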
def set_antialiased(self, b):
"""
True if object should be drawn with antialiased rendering
"""
# use 0, 1 to make life easier on extension code trying to read the gc
if b:
self._antialiased = 1
else:
self._antialiased = 0
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
"""
Set the clip rectangle with sequence (left, bottom, width, height)
"""
self._cliprect = rectangle
def set_clip_path(self, path):
"""
Set the clip path and transformation. Path should be a
:class:`~matplotlib.transforms.TransformedPath` instance.
"""
if path is not None and not isinstance(path,
transforms.TransformedPath):
msg = ("Path should be a matplotlib.transforms.TransformedPath"
"instance.")
raise ValueError(msg)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
*dash_offset*
is the offset (usually 0).
*dash_list*
specifies the on-off sequence as points.
``(None, None)`` specifies a solid line
"""
if dash_list is not None:
dl = np.asarray(dash_list)
if np.any(dl <= 0.0):
raise ValueError("All values in the dash list must be positive")
self._dashes = dash_offset, dash_list
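# Usage sketch (values are illustrative): gc.set_dashes(0, [3.0, 1.0]) draws
# 3 points of ink followed by a 1-point gap, repeating along the line, while
# gc.set_dashes(None, None) restores a solid line.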
def set_foreground(self, fg, isRGBA=False):
"""
Set the foreground color. fg can be a MATLAB format string, a
html hex color string, an rgb or rgba unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
If you know fg is rgba, set ``isRGBA=True`` for efficiency.
"""
if self._forced_alpha and isRGBA:
self._rgb = fg[:3] + (self._alpha,)
elif self._forced_alpha:
self._rgb = colors.to_rgba(fg, self._alpha)
elif isRGBA:
self._rgb = fg
else:
self._rgb = colors.to_rgba(fg)
def set_graylevel(self, frac):
"""
Set the foreground color to be a gray level with *frac*
"""
# When removing, remember to remove all overrides in subclasses.
msg = ("set_graylevel is deprecated for removal in 1.6; "
"you can achieve the same result by using "
"set_foreground((frac, frac, frac))")
warnings.warn(msg, mplDeprecation)
self._rgb = (frac, frac, frac, self._alpha)
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
"""
Set the linewidth in points
"""
self._linewidth = float(w)
def set_linestyle(self, style):
"""
Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
'dotted'). These are defined in the rcParams
`lines.dashed_pattern`, `lines.dashdot_pattern` and
`lines.dotted_pattern`. One may also specify customized dash
styles by providing a tuple of (offset, dash pairs).
"""
self._linestyle = style
def set_url(self, url):
"""
Sets the url for links in compatible backends
"""
self._url = url
def set_gid(self, id):
"""
Sets the id.
"""
self._gid = id
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""
Sets the hatch style for filling
"""
self._hatch = hatch
def get_hatch(self):
"""
Gets the current hatch style
"""
return self._hatch
def get_hatch_path(self, density=6.0):
"""
Returns a Path for the current hatch.
"""
if self._hatch is None:
return None
return Path.hatch(self._hatch, density)
def get_hatch_color(self):
"""
Gets the color to use for hatching.
"""
return self._hatch_color
def get_hatch_linewidth(self):
"""
Gets the linewidth to use for hatching.
"""
return self._hatch_linewidth
def get_sketch_params(self):
"""
Returns the sketch parameters for the artist.
Returns
-------
sketch_params : tuple or `None`
A 3-tuple with the following elements:
* `scale`: The amplitude of the wiggle perpendicular to the
source line.
* `length`: The length of the wiggle along the line.
* `randomness`: The scale factor by which the length is
shrunken or expanded.
May return `None` if no sketch parameters were set.
"""
return self._sketch
def set_sketch_params(self, scale=None, length=None, randomness=None):
"""
Sets the sketch parameters.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source
line, in pixels. If scale is `None`, or not provided, no
sketch filter will be provided.
length : float, optional
The length of the wiggle along the line, in pixels
(default 128.0)
randomness : float, optional
The scale factor by which the length is shrunken or
expanded (default 16.0)
"""
if scale is None:
self._sketch = None
else:
self._sketch = (scale, length or 128.0, randomness or 16.0)
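# Usage sketch (illustrative values): gc.set_sketch_params(scale=1.0,
# length=100.0, randomness=10.0) enables a hand-drawn wiggle similar to the
# xkcd style, and gc.set_sketch_params() with scale omitted clears it again.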
class TimerBase(object):
'''
A base class for providing timer events, useful for things like animations.
Backends need to implement a few specific methods in order to use their
own timing mechanisms so that the timer events are integrated into their
event loops.
Mandatory functions that must be implemented:
* `_timer_start`: Contains backend-specific code for starting
the timer
* `_timer_stop`: Contains backend-specific code for stopping
the timer
Optional overrides:
* `_timer_set_single_shot`: Code for setting the timer to
single shot operating mode, if supported by the timer
object. If not, the `Timer` class itself will store the flag
and the `_on_timer` method should be overridden to support
such behavior.
* `_timer_set_interval`: Code for setting the interval on the
timer, if there is a method for doing so on the timer
object.
* `_on_timer`: This is the internal function that any timer
object should call, which will handle the task of running
all callbacks that have been set.
Attributes:
* `interval`: The time between timer events in
milliseconds. Default is 1000 ms.
* `single_shot`: Boolean flag indicating whether this timer
should operate as single shot (run once and then
stop). Defaults to `False`.
* `callbacks`: Stores list of (func, args) tuples that will be
called upon timer events. This list can be manipulated
directly, or the functions `add_callback` and
`remove_callback` can be used.
'''
def __init__(self, interval=None, callbacks=None):
# Initialize empty callbacks list and set up default settings if necessary
if callbacks is None:
self.callbacks = []
else:
self.callbacks = callbacks[:] # Create a copy
if interval is None:
self._interval = 1000
else:
self._interval = interval
self._single = False
# Default attribute for holding the GUI-specific timer object
self._timer = None
def __del__(self):
'Need to stop timer and possibly disconnect timer.'
self._timer_stop()
def start(self, interval=None):
'''
Start the timer object. `interval` is optional and will be used
to reset the timer interval first if provided.
'''
if interval is not None:
self._set_interval(interval)
self._timer_start()
def stop(self):
'''
Stop the timer.
'''
self._timer_stop()
def _timer_start(self):
pass
def _timer_stop(self):
pass
def _get_interval(self):
return self._interval
def _set_interval(self, interval):
# Force to int since none of the backends actually support fractional
# milliseconds, and some error or give warnings.
interval = int(interval)
self._interval = interval
self._timer_set_interval()
interval = property(_get_interval, _set_interval)
def _get_single_shot(self):
return self._single
def _set_single_shot(self, ss=True):
self._single = ss
self._timer_set_single_shot()
single_shot = property(_get_single_shot, _set_single_shot)
def add_callback(self, func, *args, **kwargs):
'''
Register `func` to be called by timer when the event fires. Any
additional arguments provided will be passed to `func`.
'''
self.callbacks.append((func, args, kwargs))
def remove_callback(self, func, *args, **kwargs):
'''
Remove `func` from list of callbacks. `args` and `kwargs` are optional
and used to distinguish between copies of the same function registered
to be called with different arguments.
'''
if args or kwargs:
self.callbacks.remove((func, args, kwargs))
else:
funcs = [c[0] for c in self.callbacks]
if func in funcs:
self.callbacks.pop(funcs.index(func))
def _timer_set_interval(self):
'Used to set interval on underlying timer object.'
pass
def _timer_set_single_shot(self):
'Used to set single shot on underlying timer object.'
pass
def _on_timer(self):
'''
Runs all functions that have been registered as callbacks. Functions
can return False (or 0) if they should not be called any more. If there
are no callbacks, the timer is automatically stopped.
'''
for func, args, kwargs in self.callbacks:
ret = func(*args, **kwargs)
# docstring above explains why we use `if ret == False` here,
# instead of `if not ret`.
if ret == False:
self.callbacks.remove((func, args, kwargs))
if len(self.callbacks) == 0:
self.stop()
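# A minimal usage sketch for a concrete timer (new_timer is provided by
# FigureCanvasBase subclasses, not shown in this excerpt; the 500 ms interval
# is arbitrary):
#
#     timer = fig.canvas.new_timer(interval=500)
#     timer.add_callback(lambda: print('tick'))
#     timer.start()
#
# A callback may return False to remove itself, as _on_timer above shows.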
class Event(object):
"""
A matplotlib event. Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`. The following attributes
are defined and shown with their default values
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
"""
def __init__(self, name, canvas, guiEvent=None):
self.name = name
self.canvas = canvas
self.guiEvent = guiEvent
class IdleEvent(Event):
"""
An event triggered by the GUI backend when it is idle -- useful
for passive animation
"""
pass
class DrawEvent(Event):
"""
An event triggered by a draw operation on the canvas
In addition to the :class:`Event` attributes, the following event
attributes are defined:
*renderer*
the :class:`RendererBase` instance for the draw event
"""
def __init__(self, name, canvas, renderer):
Event.__init__(self, name, canvas)
self.renderer = renderer
class ResizeEvent(Event):
"""
An event triggered by a canvas resize
In addition to the :class:`Event` attributes, the following event
attributes are defined:
*width*
width of the canvas in pixels
*height*
height of the canvas in pixels
"""
def __init__(self, name, canvas):
Event.__init__(self, name, canvas)
self.width, self.height = canvas.get_width_height()
class CloseEvent(Event):
"""
An event triggered by a figure being closed
This event defines no additional attributes beyond those of :class:`Event`.
"""
def __init__(self, name, canvas, guiEvent=None):
Event.__init__(self, name, canvas, guiEvent)
class LocationEvent(Event):
"""
An event that has a screen location
The following additional attributes are defined and shown with
their default values.
In addition to the :class:`Event` attributes, the following
event attributes are defined:
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
"""
x = None # x position - pixels from left of canvas
y = None # y position - pixels from bottom of canvas
inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
# the last event that was triggered before this one
lastevent = None
def __init__(self, name, canvas, x, y, guiEvent=None):
"""
*x*, *y* in figure coords, 0,0 = bottom, left
"""
Event.__init__(self, name, canvas, guiEvent=guiEvent)
self.x = x
self.y = y
if x is None or y is None:
# cannot check if event was in axes if no x,y info
self.inaxes = None
self._update_enter_leave()
return
# Find all axes containing the mouse
if self.canvas.mouse_grabber is None:
axes_list = [a for a in self.canvas.figure.get_axes()
if a.in_axes(self)]
else:
axes_list = [self.canvas.mouse_grabber]
if len(axes_list) == 0: # None found
self.inaxes = None
self._update_enter_leave()
return
elif (len(axes_list) > 1): # Overlap, get the highest zorder
axes_list.sort(key=lambda x: x.zorder)
self.inaxes = axes_list[-1] # Use the highest zorder
else: # Just found one hit
self.inaxes = axes_list[0]
try:
trans = self.inaxes.transData.inverted()
xdata, ydata = trans.transform_point((x, y))
except ValueError:
self.xdata = None
self.ydata = None
else:
self.xdata = xdata
self.ydata = ydata
self._update_enter_leave()
def _update_enter_leave(self):
'process the figure/axes enter leave events'
if LocationEvent.lastevent is not None:
last = LocationEvent.lastevent
if last.inaxes != self.inaxes:
# process axes enter/leave events
try:
if last.inaxes is not None:
last.canvas.callbacks.process('axes_leave_event', last)
except:
pass
# See ticket 2901582.
# I think this is a valid exception to the rule
# against catching all exceptions; if anything goes
# wrong, we simply want to move on and process the
# current event.
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
else:
# process a figure enter event
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
"""
A mouse event ('button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event').
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
for scroll events). Note that in the nbagg backend, both the
middle and right clicks return 3 since right clicking will bring
up the context menu in some browsers.
*key*
the key depressed when the mouse event triggered (see
:class:`KeyEvent`)
*step*
number of scroll steps (positive for 'up', negative for 'down')
Example usage::
def on_press(event):
print('you pressed', event.button, event.xdata, event.ydata)
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
x = None # x position - pixels from left of canvas
y = None # y position - pixels from bottom of canvas
button = None # button pressed None, 1, 2, 3
dblclick = None # whether or not the event is the result of a double click
inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
step = None # scroll steps for scroll events
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, dblclick=False, guiEvent=None):
"""
x, y in figure coords, 0,0 = bottom, left
button pressed None, 1, 2, 3, 'up', 'down'
"""
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.button = button
self.key = key
self.step = step
self.dblclick = dblclick
def __str__(self):
return ("MPL MouseEvent: xy=(%d,%d) xydata=(%s,%s) button=%s " +
"dblclick=%s inaxes=%s") % (self.x, self.y, self.xdata,
self.ydata, self.button,
self.dblclick, self.inaxes)
class PickEvent(Event):
"""
a pick event, fired when the user picks a location on the canvas
sufficiently close to an artist.
Attrs: all the :class:`Event` attributes plus
*mouseevent*
the :class:`MouseEvent` that generated the pick
*artist*
the :class:`~matplotlib.artist.Artist` picked
other
extra class dependent attrs -- e.g., a
:class:`~matplotlib.lines.Line2D` pick may define different
extra attributes than a
:class:`~matplotlib.collections.PatchCollection` pick event
Example usage::
ax.plot(np.random.rand(100), 'o', picker=5) # 5 points tolerance
def on_pick(event):
line = event.artist
xdata, ydata = line.get_data()
ind = event.ind
print('on pick line:', np.array([xdata[ind], ydata[ind]]).T)
cid = fig.canvas.mpl_connect('pick_event', on_pick)
"""
def __init__(self, name, canvas, mouseevent, artist,
guiEvent=None, **kwargs):
Event.__init__(self, name, canvas, guiEvent)
self.mouseevent = mouseevent
self.artist = artist
self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
"""
A key event (key press, key release).
Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`.
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*key*
the key(s) pressed. Could be **None**, a single case sensitive ascii
character ("g", "G", "#", etc.), a special key
("control", "shift", "f1", "up", etc.) or a
combination of the above (e.g., "ctrl+alt+g", "ctrl+alt+G").
.. note::
Modifier keys will be prefixed to the pressed key and will be in the
order "ctrl", "alt", "super". The exception to this rule is when the
pressed key is itself a modifier key, therefore "ctrl+alt" and
"alt+control" can both be valid key values.
Example usage::
def on_key(event):
print('you pressed', event.key, event.xdata, event.ydata)
cid = fig.canvas.mpl_connect('key_press_event', on_key)
"""
def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.key = key
class FigureCanvasBase(object):
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event',
'close_event'
]
supports_blit = True
fixed_dpi = None
filetypes = _default_filetypes
if _has_pil:
# JPEG support
register_backend('jpg', 'matplotlib.backends.backend_agg',
'Joint Photographic Experts Group')
register_backend('jpeg', 'matplotlib.backends.backend_agg',
'Joint Photographic Experts Group')
# TIFF support
register_backend('tif', 'matplotlib.backends.backend_agg',
'Tagged Image File Format')
register_backend('tiff', 'matplotlib.backends.backend_agg',
'Tagged Image File Format')
def __init__(self, figure):
self._is_idle_drawing = True
self._is_saving = False
figure.set_canvas(self)
self.figure = figure
# a dictionary from event name to a dictionary that maps cid->func
self.callbacks = cbook.CallbackRegistry()
self.widgetlock = widgets.LockDraw()
self._button = None # the button pressed
self._key = None # the key pressed
self._lastx, self._lasty = None, None
self.button_pick_id = self.mpl_connect('button_press_event', self.pick)
self.scroll_pick_id = self.mpl_connect('scroll_event', self.pick)
self.mouse_grabber = None # the axes currently grabbing mouse
self.toolbar = None # NavigationToolbar2 will set me
self._is_idle_drawing = False
@contextmanager
def _idle_draw_cntx(self):
self._is_idle_drawing = True
yield
self._is_idle_drawing = False
def is_saving(self):
"""
Returns `True` when the renderer is in the process of saving
to a file, rather than rendering for an on-screen buffer.
"""
return self._is_saving
def onRemove(self, ev):
"""
Mouse event processor which removes the top artist
under the cursor. Connect this to the 'mouse_press_event'
using::
canvas.mpl_connect('mouse_press_event',canvas.onRemove)
"""
# Find the top artist under the cursor
under = self.figure.hitlist(ev)
under.sort(key=lambda x: x.zorder)
h = None
if under:
h = under[-1]
# Try deleting that artist, or its parent if you
# can't delete the artist
while h:
if h.remove():
self.draw_idle()
break
parent = None
for p in under:
if h in p.get_children():
parent = p
break
h = parent
def onHilite(self, ev):
"""
Mouse event processor which highlights the artists
under the cursor. Connect this to the 'motion_notify_event'
using::
canvas.mpl_connect('motion_notify_event',canvas.onHilite)
"""
msg = ("onHilite has been deprecated in 1.5 and will be removed "
"in 1.6. This function has not been used internally by mpl "
"since 2007.")
warnings.warn(msg, mplDeprecation)
if not hasattr(self, '_active'):
self._active = dict()
under = self.figure.hitlist(ev)
enter = [a for a in under if a not in self._active]
leave = [a for a in self._active if a not in under]
# On leave restore the captured colour
for a in leave:
if hasattr(a, 'get_color'):
a.set_color(self._active[a])
elif hasattr(a, 'get_edgecolor'):
a.set_edgecolor(self._active[a][0])
a.set_facecolor(self._active[a][1])
del self._active[a]
# On enter, capture the color and repaint the artist
# with the highlight colour. Capturing colour has to
# be done first in case the parent recolouring affects
# the child.
for a in enter:
if hasattr(a, 'get_color'):
self._active[a] = a.get_color()
elif hasattr(a, 'get_edgecolor'):
self._active[a] = (a.get_edgecolor(), a.get_facecolor())
else:
self._active[a] = None
for a in enter:
if hasattr(a, 'get_color'):
a.set_color('red')
elif hasattr(a, 'get_edgecolor'):
a.set_edgecolor('red')
a.set_facecolor('lightblue')
else:
self._active[a] = None
self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
def blit(self, bbox=None):
"""
blit the canvas in bbox (default entire canvas)
"""
pass
def resize(self, w, h):
"""
set the canvas size in pixels
"""
pass
def draw_event(self, renderer):
"""
This method will call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
This method will call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
def close_event(self, guiEvent=None):
"""
This method will call all functions connected to the
'close_event' with a :class:`CloseEvent`
"""
s = 'close_event'
try:
event = CloseEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
except (TypeError, AttributeError):
pass
# Suppress the TypeError when the python session is being killed.
# It may be that a better solution would be a mechanism to
# disconnect all callbacks upon shutdown.
# AttributeError occurs on OSX with qt4agg upon exiting
# with an open window; 'callbacks' attribute no longer exists.
def key_press_event(self, key, guiEvent=None):
"""
This method will call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
This method will call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
This method will be called by artists who are picked and will
fire off :class:`PickEvent` callbacks to registered listeners
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist,
guiEvent=mouseevent.guiEvent,
**kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
This method will call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, dblclick=False, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
This method will call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key,
dblclick=dblclick, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
This method will call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
This method will call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when leaving
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
self._lastx, self._lasty = None, None
def enter_notify_event(self, guiEvent=None, xy=None):
"""
Backend derived classes should call this function when entering
canvas
*guiEvent*
the native UI event that generated the mpl event
*xy*
the coordinate location of the pointer when the canvas is
entered
"""
if xy is not None:
x, y = xy
self._lastx, self._lasty = x, y
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def idle_event(self, guiEvent=None):
"""Called when GUI is idle."""
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def grab_mouse(self, ax):
"""
Set the child axes which are currently grabbing the mouse events.
Usually called by the widgets themselves.
It is an error to call this if the mouse is already grabbed by
another axes.
"""
if self.mouse_grabber not in (None, ax):
raise RuntimeError('two different axes attempted to grab mouse input')
self.mouse_grabber = ax
def release_mouse(self, ax):
"""
Release the mouse grab held by the axes, ax.
Usually called by the widgets.
It is OK to call this even if *ax* doesn't have the mouse
grab currently.
"""
if self.mouse_grabber is ax:
self.mouse_grabber = None
def draw(self, *args, **kwargs):
"""
Render the :class:`~matplotlib.figure.Figure`
"""
pass
def draw_idle(self, *args, **kwargs):
"""
:meth:`draw` only if idle; defaults to draw but backends can override
"""
if not self._is_idle_drawing:
with self._idle_draw_cntx():
self.draw(*args, **kwargs)
def draw_cursor(self, event):
"""
Draw a cursor in the event.axes if inaxes is not None. Use
native GUI drawing for efficiency if possible
"""
pass
def get_width_height(self):
"""
Return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
@classmethod
def get_supported_filetypes(cls):
"""Return dict of savefig file formats supported by this backend"""
return cls.filetypes
@classmethod
def get_supported_filetypes_grouped(cls):
"""Return a dict of savefig file formats supported by this backend,
where the keys are a file type name, such as 'Joint Photographic
Experts Group', and the values are a list of filename extensions used
for that filetype, such as ['jpg', 'jpeg']."""
groupings = {}
for ext, name in six.iteritems(cls.filetypes):
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
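# For the default filetypes above this yields something like
# {'Portable Network Graphics': ['png'],
#  'Scalable Vector Graphics': ['svg', 'svgz'], ...},
# i.e. extensions grouped and sorted under their human-readable description.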
def _get_output_canvas(self, format):
"""Return a canvas that is suitable for saving figures to a specified
file format. If necessary, this function will switch to a registered
backend that supports the format.
"""
method_name = 'print_%s' % format
# check if this canvas supports the requested format
if hasattr(self, method_name):
return self
# check if there is a default canvas for the requested format
canvas_class = get_registered_canvas_class(format)
if canvas_class:
return self.switch_backends(canvas_class)
# else report error for unsupported format
formats = sorted(self.get_supported_filetypes())
raise ValueError('Format "%s" is not supported.\n'
'Supported formats: '
'%s.' % (format, ', '.join(formats)))
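# For example, asking a canvas without a print_pdf method for 'pdf' output
# switches to the pdf-capable canvas registered for that format in
# _default_backends above (matplotlib.backends.backend_pdf).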
def print_figure(self, filename, dpi=None, facecolor=None, edgecolor=None,
orientation='portrait', format=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and edge
colors. This is useful because some of the GUIs have a gray figure
face color background and you'll probably want to override this on
hardcopy.
Arguments are:
*filename*
can also be a file object on image backends
*orientation*
only currently applies to PostScript printing.
*dpi*
the dots per inch to save the figure in; if None, use savefig.dpi
*facecolor*
the facecolor of the figure; if None, defaults to savefig.facecolor
*edgecolor*
the edgecolor of the figure; if None, defaults to savefig.edgecolor
*orientation*
            'landscape' | 'portrait' (not supported on all backends)
*format*
when set, forcibly set the file format to save to
*bbox_inches*
Bbox in inches. Only the given portion of the figure is
saved. If 'tight', try to figure out the tight bbox of
the figure. If None, use savefig.bbox
*pad_inches*
Amount of padding around the figure when bbox_inches is
'tight'. If None, use savefig.pad_inches
*bbox_extra_artists*
A list of extra artists that will be considered when the
tight bbox is calculated.
"""
self._is_saving = True
if format is None:
# get format from filename, or from backend's default filetype
if cbook.is_string_like(filename):
format = os.path.splitext(filename)[1][1:]
if format is None or format == '':
format = self.get_default_filetype()
if cbook.is_string_like(filename):
filename = filename.rstrip('.') + '.' + format
format = format.lower()
# get canvas object and print method for format
canvas = self._get_output_canvas(format)
print_method = getattr(canvas, 'print_%s' % format)
if dpi is None:
dpi = rcParams['savefig.dpi']
if dpi == 'figure':
dpi = self.figure.dpi
if facecolor is None:
facecolor = rcParams['savefig.facecolor']
if edgecolor is None:
edgecolor = rcParams['savefig.edgecolor']
origDPI = self.figure.dpi
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.dpi = dpi
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
bbox_inches = kwargs.pop("bbox_inches", None)
if bbox_inches is None:
bbox_inches = rcParams['savefig.bbox']
if bbox_inches:
# call adjust_bbox to save only the given area
if bbox_inches == "tight":
# when bbox_inches == "tight", it saves the figure
# twice. The first save command is just to estimate
# the bounding box of the figure. A stringIO object is
# used as a temporary file object, but it causes a
# problem for some backends (ps backend with
# usetex=True) if they expect a filename, not a
# file-like object. As I think it is best to change
# the backend to support file-like object, i'm going
# to leave it as it is. However, a better solution
# than stringIO seems to be needed. -JJL
#result = getattr(self, method_name)
result = print_method(
io.BytesIO(),
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
dryrun=True,
**kwargs)
renderer = self.figure._cachedRenderer
bbox_inches = self.figure.get_tightbbox(renderer)
bbox_artists = kwargs.pop("bbox_extra_artists", None)
if bbox_artists is None:
bbox_artists = self.figure.get_default_bbox_extra_artists()
bbox_filtered = []
for a in bbox_artists:
bbox = a.get_window_extent(renderer)
if a.get_clip_on():
clip_box = a.get_clip_box()
if clip_box is not None:
bbox = Bbox.intersection(bbox, clip_box)
clip_path = a.get_clip_path()
if clip_path is not None and bbox is not None:
clip_path = clip_path.get_fully_transformed_path()
bbox = Bbox.intersection(bbox,
clip_path.get_extents())
if bbox is not None and (bbox.width != 0 or
bbox.height != 0):
bbox_filtered.append(bbox)
if bbox_filtered:
_bbox = Bbox.union(bbox_filtered)
trans = Affine2D().scale(1.0 / self.figure.dpi)
bbox_extra = TransformedBbox(_bbox, trans)
bbox_inches = Bbox.union([bbox_inches, bbox_extra])
pad = kwargs.pop("pad_inches", None)
if pad is None:
pad = rcParams['savefig.pad_inches']
bbox_inches = bbox_inches.padded(pad)
restore_bbox = tight_bbox.adjust_bbox(self.figure, bbox_inches,
canvas.fixed_dpi)
_bbox_inches_restore = (bbox_inches, restore_bbox)
else:
_bbox_inches_restore = None
try:
#result = getattr(self, method_name)(
result = print_method(
filename,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
bbox_inches_restore=_bbox_inches_restore,
**kwargs)
finally:
if bbox_inches and restore_bbox:
restore_bbox()
self.figure.dpi = origDPI
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
self.figure.set_canvas(self)
self._is_saving = False
#self.figure.canvas.draw() ## seems superfluous
return result
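    # Hedged usage sketch (added; not part of the original source): saving a
    # figure straight through its canvas, with the format inferred from the
    # filename extension and the bounding box tightened around the artists:
    #
    #     fig.canvas.print_figure('output.png', dpi=150, bbox_inches='tight')
    #
    # Figure.savefig ultimately funnels into this method.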
@classmethod
def get_default_filetype(cls):
"""
Get the default savefig file format as specified in rcParam
``savefig.format``. Returned string excludes period. Overridden
in backends that only support a single file type.
"""
return rcParams['savefig.format']
def get_window_title(self):
"""
Get the title text of the window containing the figure.
Return None if there is no window (e.g., a PS backend).
"""
if hasattr(self, "manager"):
return self.manager.get_window_title()
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (e.g., a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def get_default_filename(self):
"""
Return a string, which includes extension, suitable for use as
a default filename.
"""
default_basename = self.get_window_title() or 'image'
default_basename = default_basename.lower().replace(' ', '_')
default_filetype = self.get_default_filetype()
default_filename = default_basename + '.' + default_filetype
save_dir = os.path.expanduser(rcParams.get('savefig.directory', ''))
# ensure non-existing filename in save dir
i = 1
while os.path.isfile(os.path.join(save_dir, default_filename)):
# attach numerical count to basename
default_filename = '{0}-{1}.{2}'.format(default_basename, i, default_filetype)
i += 1
return default_filename
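    # Illustrative note (added; not part of the original source): a window
    # titled 'Figure 1' with a default filetype of 'png' yields
    # 'figure_1.png'; if that file already exists in the save directory the
    # counter kicks in, producing 'figure_1-1.png', 'figure_1-2.png', etc.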
def switch_backends(self, FigureCanvasClass):
"""
Instantiate an instance of FigureCanvasClass
This is used for backend switching, e.g., to instantiate a
FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is
not done, so any changes to one of the instances (e.g., setting
figure size or line props), will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
newCanvas._is_saving = self._is_saving
return newCanvas
def mpl_connect(self, s, func):
"""
Connect event with string *s* to *func*. The signature of *func* is::
def func(event)
where event is a :class:`matplotlib.backend_bases.Event`. The
following events are recognized
- 'button_press_event'
- 'button_release_event'
- 'draw_event'
- 'key_press_event'
- 'key_release_event'
- 'motion_notify_event'
- 'pick_event'
- 'resize_event'
- 'scroll_event'
- 'figure_enter_event',
- 'figure_leave_event',
- 'axes_enter_event',
- 'axes_leave_event'
- 'close_event'
For the location events (button and key press/release), if the
mouse is over the axes, the variable ``event.inaxes`` will be
set to the :class:`~matplotlib.axes.Axes` the event occurs is
over, and additionally, the variables ``event.xdata`` and
``event.ydata`` will be defined. This is the mouse location
in data coords. See
:class:`~matplotlib.backend_bases.KeyEvent` and
:class:`~matplotlib.backend_bases.MouseEvent` for more info.
Return value is a connection id that can be used with
:meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.
Example usage::
def on_press(event):
print('you pressed', event.button, event.xdata, event.ydata)
cid = canvas.mpl_connect('button_press_event', on_press)
"""
if s == 'idle_event':
warn_deprecated(1.5,
"idle_event is only implemented for the wx backend, and will "
"be removed in matplotlib 2.1. Use the animations module "
"instead.")
return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
"""
Disconnect callback id cid
Example usage::
cid = canvas.mpl_connect('button_press_event', on_press)
#...later
canvas.mpl_disconnect(cid)
"""
return self.callbacks.disconnect(cid)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting periodic
events through the backend's native event loop. Implemented only for
backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerBase(*args, **kwargs)
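    # Hedged usage sketch (added; not part of the original source, and
    # assuming the interval/callbacks signature described above): a timer
    # firing a hypothetical zero-argument ``update`` callable every ~100 ms:
    #
    #     timer = canvas.new_timer(interval=100, callbacks=[(update, [], {})])
    #     timer.start()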
def flush_events(self):
"""
Flush the GUI events for the figure. Implemented only for
backends with GUIs.
"""
raise NotImplementedError
def start_event_loop(self, timeout):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def stop_event_loop(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def start_event_loop_default(self, timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
functions for each of the GUI backends can be written. As
such, it throws a deprecated warning.
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
str = "Using default event loop until function specific"
str += " to this GUI is implemented"
warnings.warn(str, mplDeprecation)
if timeout <= 0:
timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter * timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
def stop_event_loop_default(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
"""
self._looping = False
def key_press_handler(event, canvas, toolbar=None):
"""
Implement the default mpl key bindings for the canvas and toolbar
described at :ref:`key-event-handling`
*event*
a :class:`KeyEvent` instance
*canvas*
a :class:`FigureCanvasBase` instance
*toolbar*
a :class:`NavigationToolbar2` instance
"""
# these bindings happen whether you are over an axes or not
if event.key is None:
return
# Load key-mappings from your matplotlibrc file.
fullscreen_keys = rcParams['keymap.fullscreen']
home_keys = rcParams['keymap.home']
back_keys = rcParams['keymap.back']
forward_keys = rcParams['keymap.forward']
pan_keys = rcParams['keymap.pan']
zoom_keys = rcParams['keymap.zoom']
save_keys = rcParams['keymap.save']
quit_keys = rcParams['keymap.quit']
grid_keys = rcParams['keymap.grid']
toggle_yscale_keys = rcParams['keymap.yscale']
toggle_xscale_keys = rcParams['keymap.xscale']
all = rcParams['keymap.all_axes']
# toggle fullscreen mode ('f', 'ctrl + f')
if event.key in fullscreen_keys:
try:
canvas.manager.full_screen_toggle()
except AttributeError:
pass
    # quit the figure (default key 'ctrl+w')
if event.key in quit_keys:
Gcf.destroy_fig(canvas.figure)
if toolbar is not None:
# home or reset mnemonic (default key 'h', 'home' and 'r')
if event.key in home_keys:
toolbar.home()
# forward / backward keys to enable left handed quick navigation
# (default key for backward: 'left', 'backspace' and 'c')
elif event.key in back_keys:
toolbar.back()
# (default key for forward: 'right' and 'v')
elif event.key in forward_keys:
toolbar.forward()
# pan mnemonic (default key 'p')
elif event.key in pan_keys:
toolbar.pan()
toolbar._set_cursor(event)
# zoom mnemonic (default key 'o')
elif event.key in zoom_keys:
toolbar.zoom()
toolbar._set_cursor(event)
# saving current figure (default key 's')
elif event.key in save_keys:
toolbar.save_figure()
if event.inaxes is None:
return
# these bindings require the mouse to be over an axes to trigger
# switching on/off a grid in current axes (default key 'g')
if event.key in grid_keys:
event.inaxes.grid()
canvas.draw()
    # toggle scaling of y-axes between 'log' and 'linear' (default key 'l')
elif event.key in toggle_yscale_keys:
ax = event.inaxes
scale = ax.get_yscale()
if scale == 'log':
ax.set_yscale('linear')
ax.figure.canvas.draw()
elif scale == 'linear':
try:
ax.set_yscale('log')
except ValueError as exc:
warnings.warn(str(exc))
ax.set_yscale('linear')
ax.figure.canvas.draw()
    # toggle scaling of x-axes between 'log' and 'linear' (default key 'k')
elif event.key in toggle_xscale_keys:
ax = event.inaxes
scalex = ax.get_xscale()
if scalex == 'log':
ax.set_xscale('linear')
ax.figure.canvas.draw()
elif scalex == 'linear':
try:
ax.set_xscale('log')
            except ValueError as exc:
                warnings.warn(str(exc))
ax.set_xscale('linear')
ax.figure.canvas.draw()
elif (event.key.isdigit() and event.key != '0') or event.key in all:
# keys in list 'all' enables all axes (default key 'a'),
# otherwise if key is a number only enable this particular axes
# if it was the axes, where the event was raised
if not (event.key in all):
n = int(event.key) - 1
for i, a in enumerate(canvas.figure.get_axes()):
# consider axes, in which the event was raised
# FIXME: Why only this axes?
if event.x is not None and event.y is not None \
and a.in_axes(event):
if event.key in all:
a.set_navigate(True)
else:
a.set_navigate(i == n)
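# Hedged usage sketch (added; not part of the original source): when embedding
# a canvas in a custom GUI, the default key bindings can be wired up manually:
#
#     canvas.mpl_connect('key_press_event',
#                        lambda event: key_press_handler(event, canvas,
#                                                        canvas.toolbar))
#
# FigureManagerBase below does essentially this via its key_press method when
# the classic toolbar is active.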
class NonGuiException(Exception):
pass
class FigureManagerBase(object):
"""
Helper class for pyplot mode, wraps everything up into a neat bundle
    Public attributes:
*canvas*
A :class:`FigureCanvasBase` instance
*num*
The figure number
"""
def __init__(self, canvas, num):
self.canvas = canvas
canvas.manager = self # store a pointer to parent
self.num = num
if rcParams['toolbar'] != 'toolmanager':
self.key_press_handler_id = self.canvas.mpl_connect(
'key_press_event',
self.key_press)
else:
self.key_press_handler_id = None
"""
The returned id from connecting the default key handler via
        :meth:`FigureCanvasBase.mpl_connect`.
To disable default key press handling::
manager, canvas = figure.canvas.manager, figure.canvas
canvas.mpl_disconnect(manager.key_press_handler_id)
"""
def show(self):
"""
For GUI backends, show the figure window and redraw.
For non-GUI backends, raise an exception to be caught
by :meth:`~matplotlib.figure.Figure.show`, for an
optional warning.
"""
raise NonGuiException()
def destroy(self):
pass
def full_screen_toggle(self):
pass
def resize(self, w, h):
""""For gui backends, resize the window (in pixels)."""
pass
def key_press(self, event):
"""
Implement the default mpl key bindings defined at
:ref:`key-event-handling`
"""
if rcParams['toolbar'] != 'toolmanager':
key_press_handler(event, self.canvas, self.canvas.toolbar)
def show_popup(self, msg):
"""
Display message in a popup -- GUI only
"""
pass
def get_window_title(self):
"""
Get the title text of the window containing the figure.
Return None for non-GUI backends (e.g., a PS backend).
"""
return 'image'
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect for non-GUI backends (e.g., a PS backend).
"""
pass
cursors = tools.cursors
class NavigationToolbar2(object):
"""
Base class for the navigation cursor, version 2
backends must implement a canvas that handles connections for
'button_press_event' and 'button_release_event'. See
:meth:`FigureCanvasBase.mpl_connect` for more information
They must also define
:meth:`save_figure`
save the current figure
:meth:`set_cursor`
if you want the pointer icon to change
:meth:`_init_toolbar`
create your toolbar widget
:meth:`draw_rubberband` (optional)
draw the zoom to rect "rubberband" rectangle
:meth:`press` (optional)
whenever a mouse button is pressed, you'll be notified with
the event
:meth:`release` (optional)
whenever a mouse button is released, you'll be notified with
the event
:meth:`dynamic_update` (optional)
dynamically update the window while navigating
:meth:`set_message` (optional)
display message
:meth:`set_history_buttons` (optional)
you can change the history back / forward buttons to
indicate disabled / enabled state.
That's it, we'll do the rest!
"""
# list of toolitems to add to the toolbar, format is:
# (
# text, # the text of the button (often not visible to users)
# tooltip_text, # the tooltip shown on hover (where possible)
# image_file, # name of the image for the button (without the extension)
# name_of_method, # name of the method in NavigationToolbar2 to call
# )
toolitems = (
('Home', 'Reset original view', 'home', 'home'),
('Back', 'Back to previous view', 'back', 'back'),
('Forward', 'Forward to next view', 'forward', 'forward'),
(None, None, None, None),
('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),
('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),
(None, None, None, None),
('Save', 'Save the figure', 'filesave', 'save_figure'),
)
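    # Hedged sketch (added; not part of the original source): a concrete
    # backend toolbar can trim or extend the default buttons by overriding
    # ``toolitems`` while keeping the (text, tooltip, image, method) layout
    # described above, e.g. dropping the subplot-configuration tool:
    #
    #     class SlimToolbar(NavigationToolbar2Foo):  # Foo = some backend class
    #         toolitems = tuple(t for t in NavigationToolbar2.toolitems
    #                           if t[0] != 'Subplots')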
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# a dict from axes index to a list of view limits
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time
# of the press
self._idPress = None
self._idRelease = None
self._active = None
self._lastCursor = None
self._init_toolbar()
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
self._ids_zoom = []
self._zoom_mode = None
self._button_pressed = None # determined by the button pressed
# at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
def set_message(self, s):
"""Display a message on toolbar or in status bar"""
pass
def back(self, *args):
"""move back up the view lim stack"""
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
def dynamic_update(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
"""Draw a rectangle rubberband to indicate zoom limits"""
pass
def remove_rubberband(self):
"""Remove the rubberband"""
pass
def forward(self, *args):
"""Move forward in the view lim stack"""
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
"""Restore the original view"""
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
def _set_cursor(self, event):
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if self._active == 'ZOOM':
if self._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
elif (self._active == 'PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
def mouse_move(self, event):
self._set_cursor(event)
if event.inaxes and event.inaxes.get_navigate():
try:
s = event.inaxes.format_coord(event.xdata, event.ydata)
except (ValueError, OverflowError):
pass
else:
artists = [a for a in event.inaxes.mouseover_set
if a.contains(event) and a.get_visible()]
if artists:
a = max(artists, key=lambda x: x.zorder)
if a is not event.inaxes.patch:
data = a.get_cursor_data(event)
if data is not None:
s += ' [%s]' % a.format_cursor_data(data)
if len(self.mode):
self.set_message('%s, %s' % (self.mode, s))
else:
self.set_message(s)
else:
self.set_message(self.mode)
def pan(self, *args):
"""Activate the pan/zoom tool. pan with left button, zoom with right"""
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def press(self, event):
"""Called whenver a mouse button is pressed."""
pass
def press_pan(self, event):
"""the press mouse button in pan/zoom mode callback"""
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty():
self.push_current()
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_pan()):
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect('motion_notify_event',
self.drag_pan)
self.press(event)
def press_zoom(self, event):
"""the press mouse button in zoom to rect mode callback"""
# If we're already in the middle of a zoom, pressing another
# button works to "cancel"
if self._ids_zoom != []:
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self.release(event)
self.draw()
self._xypress = None
self._button_pressed = None
self._ids_zoom = []
return
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty():
self.push_current()
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_zoom()):
self._xypress.append((x, y, a, i, a._get_view()))
id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)
id2 = self.canvas.mpl_connect('key_press_event',
self._switch_on_zoom_mode)
id3 = self.canvas.mpl_connect('key_release_event',
self._switch_off_zoom_mode)
self._ids_zoom = id1, id2, id3
self._zoom_mode = event.key
self.press(event)
def _switch_on_zoom_mode(self, event):
self._zoom_mode = event.key
self.mouse_move(event)
def _switch_off_zoom_mode(self, event):
self._zoom_mode = None
self.mouse_move(event)
def push_current(self):
"""push the current view limits and position onto the stack"""
views = []
pos = []
for a in self.canvas.figure.get_axes():
views.append(a._get_view())
# Store both the original and modified positions
pos.append((
a.get_position(True).frozen(),
a.get_position().frozen()))
self._views.push(views)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
"""this will be called whenever mouse button is released"""
pass
def release_pan(self, event):
"""the release mouse button callback in pan/zoom mode"""
if self._button_pressed is None:
return
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress:
return
self._xypress = []
self._button_pressed = None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
"""the drag callback in pan/zoom mode"""
for a, ind in self._xypress:
#safer to use the recorded button at the press than current button:
#multiple button can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.dynamic_update()
def drag_zoom(self, event):
"""the drag callback in zoom mode"""
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, view = self._xypress[0]
            # clamp x, lastx, y, lasty to the axes bounding box
x1, y1, x2, y2 = a.bbox.extents
x, lastx = max(min(x, lastx), x1), min(max(x, lastx), x2)
y, lasty = max(min(y, lasty), y1), min(max(y, lasty), y2)
if self._zoom_mode == "x":
x1, y1, x2, y2 = a.bbox.extents
y, lasty = y1, y2
elif self._zoom_mode == "y":
x1, y1, x2, y2 = a.bbox.extents
x, lastx = x1, x2
self.draw_rubberband(event, x, y, lastx, lasty)
def release_zoom(self, event):
"""the release mouse button callback in zoom to rect mode"""
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self._ids_zoom = []
self.remove_rubberband()
if not self._xypress:
return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, view = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
# allows the user to "cancel" a zoom action
# by zooming by less than 5 pixels
if ((abs(x - lastx) < 5 and self._zoom_mode!="y") or
(abs(y - lasty) < 5 and self._zoom_mode!="x")):
self._xypress = None
self.release(event)
self.draw()
return
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a, la):
twinx = True
if a.get_shared_y_axes().joined(a, la):
twiny = True
last_a.append(a)
if self._button_pressed == 1:
direction = 'in'
elif self._button_pressed == 3:
direction = 'out'
else:
continue
a._set_view_from_bbox((lastx, lasty, x, y), direction,
self._zoom_mode, twinx, twiny)
self.draw()
self._xypress = None
self._button_pressed = None
self._zoom_mode = None
self.push_current()
self.release(event)
def draw(self):
"""Redraw the canvases, update the locators"""
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw_idle()
def _update_view(self):
"""Update the viewlim and position from the view and
position stack for each axes
"""
views = self._views()
if views is None:
return
pos = self._positions()
if pos is None:
return
for i, a in enumerate(self.canvas.figure.get_axes()):
a._set_view(views[i])
# Restore both the original and modified positions
a.set_position(pos[i][0], 'original')
a.set_position(pos[i][1], 'active')
self.canvas.draw_idle()
def save_figure(self, *args):
"""Save the current figure"""
raise NotImplementedError
def set_cursor(self, cursor):
"""
Set the current cursor to one of the :class:`Cursors`
enums values
"""
pass
def update(self):
"""Reset the axes stack"""
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
"""Activate zoom to rect mode"""
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event',
self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event',
self.release_zoom)
self.mode = 'zoom rect'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
"""Enable or disable back/forward button"""
pass
class ToolContainerBase(object):
"""
Base class for all tool containers, e.g. toolbars.
Attributes
----------
toolmanager : `ToolManager` object that holds the tools that
this `ToolContainer` wants to communicate with.
"""
def __init__(self, toolmanager):
self.toolmanager = toolmanager
self.toolmanager.toolmanager_connect('tool_removed_event',
self._remove_tool_cbk)
def _tool_toggled_cbk(self, event):
"""
Captures the 'tool_trigger_[name]'
This only gets used for toggled tools
"""
self.toggle_toolitem(event.tool.name, event.tool.toggled)
def add_tool(self, tool, group, position=-1):
"""
Adds a tool to this container
Parameters
----------
tool : tool_like
The tool to add, see `ToolManager.get_tool`.
group : str
The name of the group to add this tool to.
position : int (optional)
The position within the group to place this tool. Defaults to end.
"""
tool = self.toolmanager.get_tool(tool)
image = self._get_image_filename(tool.image)
toggle = getattr(tool, 'toggled', None) is not None
self.add_toolitem(tool.name, group, position,
image, tool.description, toggle)
if toggle:
self.toolmanager.toolmanager_connect('tool_trigger_%s' % tool.name,
self._tool_toggled_cbk)
def _remove_tool_cbk(self, event):
"""Captures the 'tool_removed_event' signal and removes the tool"""
self.remove_toolitem(event.tool.name)
def _get_image_filename(self, image):
"""Find the image based on its name"""
# TODO: better search for images, they are not always in the
# datapath
basedir = os.path.join(rcParams['datapath'], 'images')
if image is not None:
fname = os.path.join(basedir, image)
else:
fname = None
return fname
def trigger_tool(self, name):
"""
Trigger the tool
Parameters
----------
name : String
Name(id) of the tool triggered from within the container
"""
self.toolmanager.trigger_tool(name, sender=self)
def add_toolitem(self, name, group, position, image, description, toggle):
"""
Add a toolitem to the container
This method must get implemented per backend
        The callback associated with the button click event
        must be **EXACTLY** `self.trigger_tool(name)`
Parameters
----------
name : string
Name of the tool to add, this gets used as the tool's ID and as the
default label of the buttons
group : String
Name of the group that this tool belongs to
position : Int
Position of the tool within its group, if -1 it goes at the End
image_file : String
Filename of the image for the button or `None`
description : String
Description of the tool, used for the tooltips
toggle : Bool
* `True` : The button is a toggle (change the pressed/unpressed
state between consecutive clicks)
* `False` : The button is a normal button (returns to unpressed
state after release)
"""
raise NotImplementedError
def toggle_toolitem(self, name, toggled):
"""
Toggle the toolitem without firing event
Parameters
----------
name : String
Id of the tool to toggle
toggled : bool
Whether to set this tool as toggled or not.
"""
raise NotImplementedError
def remove_toolitem(self, name):
"""
Remove a toolitem from the `ToolContainer`
This method must get implemented per backend
Called when `ToolManager` emits a `tool_removed_event`
Parameters
----------
name : string
Name of the tool to remove
"""
raise NotImplementedError
class StatusbarBase(object):
"""Base class for the statusbar"""
def __init__(self, toolmanager):
self.toolmanager = toolmanager
self.toolmanager.toolmanager_connect('tool_message_event',
self._message_cbk)
def _message_cbk(self, event):
"""Captures the 'tool_message_event' and set the message"""
self.set_message(event.message)
def set_message(self, s):
"""
Display a message on toolbar or in status bar
Parameters
----------
s : str
Message text
"""
pass
| apache-2.0 |
stevenzhang18/Indeed-Flask | lib/pandas/io/tests/test_cparser.py | 9 | 12962 | """
C/Cython ascii file parser tests
"""
from pandas.compat import StringIO, BytesIO, map
from datetime import datetime
from pandas import compat
import csv
import os
import sys
import re
import nose
from numpy import nan
import numpy as np
from pandas import DataFrame, Series, Index, isnull, MultiIndex
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextParser, TextFileReader)
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
assert_series_equal, network)
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
import pandas.util.testing as tm
from pandas.parser import TextReader
import pandas.parser as parser
class TestCParser(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def test_file_handle(self):
try:
f = open(self.csv1, 'rb')
reader = TextReader(f)
result = reader.read()
finally:
f.close()
def test_string_filename(self):
reader = TextReader(self.csv1, header=None)
result = reader.read()
def test_file_handle_mmap(self):
try:
f = open(self.csv1, 'rb')
reader = TextReader(f, memory_map=True, header=None)
result = reader.read()
finally:
f.close()
def test_StringIO(self):
text = open(self.csv1, 'rb').read()
src = BytesIO(text)
reader = TextReader(src, header=None)
result = reader.read()
def test_string_factorize(self):
# should this be optional?
data = 'a\nb\na\nb\na'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
self.assertEqual(len(set(map(id, result[0]))), 2)
def test_skipinitialspace(self):
data = ('a, b\n'
'a, b\n'
'a, b\n'
'a, b')
reader = TextReader(StringIO(data), skipinitialspace=True,
header=None)
result = reader.read()
self.assert_numpy_array_equal(result[0], ['a', 'a', 'a', 'a'])
self.assert_numpy_array_equal(result[1], ['b', 'b', 'b', 'b'])
def test_parse_booleans(self):
data = 'True\nFalse\nTrue\nTrue'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
self.assertEqual(result[0].dtype, np.bool_)
def test_delimit_whitespace(self):
data = 'a b\na\t\t "b"\n"a"\t \t b'
reader = TextReader(StringIO(data), delim_whitespace=True,
header=None)
result = reader.read()
self.assert_numpy_array_equal(result[0], ['a', 'a', 'a'])
self.assert_numpy_array_equal(result[1], ['b', 'b', 'b'])
def test_embedded_newline(self):
data = 'a\n"hello\nthere"\nthis'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
expected = ['a', 'hello\nthere', 'this']
self.assert_numpy_array_equal(result[0], expected)
def test_euro_decimal(self):
data = '12345,67\n345,678'
reader = TextReader(StringIO(data), delimiter=':',
decimal=',', header=None)
result = reader.read()
expected = [12345.67, 345.678]
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands(self):
data = '123,456\n12,500'
reader = TextReader(StringIO(data), delimiter=':',
thousands=',', header=None)
result = reader.read()
expected = [123456, 12500]
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands_alt(self):
data = '123.456\n12.500'
reader = TextFileReader(StringIO(data), delimiter=':',
thousands='.', header=None)
result = reader.read()
expected = [123456, 12500]
tm.assert_almost_equal(result[0], expected)
def test_skip_bad_lines(self):
# too many lines, see #2430 for why
data = ('a:b:c\n'
'd:e:f\n'
'g:h:i\n'
'j:k:l:m\n'
'l:m:n\n'
'o:p:q:r')
reader = TextReader(StringIO(data), delimiter=':',
header=None)
self.assertRaises(parser.CParserError, reader.read)
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=False)
result = reader.read()
expected = {0: ['a', 'd', 'g', 'l'],
1: ['b', 'e', 'h', 'm'],
2: ['c', 'f', 'i', 'n']}
assert_array_dicts_equal(result, expected)
stderr = sys.stderr
sys.stderr = StringIO()
try:
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=True)
reader.read()
val = sys.stderr.getvalue()
self.assertTrue('Skipping line 4' in val)
self.assertTrue('Skipping line 6' in val)
finally:
sys.stderr = stderr
def test_header_not_enough_lines(self):
data = ('skip this\n'
'skip this\n'
'a,b,c\n'
'1,2,3\n'
'4,5,6')
reader = TextReader(StringIO(data), delimiter=',', header=2)
header = reader.header
expected = [['a', 'b', 'c']]
self.assertEqual(header, expected)
recs = reader.read()
expected = {0 : [1, 4], 1 : [2, 5], 2 : [3, 6]}
assert_array_dicts_equal(expected, recs)
# not enough rows
self.assertRaises(parser.CParserError, TextReader, StringIO(data),
delimiter=',', header=5, as_recarray=True)
def test_header_not_enough_lines_as_recarray(self):
if compat.is_platform_windows():
raise nose.SkipTest("segfaults on win-64, only when all tests are run")
data = ('skip this\n'
'skip this\n'
'a,b,c\n'
'1,2,3\n'
'4,5,6')
reader = TextReader(StringIO(data), delimiter=',', header=2,
as_recarray=True)
header = reader.header
expected = [['a', 'b', 'c']]
self.assertEqual(header, expected)
recs = reader.read()
expected = {'a': [1, 4], 'b': [2, 5], 'c': [3, 6]}
assert_array_dicts_equal(expected, recs)
# not enough rows
self.assertRaises(parser.CParserError, TextReader, StringIO(data),
delimiter=',', header=5, as_recarray=True)
def test_escapechar(self):
data = ('\\"hello world\"\n'
'\\"hello world\"\n'
'\\"hello world\"')
reader = TextReader(StringIO(data), delimiter=',', header=None,
escapechar='\\')
result = reader.read()
expected = {0: ['"hello world"'] * 3}
assert_array_dicts_equal(result, expected)
def test_eof_has_eol(self):
# handling of new line at EOF
pass
def test_na_substitution(self):
pass
def test_numpy_string_dtype(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', header=None,
**kwds)
reader = _make_reader(dtype='S5,i4')
result = reader.read()
self.assertEqual(result[0].dtype, 'S5')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaaa'], dtype='S5')
self.assertTrue((result[0] == ex_values).all())
self.assertEqual(result[1].dtype, 'i4')
reader = _make_reader(dtype='S4')
result = reader.read()
self.assertEqual(result[0].dtype, 'S4')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
self.assertTrue((result[0] == ex_values).all())
self.assertEqual(result[1].dtype, 'S4')
def test_numpy_string_dtype_as_recarray(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
if compat.is_platform_windows():
raise nose.SkipTest("segfaults on win-64, only when all tests are run")
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', header=None,
**kwds)
reader = _make_reader(dtype='S4', as_recarray=True)
result = reader.read()
self.assertEqual(result['0'].dtype, 'S4')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
self.assertTrue((result['0'] == ex_values).all())
self.assertEqual(result['1'].dtype, 'S4')
def test_pass_dtype(self):
data = """\
one,two
1,a
2,b
3,c
4,d"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', **kwds)
reader = _make_reader(dtype={'one': 'u1', 1: 'S1'})
result = reader.read()
self.assertEqual(result[0].dtype, 'u1')
self.assertEqual(result[1].dtype, 'S1')
reader = _make_reader(dtype={'one': np.uint8, 1: object})
result = reader.read()
self.assertEqual(result[0].dtype, 'u1')
self.assertEqual(result[1].dtype, 'O')
reader = _make_reader(dtype={'one': np.dtype('u1'),
1: np.dtype('O')})
result = reader.read()
self.assertEqual(result[0].dtype, 'u1')
self.assertEqual(result[1].dtype, 'O')
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', **kwds)
reader = _make_reader(usecols=(1, 2))
result = reader.read()
exp = _make_reader().read()
self.assertEqual(len(result), 2)
self.assertTrue((result[1] == exp[1]).all())
self.assertTrue((result[2] == exp[2]).all())
def test_cr_delimited(self):
def _test(text, **kwargs):
nice_text = text.replace('\r', '\r\n')
result = TextReader(StringIO(text), **kwargs).read()
expected = TextReader(StringIO(nice_text), **kwargs).read()
assert_array_dicts_equal(result, expected)
data = 'a,b,c\r1,2,3\r4,5,6\r7,8,9\r10,11,12'
_test(data, delimiter=',')
data = 'a b c\r1 2 3\r4 5 6\r7 8 9\r10 11 12'
_test(data, delim_whitespace=True)
data = 'a,b,c\r1,2,3\r4,5,6\r,88,9\r10,11,12'
_test(data, delimiter=',')
sample = ('A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\r'
'AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\r'
',BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0')
_test(sample, delimiter=',')
data = 'A B C\r 2 3\r4 5 6'
_test(data, delim_whitespace=True)
data = 'A B C\r2 3\r4 5 6'
_test(data, delim_whitespace=True)
def test_empty_field_eof(self):
data = 'a,b,c\n1,2,3\n4,,'
result = TextReader(StringIO(data), delimiter=',').read()
expected = {0: np.array([1, 4]),
1: np.array(['2', ''], dtype=object),
2: np.array(['3', ''], dtype=object)}
assert_array_dicts_equal(result, expected)
# GH5664
a = DataFrame([['b'], [nan]], columns=['a'], index=['a', 'c'])
b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]],
columns=list('abcd'),
index=[1, 1])
c = DataFrame([[1, 2, 3, 4], [6, nan, nan, nan],
[8, 9, 10, 11], [13, 14, nan, nan]],
columns=list('abcd'),
index=[0, 5, 7, 12])
for _ in range(100):
df = read_csv(StringIO('a,b\nc\n'), skiprows=0,
names=['a'], engine='c')
assert_frame_equal(df, a)
df = read_csv(StringIO('1,1,1,1,0\n'*2 + '\n'*2),
names=list("abcd"), engine='c')
assert_frame_equal(df, b)
df = read_csv(StringIO('0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14'),
names=list('abcd'), engine='c')
assert_frame_equal(df, c)
def assert_array_dicts_equal(left, right):
for k, v in compat.iteritems(left):
assert(np.array_equal(v, right[k]))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
quimaguirre/diana | scripts/old_scripts/classify_drug_combinations.py | 1 | 22397 | import argparse
import cPickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pylab
import scipy
import time
import sys, os, re
from context import diana
import diana.classes.comparison as diana_comparison
import diana.classes.analysis as diana_analysis
def main():
options = parse_user_arguments()
analysis_results(options)
def parse_user_arguments(*args, **kwds):
"""
Parses the arguments of the program
"""
parser = argparse.ArgumentParser(
description = "Generate the profiles of the input drug",
epilog = "@oliva's lab 2017")
parser.add_argument('-th','--threshold_list',dest='threshold_list',action = 'store',
help = """List of percentages that will be used as cut-offs to define the profiles of the drugs. It has to be a file containing:
- Different numbers that will be the threshold values separated by newline characters.
For example, a file called "top_threshold.list" containing:
0.1
0.5
1
5
10
""")
parser.add_argument('-f','--formula',dest='formula',action = 'store',default='simpson',
help = """Define the formula used to classify. It can be: simpson, jaccard""")
parser.add_argument('-se','--consider_se',dest='consider_se',action = 'store_true',
help = """" Consider Side Effects / ATCs. """)
parser.add_argument('-ws','--workspace',dest='workspace',action = 'store',default=os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'workspace'),
help = """Define the workspace directory where the data directory and the results directory will be created""")
options=parser.parse_args()
return options
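# Hedged usage sketch (added; not part of the original script): a typical
# invocation with the options defined above might look like
#
#     python classify_drug_combinations.py -th top_threshold.list -f jaccard -se
#
# where top_threshold.list is a plain-text file with one threshold per line.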
#################
#################
# MAIN FUNCTION #
#################
#################
def analysis_results(options):
"""
Analyzes the results of the comparisons
"""
# Start marker for time measure
start = time.time()
print("\n\t\t-------------------------------------------------------------------------------------------------------------------------------\n")
print("\t\tStarting Drug Interactions ANAlysis (DIANA), a program created by @OLIVA'S LAB. Analysis of results: Classify drug combinations\n")
print("\t\t-------------------------------------------------------------------------------------------------------------------------------\n")
# Get the script path
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
toolbox_dir = os.path.join(main_path, 'diana/toolbox')
# Check the directory of the profiles, comparisons and analysis
data_dir = os.path.join(options.workspace, "profiles")
check_directory(data_dir)
results_dir = os.path.join(options.workspace, "comparisons")
check_directory(results_dir)
analysis_dir = os.path.join(options.workspace, "analysis")
check_directory(analysis_dir)
# Get the list of thresholds to create the profiles
if options.threshold_list and fileExist(options.threshold_list):
threshold_list = get_values_from_threshold_file(options.threshold_list)
else:
threshold_list = [1, 5, 10, 20, 50]
# Do we consider Side Effects/ATC?
if options.consider_se:
consider_se = True
else:
consider_se = False
# Get the names of the columns
columns = diana_analysis.obtain_columns(threshold_list, ATC_SE=consider_se)
#-----------------------------------------------------#
# PARSE THE RESULTS AND CREATE A PANDAS DATAFRAME #
#-----------------------------------------------------#
pair2comb_file = os.path.join(toolbox_dir, 'pair2comb.pcl')
pair2comb = cPickle.load(open(pair2comb_file))
diana_id_to_drugbank_file = os.path.join(toolbox_dir, 'diana_id_to_drugbank.pcl')
diana_id_to_drugbank = cPickle.load(open(diana_id_to_drugbank_file))
ddi = sum(1 for x in pair2comb.values() if x == 1)
non_ddi = sum(1 for x in pair2comb.values() if x == 0)
print('NUMBER OF DRUG COMBINATIONS:\t\t{}\n'.format(ddi))
print('NUMBER OF NON-DRUG COMBINATIONS:\t{}\n'.format(non_ddi))
output_dataframe = os.path.join(analysis_dir, 'dcdb_comparisons.csv')
if not fileExist(output_dataframe):
# Create a data frame to store the results
df = pd.DataFrame(columns=columns)
# Obtain all the results subfolders of the results main folder
results_dir_list = [f for f in os.listdir(results_dir) if os.path.isdir(os.path.join(results_dir, f))]
for comparison in results_dir_list:
drug_id1, drug_id2 = comparison.split('---')
comparison_dir = os.path.join(results_dir, comparison)
results_table = os.path.join(comparison_dir, 'results_table.tsv')
# Add the Comb field (if it is drug combination or not)
drug1 = diana_id_to_drugbank[drug_id1].upper()
drug2 = diana_id_to_drugbank[drug_id2].upper()
comparison_without_id = '{}---{}'.format(drug1, drug2)
if comparison_without_id in pair2comb:
combination_field = pair2comb[comparison_without_id]
else:
print('The comparison {} is not in the pair2comb dictionary!\n'.format(comparison_without_id))
print(pair2comb)
sys.exit(10)
if not fileExist(results_table):
print('The comparison {} has not been executed properly!\n'.format(comparison))
sys.exit(10)
results = diana_analysis.get_results_from_table(results_table, columns, combination_field)
df2 = pd.DataFrame([results], columns=columns, index=[comparison])
# Add the information to the main data frame
df = df.append(df2)
# Output the Pandas dataframe in a CSV file
df.to_csv(output_dataframe)
else:
df = pd.read_csv(output_dataframe, index_col=0)
#---------------------------#
# REMOVE MISSING VALUES #
#---------------------------#
# Replace the None values in dcstructure by nan
if 'None' in df['dcstructure']:
df = df.replace(to_replace={'dcstructure':{'None':np.nan}})
# Remove the nan values in dcstructure
df = df.dropna()
# Count the number of drug combinations / non-drug combinations
dc_data = df[df['combination'] == 1]
ndc_data = df[df['combination'] == 0]
num_dc = len(dc_data.index)
num_ndc = len(ndc_data.index)
print('Number of drug combinations after removing missing values:\t{}\n'.format(num_dc))
print('Number of non-drug combinations after removing missing values:\t{}\n'.format(num_ndc))
#---------------------------#
# IDENTIFY ME-TOO DRUGS #
#---------------------------#
me_too_dir = os.path.join(analysis_dir, 'me_too_drugs')
create_directory(me_too_dir)
me_too_drugs_table = os.path.join(me_too_dir, 'me_too_drugs.tsv')
me_too_drug_combs_table = os.path.join(me_too_dir, 'me_too_drug_combinations.tsv')
me_too_drug_pairs_file = os.path.join(me_too_dir, 'me_too_drug_pairs.pcl')
me_too_drug_comb_pairs_file = os.path.join(me_too_dir, 'me_too_drug_comb_pairs.pcl')
if not fileExist(me_too_drug_pairs_file) or not fileExist(me_too_drug_comb_pairs_file):
df_struc = df[['dcstructure']]
df_struc = df_struc.astype(float)
me_too_drug_pairs, me_too_drug_comb_pairs = diana_analysis.obtain_me_too_drugs_and_combinations(df_struc, columns, me_too_drugs_table, me_too_drug_combs_table)
cPickle.dump(me_too_drug_pairs, open(me_too_drug_pairs_file, 'w'))
cPickle.dump(me_too_drug_comb_pairs, open(me_too_drug_comb_pairs_file, 'w'))
else:
me_too_drug_pairs = cPickle.load(open(me_too_drug_pairs_file))
me_too_drug_comb_pairs = cPickle.load(open(me_too_drug_comb_pairs_file))
# Process me-too drug combination pairs
me_too_drug_combinations = set()
drug_pair_to_me_too_times = {}
for pair in me_too_drug_comb_pairs:
drug_comb1, drug_comb2 = pair.split('___')
me_too_drug_combinations.add(frozenset([drug_comb1, drug_comb2]))
drug_pair_to_me_too_times.setdefault(drug_comb1, 0)
drug_pair_to_me_too_times.setdefault(drug_comb2, 0)
drug_pair_to_me_too_times[drug_comb1] += 1
drug_pair_to_me_too_times[drug_comb2] += 1
removed_drug_pairs = set()
for pair in me_too_drug_comb_pairs:
drug_comb1, drug_comb2 = pair.split('___')
if drug_comb1 in removed_drug_pairs or drug_comb2 in removed_drug_pairs:
continue
if drug_pair_to_me_too_times[drug_comb1] > drug_pair_to_me_too_times[drug_comb2]:
removed_drug_pairs.add(drug_comb1)
else:
removed_drug_pairs.add(drug_comb2)
# Remove the drug pairs which appear in me-too pairs of drug pairs more times
df = df.loc[~df.index.isin(list(removed_drug_pairs))]
# Count the number of drug combinations / non-drug combinations
dc_data = df[df['combination'] == 1]
ndc_data = df[df['combination'] == 0]
num_dc = len(dc_data.index)
num_ndc = len(ndc_data.index)
print('Number of drug combinations after removing me-too conflictive drug pairs:\t{}\n'.format(num_dc))
print('Number of non-drug combinations after removing me-too conflictive drug pairs:\t{}\n'.format(num_ndc))
img_dir = os.path.join(analysis_dir, 'figures')
create_directory(img_dir)
fig_format = 'png'
#-----------------------------------------------------#
# PLOT DISTRIBUTION OF NUMBER OF TARGETS PER DRUG #
#-----------------------------------------------------#
# Plot distribution of comparisons of targets
drugbank2targets_file = os.path.join(toolbox_dir, 'drugbank_to_targets.pcl')
drugbank_to_targets = cPickle.load(open(drugbank2targets_file))
plot_distribution_targets = os.path.join(img_dir, 'distribution_number_targets.{}'.format(fig_format))
targets = [len(x) for x in drugbank_to_targets.values()]
n, bins, patches = plt.hist(np.array(targets), bins=50, weights=np.zeros_like(np.array(targets)) + 1. / np.array(targets).size, facecolor='r')
plt.xlabel('Number of targets per drug')
plt.ylabel('Relative frequency')
plt.title('Distribution of the number of targets per drug')
plt.savefig(plot_distribution_targets, format=fig_format, dpi=300)
plt.clf()
#----------------------------------------------------------------------------------------------#
# EVALUATE OVERLAP BETWEEN TARGETS, BIOLOGICAL PROCESSES AND PATHWAYS IN DRUG COMBINATIONS #
#----------------------------------------------------------------------------------------------#
tables_dir = os.path.join(analysis_dir, 'tables')
create_directory(tables_dir)
if options.formula != 'jaccard' and options.formula != 'simpson':
print('Please, introduce a correct formula to classify drug combinations: jaccard or simpson!\n')
sys.exit(10)
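    # Added note (not in the original script): assuming the diana_comparison
    # helpers implement the standard overlap measures, for two target sets
    # A and B:
    #     Jaccard index = |A & B| / |A | B|
    #     Simpson index = |A & B| / min(|A|, |B|)
    # e.g. A = {P1, P2, P3}, B = {P2, P3, P4} gives Jaccard 2/4 = 0.5 and
    # Simpson 2/3 ~ 0.67.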
# Plot of distribution of comparisons of Targets
plot_ji_targets = os.path.join(img_dir, 'distribution_{}_index_targets.{}'.format(options.formula, fig_format))
# Plot of distribution of comparisons of Biological Processes
plot_ji_bp = os.path.join(img_dir, 'distribution_{}_index_biological_processes.{}'.format(options.formula, fig_format))
# Plot of distribution of comparisons of Pathways
plot_ji_pathways = os.path.join(img_dir, 'distribution_{}_index_pathways.{}'.format(options.formula, fig_format))
# Output pickle file of the classification
classification_targets_bp_file = os.path.join(toolbox_dir, 'classification_targets_bp.pcl')
classification_targets_pathways_file = os.path.join(toolbox_dir, 'classification_targets_pathways.pcl')
# Get the classification files
drug_int_2_drugs_file = os.path.join(toolbox_dir, 'drug_int_2_drugs.pcl')
drug_int_2_drugs = cPickle.load(open(drug_int_2_drugs_file))
drug_int_2_info_file = os.path.join(toolbox_dir, 'drug_int_2_info.pcl')
drug_int_2_info = cPickle.load(open(drug_int_2_info_file))
drugbank_to_dcdb_file = os.path.join(toolbox_dir, 'drugbank_to_dcdb.pcl')
drugbank_to_dcdb = cPickle.load(open(drugbank_to_dcdb_file))
bio_processes_file = os.path.join(toolbox_dir, 'target_to_bio_processes.pcl')
target_to_bio_processes = cPickle.load(open(bio_processes_file))
pathways_file = os.path.join(toolbox_dir, 'target_to_pathways.pcl')
target_to_pathways = cPickle.load(open(pathways_file))
target_comparisons = []
bp_comparisons = []
pathway_comparisons = []
dc_to_target_ji = {}
dc_to_bp_ji = {}
dc_to_pathway_ji = {}
all_drugs = set()
for index, row in dc_data.iterrows():
(drug_id1, drug_id2) = index.split('---')
drug1 = diana_id_to_drugbank[drug_id1].upper()
drug2 = diana_id_to_drugbank[drug_id2].upper()
all_drugs.add(drug1)
all_drugs.add(drug2)
if drug1 in drugbank_to_targets and drug2 in drugbank_to_targets:
targets1 = drugbank_to_targets[drug1]
targets2 = drugbank_to_targets[drug2]
if options.formula == 'jaccard':
result_targets = diana_comparison.calculate_jaccard_index(targets1, targets2)
elif options.formula == 'simpson':
result_targets = diana_comparison.calculate_simpson_index(targets1, targets2)
target_comparisons.append(result_targets)
dc_to_target_ji[index] = result_targets
bio_proc1 = get_results_from_dict_of_sets(targets1, target_to_bio_processes)
bio_proc2 = get_results_from_dict_of_sets(targets2, target_to_bio_processes)
if options.formula == 'jaccard':
result_bp = diana_comparison.calculate_jaccard_index(bio_proc1, bio_proc2)
elif options.formula == 'simpson':
result_bp = diana_comparison.calculate_simpson_index(bio_proc1, bio_proc2)
bp_comparisons.append(result_bp)
dc_to_bp_ji[index] = result_bp
pathways1 = get_results_from_dict_of_sets(targets1, target_to_pathways)
pathways2 = get_results_from_dict_of_sets(targets2, target_to_pathways)
if options.formula == 'jaccard':
result_pathways = diana_comparison.calculate_jaccard_index(pathways1, pathways2)
elif options.formula == 'simpson':
result_pathways = diana_comparison.calculate_simpson_index(pathways1, pathways2)
pathway_comparisons.append(result_pathways)
dc_to_pathway_ji[index] = result_pathways
# Plot distribution of comparisons of targets
n, bins, patches = plt.hist(np.array(target_comparisons), bins=50, weights=np.zeros_like(np.array(target_comparisons)) + 1. / np.array(target_comparisons).size, facecolor='r')
plt.xlabel('{} Index of Targets'.format(options.formula.capitalize()))
plt.ylabel('Relative frequency')
plt.title('Distribution of {} Index of Targets in drug combinations'.format(options.formula.capitalize()))
plt.savefig(plot_ji_targets, format=fig_format, dpi=300)
plt.clf()
# Plot distribution of comparisons of biological processes
n, bins, patches = plt.hist(np.array(bp_comparisons), bins=50, weights=np.zeros_like(np.array(bp_comparisons)) + 1. / np.array(bp_comparisons).size, facecolor='b')
plt.xlabel('{} Index of Biological Processes'.format(options.formula.capitalize()))
plt.ylabel('Relative frequency')
plt.title('Distribution of {} Index of Biological Processes in drug combinations'.format(options.formula.capitalize()))
plt.savefig(plot_ji_bp, format=fig_format, dpi=300)
plt.clf()
# Plot distribution of comparisons of pathways
n, bins, patches = plt.hist(np.array(pathway_comparisons), bins=50, weights=np.zeros_like(np.array(pathway_comparisons)) + 1. / np.array(pathway_comparisons).size, facecolor='g')
plt.xlabel('{} Index of Pathways'.format(options.formula.capitalize()))
plt.ylabel('Relative frequency')
plt.title('Distribution of {} Index of Pathways in drug combinations'.format(options.formula.capitalize()))
plt.savefig(plot_ji_pathways, format=fig_format, dpi=300)
plt.clf()
#------------------------------------#
# CLASSIFY THE DRUG COMBINATIONS #
#------------------------------------#
    # Similar targets --> ji > target_cut_off
    # Different targets --> ji <= target_cut_off
    target_cut_off = 0.5
    # Similar biological processes --> ji > bp_cut_off
    # Different biological processes --> ji <= bp_cut_off
    bp_cut_off = 0.5
    # Similar pathways --> ji > pathway_cut_off
    # Different pathways --> ji <= pathway_cut_off
    pathway_cut_off = 0.5
classification_tar_bp = {}
st = 0
dt = 0
st_sbp = 0
st_dbp = 0
dt_sbp = 0
dt_dbp = 0
for dc in dc_to_target_ji:
# Classify by targets and biological processes
if dc in dc_to_bp_ji:
ji_tar = dc_to_target_ji[dc]
ji_bp = dc_to_bp_ji[dc]
if ji_tar > target_cut_off:
classification_tar_bp[dc] = 'similar_targets'
st += 1
if ji_bp > bp_cut_off:
st_sbp += 1
elif ji_bp <= bp_cut_off:
st_dbp += 1
elif ji_tar <= target_cut_off:
dt += 1
if ji_bp > bp_cut_off:
dt_sbp += 1
classification_tar_bp[dc] = 'different_targets_similar_bp'
elif ji_bp <= bp_cut_off:
dt_dbp += 1
classification_tar_bp[dc] = 'different_targets_different_bp'
print('Similar targets {}: similar bp {}, diff bp {}\n'.format(st, st_sbp, st_dbp))
print('Different targets {}: similar bp {}, diff bp {}\n'.format(dt, dt_sbp, dt_dbp))
cPickle.dump(classification_tar_bp, open(classification_targets_bp_file, 'w'))
classification_tar_pathway = {}
st = 0
dt = 0
st_spath = 0
st_dpath = 0
dt_spath = 0
dt_dpath = 0
for dc in dc_to_target_ji:
        # Classify by targets and pathways
if dc in dc_to_pathway_ji:
ji_tar = dc_to_target_ji[dc]
ji_path = dc_to_pathway_ji[dc]
if ji_tar > target_cut_off:
classification_tar_pathway[dc] = 'similar_targets'
st += 1
if ji_path > pathway_cut_off:
st_spath += 1
elif ji_path <= pathway_cut_off:
st_dpath += 1
elif ji_tar <= target_cut_off:
dt += 1
if ji_path > pathway_cut_off:
dt_spath += 1
classification_tar_pathway[dc] = 'different_targets_similar_pathways'
elif ji_path <= pathway_cut_off:
dt_dpath += 1
classification_tar_pathway[dc] = 'different_targets_different_pathways'
print('Similar targets {}: similar pathways {}, diff pathways {}\n'.format(st, st_spath, st_dpath))
print('Different targets {}: similar pathways {}, diff pathways {}\n'.format(dt, dt_spath, dt_dpath))
cPickle.dump(classification_tar_pathway, open(classification_targets_pathways_file, 'w'))
# Get number of drugs in drug combinations per number of targets
targets = [len(drugbank_to_targets[drug]) for drug in drugbank_to_targets if drug in all_drugs]
numtargets_to_numdrugs = {}
for target in targets:
numtargets_to_numdrugs.setdefault(target, 0)
numtargets_to_numdrugs[target] += 1
print('Number of drugs in drug combination: {}. Divided by four: {}'.format(len(all_drugs), len(all_drugs)/4))
    for numtar, numdrug in sorted(numtargets_to_numdrugs.iteritems(), key=lambda item: item[0], reverse=True):
print(numtar, numdrug)
# End marker for time
end = time.time()
print('\n DIANA INFO:\tTIME OF EXECUTION: {:.3f} seconds or {:.3f} minutes.\n'.format(end - start, (end - start) / 60))
return
#######################
#######################
# SECONDARY FUNCTIONS #
#######################
#######################
def fileExist(file):
"""
Checks if a file exists AND is a file
"""
return os.path.exists(file) and os.path.isfile(file)
def check_file(file):
"""
Checks if a file exists and if not, raises FileNotFound exception
"""
if not fileExist(file):
raise FileNotFound(file)
def create_directory(directory):
"""
Checks if a directory exists and if not, creates it
"""
    try:
        os.stat(directory)
    except OSError:
        os.mkdir(directory)
return
def check_directory(directory):
"""
Checks if a directory exists and if not, raises DirNotFound exception
"""
    try:
        os.stat(directory)
    except OSError:
        raise DirNotFound(directory)
class FileNotFound(Exception):
"""
Exception raised when a file is not found.
"""
def __init__(self, file):
self.file = file
def __str__(self):
return 'The file {} has not been found.\nTherefore, the comparison cannot be performed. Please, check that all the profiles have been correctly generated.\n'.format(self.file)
class DirNotFound(Exception):
"""
Exception raised when a directory is not found.
"""
def __init__(self, directory):
self.directory = directory
def __str__(self):
return 'The directory {} has not been found.\nTherefore, the comparison cannot be performed. Please, check that all the parameters have been correctly introduced and the profiles have been correctly generated.\n'.format(self.directory)
def get_results_from_dict_of_sets(list_of_elements, dict_of_sets):
"""
    We have a list of elements that are in a dict of elements, and every element has a set with results.
We want to extract the results corresponding to our elements.
"""
results = set()
for element in list_of_elements:
if element in dict_of_sets:
for result in dict_of_sets[element]:
results.add(result)
return results
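# Illustrative sketch (hypothetical data, not part of the original pipeline):
# the helper above pools the annotation sets of the requested elements, e.g.
#
#   target_to_pathways = {'P1': set(['glycolysis']), 'P2': set(['glycolysis', 'tca'])}
#   get_results_from_dict_of_sets(['P1', 'P2', 'P9'], target_to_pathways)
#   # -> set(['glycolysis', 'tca']); 'P9' is absent from the dict and is skipped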
if __name__ == "__main__":
main()
| mit |
francesco-mannella/dmp-esn | parametric/parametric_dmp/bin/tr_datasets/e_cursive_curves_angles_none_none/plot.py | 8 | 2377 | #!/usr/bin/env python
import glob
import numpy as np
import matplotlib
matplotlib.use("cairo")  # backend must be selected before importing pyplot, or it has no effect
import matplotlib.pyplot as plt
import os
import sys
pathname = os.path.dirname(sys.argv[0])
if pathname:
os.chdir(pathname)
coords = None
if len(sys.argv) == 5:
    try:
        coords = [float(sys.argv[x]) for x in range(1, 5)]
except ValueError:
pass
n_dim = None
def get_trajectories(pattern):
trs = []
names = glob.glob(pattern)
names.sort()
for fname in names:
t = np.loadtxt(fname)
trs.append(t)
return trs
trains = get_trajectories("trajectories/tl*")
tests = get_trajectories("trajectories/tt*")
train_results = get_trajectories("results/rtl*")
test_results = get_trajectories("results/rtt*")
ltrain = None
ltest = None
lall = []
idcs = np.arange(len(trains))
theo_train = {
'color': [.6,.6,1],
'lw': 5,
'zorder': 2,
'label': "Training"
}
repr_train = {
'color': [0,0,.3],
'lw': 1.5,
'zorder': 3,
'label': "Training repr."
}
theo_test = {
'color': [1,.6,.6],
'lw': 5,
'zorder': 2,
'label': "Test"
}
repr_test = {
'color': [.3,0,0],
'lw': 1.5,
'zorder': 3,
'label': "Test repr"
}
def common_plot(ax, d, label, color, lw, zorder):
h, = ax.plot(d[:,1]+d[:,7]*6, d[:,2]+d[:,8]*6,
color=color, lw=lw, zorder=zorder,
label=label)
return h
def plot_trajectories(ax, ttype, lall, **kargs):
idcs = np.arange(len(ttype))
for d,i in zip(ttype, idcs):
if i == 0:
lplot = common_plot(ax, d, **kargs)
lall.append(lplot)
else:
common_plot(ax, d, **kargs)
fig = plt.figure("DMP Stulp", figsize=(8,8))
ax = fig.add_subplot(111, aspect="equal")
plot_trajectories(ax, trains, lall, **theo_train)
plot_trajectories(ax, train_results, lall, **repr_train)
plot_trajectories(ax, tests, lall, **theo_test)
plot_trajectories(ax, test_results, lall, **repr_test)
print(coords)
if coords is None:
ax.set_xlim([-0.5,10.2])
ax.set_ylim([-0.5,7.2])
else:
ax.set_xlim([coords[0], coords[1]])
ax.set_ylim([coords[2], coords[3]])
ax.set_xticks([])
ax.set_yticks([])
ax.legend(handles=lall)
plt.tight_layout()
plt.show()
| gpl-2.0 |
daler/encode-dataframe | encode_dataframe/__init__.py | 1 | 2310 | import os
import glob
import pandas
def mirror_metadata_files(genome, basedir='.'):
"""
Mirrors all of the `files.txt` files from the assembly's encodeDCC section
on UCSC's servers.
`genome` is the assembly name (hg19, mm9)
Supply an optional `basedir` to download the data somewhere else.
"""
cmds = """
(
cd {basedir} &&
wget \\
-r \\
-A "files.txt" \\
"http://hgdownload.cse.ucsc.edu/goldenPath/{genome}/encodeDCC" \\
-e robots=off \\
-R "*html*" \\
-I "/goldenPath/{genome}/encodeDCC" \\
-np \\
-l 2
)
""".format(**locals())
print(cmds)
os.system(cmds)
def metadata_to_dataframe(fn):
"""
Converts a single `files.txt` file to a pandas.DataFrame.
"""
data = []
for line in open(fn):
d = {}
toks = line.strip().split('\t')
filename, kvs = toks
url = os.path.join(
'http://' + os.path.dirname(fn[fn.index('hgdownload.cse.ucsc.edu'):]),
filename)
d['url'] = url
d['filename'] = filename
for kv in kvs.split('; '):
k, v = kv.split('=', 1)
d[k] = v
data.append(d)
return pandas.DataFrame(data)
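# Usage sketch (assumed layout, hypothetical track and file names): each line of
# a mirrored files.txt is expected to be "<filename><TAB>key1=val1; key2=val2; ...",
# which the parser above turns into one row per file with 'filename', 'url' and
# one column per key, e.g.
#
#   mirror_metadata_files('mm9')
#   df = metadata_to_dataframe(
#       'hgdownload.cse.ucsc.edu/goldenPath/mm9/encodeDCC/wgEncodeSomeTrack/files.txt')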
def encode_dataframe(genome, basename='.'):
"""
Returns a large pandas.DataFrame containing metadata from all identified
ENCODE data for the assembly.
Specifically, this concatenates all of the parsed `files.txt` files into
a single data frame. Assumes `mirror_metadata_files` has been called
already to mirror data.
"""
filenames = glob.glob(
os.path.join(
basename,
'hgdownload.cse.ucsc.edu/goldenPath/'
'{genome}/encodeDCC/*/files.txt'.format(genome=genome)
)
)
dfs = [metadata_to_dataframe(fn) for fn in filenames]
df = pandas.concat(dfs)
df.index = df.filename
return df
if __name__ == "__main__":
df = encode_dataframe('mm9')
interesting = (
(df.cell == 'MEL')
& (df.type == 'bam')
& (df.treatment != 'DMSO_2.0pct')
& (df.dataType.isin(['ChipSeq', 'DnaseSeq']))
& (df.replicate == '1')
& df.objStatus.isnull()
)
m = df.ix[interesting]
| mit |
arjoly/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
pratapvardhan/pandas | pandas/core/arrays/datetimes.py | 1 | 29028 | # -*- coding: utf-8 -*-
import warnings
import numpy as np
from pytz import utc
from pandas._libs import tslib
from pandas._libs.tslib import Timestamp, NaT, iNaT
from pandas._libs.tslibs import (
conversion, fields, timezones,
resolution as libresolution)
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
_NS_DTYPE,
is_datetime64tz_dtype,
is_datetime64_dtype,
_ensure_int64)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.tseries.frequencies import to_offset, DateOffset
from .datetimelike import DatetimeLikeArrayMixin
def _field_accessor(name, field, docstring=None):
def f(self):
values = self.asi8
if self.tz is not None:
if self.tz is not utc:
values = self._local_timestamps()
if field in self._bool_ops:
if field.endswith(('start', 'end')):
freq = self.freq
month_kw = 12
if freq:
kwds = freq.kwds
month_kw = kwds.get('startingMonth', kwds.get('month', 12))
result = fields.get_start_end_field(values, field,
self.freqstr, month_kw)
else:
result = fields.get_date_field(values, field)
# these return a boolean by-definition
return result
if field in self._object_ops:
result = fields.get_date_name_field(values, field)
result = self._maybe_mask_results(result)
else:
result = fields.get_date_field(values, field)
result = self._maybe_mask_results(result, convert='float64')
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
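# Illustration: further below this factory is consumed as, e.g.,
#     year = _field_accessor('year', 'Y', "The year of the datetime")
# which attaches a read-only ``year`` property computed from the underlying
# integer (i8) timestamps, with NaT positions masked where appropriate.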
class DatetimeArrayMixin(DatetimeLikeArrayMixin):
"""
Assumes that subclass __new__/__init__ defines:
tz
_freq
_data
"""
_bool_ops = ['is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end', 'is_year_start',
'is_year_end', 'is_leap_year']
_object_ops = ['weekday_name', 'freq', 'tz']
# -----------------------------------------------------------------
# Constructors
_attributes = ["freq", "tz"]
@classmethod
def _simple_new(cls, values, freq=None, tz=None, **kwargs):
"""
        we require that we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
"""
if getattr(values, 'dtype', None) is None:
# empty, but with dtype compat
if values is None:
values = np.empty(0, dtype=_NS_DTYPE)
return cls(values, freq=freq, tz=tz, **kwargs)
values = np.array(values, copy=False)
if not is_datetime64_dtype(values):
values = _ensure_int64(values).view(_NS_DTYPE)
result = object.__new__(cls)
result._data = values
result._freq = freq
tz = timezones.maybe_get_tz(tz)
result._tz = timezones.tz_standardize(tz)
return result
def __new__(cls, values, freq=None, tz=None):
if (freq is not None and not isinstance(freq, DateOffset) and
freq != 'infer'):
freq = to_offset(freq)
result = cls._simple_new(values, freq=freq, tz=tz)
if freq == 'infer':
inferred = result.inferred_freq
if inferred:
result.freq = to_offset(inferred)
# NB: Among other things not yet ported from the DatetimeIndex
# constructor, this does not call _deepcopy_if_needed
return result
# -----------------------------------------------------------------
# Descriptive Properties
@property
def _box_func(self):
return lambda x: Timestamp(x, freq=self.freq, tz=self.tz)
@cache_readonly
def dtype(self):
if self.tz is None:
return _NS_DTYPE
return DatetimeTZDtype('ns', self.tz)
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@property # NB: override with cache_readonly in immutable subclasses
def _timezone(self):
""" Comparable timezone both for pytz / dateutil"""
return timezones.get_timezone(self.tzinfo)
@property
def offset(self):
"""get/set the frequency of the instance"""
msg = ('{cls}.offset has been deprecated and will be removed '
'in a future version; use {cls}.freq instead.'
.format(cls=type(self).__name__))
warnings.warn(msg, FutureWarning, stacklevel=2)
return self.freq
@offset.setter
def offset(self, value):
"""get/set the frequency of the instance"""
msg = ('{cls}.offset has been deprecated and will be removed '
'in a future version; use {cls}.freq instead.'
.format(cls=type(self).__name__))
warnings.warn(msg, FutureWarning, stacklevel=2)
self.freq = value
@property # NB: override with cache_readonly in immutable subclasses
def is_normalized(self):
"""
Returns True if all of the dates are at midnight ("no time")
"""
return conversion.is_date_array_normalized(self.asi8, self.tz)
@property # NB: override with cache_readonly in immutable subclasses
def _resolution(self):
return libresolution.resolution(self.asi8, self.tz)
# ----------------------------------------------------------------
# Array-like Methods
def __iter__(self):
"""
Return an iterator over the boxed values
Yields
-------
tstamp : Timestamp
"""
# convert in chunks of 10k for efficiency
data = self.asi8
length = len(self)
chunksize = 10000
chunks = int(length / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
converted = tslib.ints_to_pydatetime(data[start_i:end_i],
tz=self.tz, freq=self.freq,
box="timestamp")
for v in converted:
yield v
# -----------------------------------------------------------------
# Comparison Methods
def _has_same_tz(self, other):
zzone = self._timezone
        # vzone shouldn't be None if value is non-datetime like
if isinstance(other, np.datetime64):
# convert to Timestamp as np.datetime64 doesn't have tz attr
other = Timestamp(other)
vzone = timezones.get_timezone(getattr(other, 'tzinfo', '__no_tz__'))
return zzone == vzone
def _assert_tzawareness_compat(self, other):
# adapted from _Timestamp._assert_tzawareness_compat
other_tz = getattr(other, 'tzinfo', None)
if is_datetime64tz_dtype(other):
# Get tzinfo from Series dtype
other_tz = other.dtype.tz
if other is NaT:
# pd.NaT quacks both aware and naive
pass
elif self.tz is None:
if other_tz is not None:
raise TypeError('Cannot compare tz-naive and tz-aware '
'datetime-like objects.')
elif other_tz is None:
raise TypeError('Cannot compare tz-naive and tz-aware '
'datetime-like objects')
# -----------------------------------------------------------------
# Arithmetic Methods
def _sub_datelike_dti(self, other):
"""subtraction of two DatetimeIndexes"""
if not len(self) == len(other):
raise ValueError("cannot add indices of unequal length")
self_i8 = self.asi8
other_i8 = other.asi8
new_values = self_i8 - other_i8
if self.hasnans or other.hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view('timedelta64[ns]')
# -----------------------------------------------------------------
# Timezone Conversion and Localization Methods
def _local_timestamps(self):
"""
Convert to an i8 (unix-like nanosecond timestamp) representation
while keeping the local timezone and not using UTC.
This is used to calculate time-of-day information as if the timestamps
were timezone-naive.
"""
values = self.asi8
indexer = values.argsort()
result = conversion.tz_convert(values.take(indexer), utc, self.tz)
n = len(indexer)
reverse = np.empty(n, dtype=np.int_)
reverse.put(indexer, np.arange(n))
return result.take(reverse)
def tz_convert(self, tz):
"""
Convert tz-aware Datetime Array/Index from one time zone to another.
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted
to this time zone of the Datetime Array/Index. A `tz` of None will
convert to UTC and remove the timezone information.
Returns
-------
normalized : same type as self
Raises
------
TypeError
If Datetime Array/Index is tz-naive.
See Also
--------
DatetimeIndex.tz : A timezone that has a variable offset from UTC
DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
given time zone, or remove timezone from a tz-aware DatetimeIndex.
Examples
--------
With the `tz` parameter, we can change the DatetimeIndex
to other time zones:
>>> dti = pd.DatetimeIndex(start='2014-08-01 09:00',
... freq='H', periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert('US/Central')
DatetimeIndex(['2014-08-01 02:00:00-05:00',
'2014-08-01 03:00:00-05:00',
'2014-08-01 04:00:00-05:00'],
dtype='datetime64[ns, US/Central]', freq='H')
With the ``tz=None``, we can remove the timezone (after converting
to UTC if necessary):
>>> dti = pd.DatetimeIndex(start='2014-08-01 09:00',freq='H',
... periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert(None)
DatetimeIndex(['2014-08-01 07:00:00',
'2014-08-01 08:00:00',
'2014-08-01 09:00:00'],
dtype='datetime64[ns]', freq='H')
"""
tz = timezones.maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise TypeError('Cannot convert tz-naive timestamps, use '
'tz_localize to localize')
# No conversion since timestamps are all UTC to begin with
return self._shallow_copy(tz=tz)
def tz_localize(self, tz, ambiguous='raise', errors='raise'):
"""
Localize tz-naive Datetime Array/Index to tz-aware
Datetime Array/Index.
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
time zone.
Time zone localization helps to switch from time zone aware to time
zone unaware objects.
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone to convert timestamps to. Passing ``None`` will
remove the time zone information preserving local time.
ambiguous : str {'infer', 'NaT', 'raise'} or bool array,
default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
errors : {'raise', 'coerce'}, default 'raise'
- 'raise' will raise a NonExistentTimeError if a timestamp is not
valid in the specified time zone (e.g. due to a transition from
or to DST time)
- 'coerce' will return NaT if the timestamp can not be converted
to the specified time zone
.. versionadded:: 0.19.0
Returns
-------
result : same type as self
Array/Index converted to the specified time zone.
Raises
------
TypeError
If the Datetime Array/Index is tz-aware and tz is not None.
See Also
--------
DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
one time zone to another.
Examples
--------
>>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
>>> tz_naive
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Localize DatetimeIndex in US/Eastern time zone:
>>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
>>> tz_aware
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]', freq='D')
With the ``tz=None``, we can remove the time zone information
while keeping the local time (not converted to UTC):
>>> tz_aware.tz_localize(None)
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
"""
if self.tz is not None:
if tz is None:
new_dates = conversion.tz_convert(self.asi8, 'UTC', self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = timezones.maybe_get_tz(tz)
# Convert to UTC
new_dates = conversion.tz_localize_to_utc(self.asi8, tz,
ambiguous=ambiguous,
errors=errors)
new_dates = new_dates.view(_NS_DTYPE)
return self._shallow_copy(new_dates, tz=tz)
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
def to_pydatetime(self):
"""
Return Datetime Array/Index as object ndarray of datetime.datetime
objects
Returns
-------
datetimes : ndarray
"""
return tslib.ints_to_pydatetime(self.asi8, tz=self.tz)
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
def month_name(self, locale=None):
"""
Return the month names of the DateTimeIndex with specified locale.
Parameters
----------
locale : string, default None (English locale)
locale determining the language in which to return the month name
Returns
-------
month_names : Index
Index of month names
.. versionadded:: 0.23.0
"""
if self.tz is not None and self.tz is not utc:
values = self._local_timestamps()
else:
values = self.asi8
result = fields.get_date_name_field(values, 'month_name',
locale=locale)
result = self._maybe_mask_results(result)
return result
def day_name(self, locale=None):
"""
Return the day names of the DateTimeIndex with specified locale.
Parameters
----------
locale : string, default None (English locale)
locale determining the language in which to return the day name
Returns
-------
month_names : Index
Index of day names
.. versionadded:: 0.23.0
"""
if self.tz is not None and self.tz is not utc:
values = self._local_timestamps()
else:
values = self.asi8
result = fields.get_date_name_field(values, 'day_name',
locale=locale)
result = self._maybe_mask_results(result)
return result
@property
def time(self):
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
if self.tz is not None and self.tz is not utc:
timestamps = self._local_timestamps()
else:
timestamps = self.asi8
return tslib.ints_to_pydatetime(timestamps, box="time")
@property
def date(self):
"""
Returns numpy array of python datetime.date objects (namely, the date
part of Timestamps without timezone information).
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
if self.tz is not None and self.tz is not utc:
timestamps = self._local_timestamps()
else:
timestamps = self.asi8
return tslib.ints_to_pydatetime(timestamps, box="date")
year = _field_accessor('year', 'Y', "The year of the datetime")
month = _field_accessor('month', 'M',
"The month as January=1, December=12")
day = _field_accessor('day', 'D', "The days of the datetime")
hour = _field_accessor('hour', 'h', "The hours of the datetime")
minute = _field_accessor('minute', 'm', "The minutes of the datetime")
second = _field_accessor('second', 's', "The seconds of the datetime")
microsecond = _field_accessor('microsecond', 'us',
"The microseconds of the datetime")
nanosecond = _field_accessor('nanosecond', 'ns',
"The nanoseconds of the datetime")
weekofyear = _field_accessor('weekofyear', 'woy',
"The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 'dow',
"The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
weekday_name = _field_accessor(
'weekday_name',
'weekday_name',
"The name of day in a week (ex: Friday)\n\n.. deprecated:: 0.23.0")
dayofyear = _field_accessor('dayofyear', 'doy',
"The ordinal day of the year")
quarter = _field_accessor('quarter', 'q', "The quarter of the date")
days_in_month = _field_accessor(
'days_in_month',
'dim',
"The number of days in the month")
daysinmonth = days_in_month
is_month_start = _field_accessor(
'is_month_start',
'is_month_start',
"Logical indicating if first day of month (defined by frequency)")
is_month_end = _field_accessor(
'is_month_end',
'is_month_end',
"""
Indicator for whether the date is the last day of the month.
Returns
-------
Series or array
For Series, returns a Series with boolean values. For
DatetimeIndex, returns a boolean array.
See Also
--------
is_month_start : Indicator for whether the date is the first day
of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2018-02-27", periods=3))
>>> dates
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[ns]
>>> dates.dt.is_month_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2018-02-27", periods=3)
>>> idx.is_month_end
array([False, True, False], dtype=bool)
""")
is_quarter_start = _field_accessor(
'is_quarter_start',
'is_quarter_start',
"""
Indicator for whether the date is the first day of a quarter.
Returns
-------
is_quarter_start : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_end : Similar property for indicating the quarter start.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_start=df.dates.dt.is_quarter_start)
dates quarter is_quarter_start
0 2017-03-30 1 False
1 2017-03-31 1 False
2 2017-04-01 2 True
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_start
array([False, False, True, False])
""")
is_quarter_end = _field_accessor(
'is_quarter_end',
'is_quarter_end',
"""
Indicator for whether the date is the last day of a quarter.
Returns
-------
is_quarter_end : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_start : Similar property indicating the quarter start.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_end=df.dates.dt.is_quarter_end)
dates quarter is_quarter_end
0 2017-03-30 1 False
1 2017-03-31 1 True
2 2017-04-01 2 False
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_end
array([False, True, False, False])
""")
is_year_start = _field_accessor(
'is_year_start',
'is_year_start',
"""
Indicate whether the date is the first day of a year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_end : Similar property indicating the last day of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_start
0 False
1 False
2 True
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_start
array([False, False, True])
""")
is_year_end = _field_accessor(
'is_year_end',
'is_year_end',
"""
Indicate whether the date is the last day of the year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_start : Similar property indicating the start of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_end
array([False, True, False])
""")
is_leap_year = _field_accessor(
'is_leap_year',
'is_leap_year',
"""
Boolean indicator if the date belongs to a leap year.
A leap year is a year, which has 366 days (instead of 365) including
29th of February as an intercalary day.
Leap years are years which are multiples of four with the exception
of years divisible by 100 but not by 400.
Returns
-------
Series or ndarray
Booleans indicating if dates belong to a leap year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="Y")
>>> idx
DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],
dtype='datetime64[ns]', freq='A-DEC')
>>> idx.is_leap_year
array([ True, False, False], dtype=bool)
        >>> dates_series = pd.Series(idx)
>>> dates_series
0 2012-12-31
1 2013-12-31
2 2014-12-31
dtype: datetime64[ns]
>>> dates_series.dt.is_leap_year
0 True
1 False
2 False
dtype: bool
""")
def to_julian_date(self):
"""
Convert Datetime Array to float64 ndarray of Julian Dates.
0 Julian date is noon January 1, 4713 BC.
http://en.wikipedia.org/wiki/Julian_day
"""
# http://mysite.verizon.net/aesir_research/date/jdalg2.htm
year = np.asarray(self.year)
month = np.asarray(self.month)
day = np.asarray(self.day)
testarr = month < 3
year[testarr] -= 1
month[testarr] += 12
return (day +
np.fix((153 * month - 457) / 5) +
365 * year +
np.floor(year / 4) -
np.floor(year / 100) +
np.floor(year / 400) +
1721118.5 +
(self.hour +
self.minute / 60.0 +
self.second / 3600.0 +
self.microsecond / 3600.0 / 1e+6 +
self.nanosecond / 3600.0 / 1e+9
) / 24.0)
| bsd-3-clause |
mahat/LogisticRegression | Utils.py | 1 | 2105 | '''
Author: mahat
'''
import pandas as pd
import numpy as np
# getting titanic dataset
def getTitanicDataSet():
# loading data
rawData = pd.read_csv('./data/Titanic.csv', delimiter=',', index_col=0, header=0)
df = rawData.drop(['Name', 'Ticket', 'Cabin'], 1)
#print 'Titanic Dataset Description'
#print df.describe()
    # handling NaN variables by dropping rows with NaN values
df_no_missing = df.dropna()
print 'Titanic Dataset Description after dropping rows with missing values'
print df_no_missing.describe()
print df_no_missing.dtypes
# converting categorical data into numerical data
df_no_missing['Gender'] = df_no_missing['Sex'].map({'female': 0, 'male': 1}).astype(int)
Ports = list(enumerate(np.unique(df_no_missing['Embarked']))) # determine all values of Embarked,
Ports_dict = {name: i for i, name in Ports}
df_no_missing.Embarked = df_no_missing.Embarked.map(lambda x: Ports_dict[x]).astype(int)
# replacing categorical data with onehotencoding
embarked_one_hot_coding = pd.get_dummies(df_no_missing['Embarked']).rename(columns=lambda x: 'Emb_' + str(x))
pclass_one_hot_coding = pd.get_dummies(df_no_missing['Pclass']).rename(columns=lambda x: 'Pclass_' + str(x))
    # concat old df and one-hot encodings
df_no_missing_and_encoded = pd.concat([df_no_missing, embarked_one_hot_coding, pclass_one_hot_coding], axis=1)
# print df_no_missing_and_encoded
# drop unused cols and convert data to float
df_no_missing_and_encoded_cleaned = df_no_missing_and_encoded.drop(['Sex', 'Embarked', 'Pclass'], axis=1)
df_no_missing_and_encoded_cleaned = df_no_missing_and_encoded_cleaned.applymap(np.float)
# print df_no_missing_and_encoded_cleaned
# creating dataset
feat_cols = [col for col in df_no_missing_and_encoded_cleaned.columns if col not in ['Survived', 'PassengerId']]
X = [elem for elem in df_no_missing_and_encoded_cleaned[feat_cols].values]
Y = [elem for elem in df_no_missing_and_encoded_cleaned['Survived'].values]
return [X,Y,df_no_missing_and_encoded_cleaned.columns] | gpl-3.0 |
smartscheduling/scikit-learn-categorical-tree | sklearn/semi_supervised/label_propagation.py | 24 | 15181 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
macks22/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 129 | 43401 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
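        # clf1 starts averaging only after 7 samples have been seen (its second
        # epoch); one plain epoch (clf2) followed by one epoch of the explicit
        # ASGD reference started from clf2's solution should therefore match it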
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' is not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
        # Checks coef_init and intercept_init shape for multi-class problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
        # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
        # build a very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give small sample weights to the class 1 examples
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
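        # a single fit with n_iter=2 should be equivalent to two successive
        # partial_fit passes over the same (unshuffled) data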
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
        # ground-truth linear model that generates y from X and to which the
        # models should converge if the regularization were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
hitszxp/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 16 | 5134 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from numpy.testing import assert_raises
from scipy.spatial import distance
from sklearn.utils.testing import assert_equal
from sklearn.cluster.dbscan_ import DBSCAN, dbscan
from .common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
"""Tests the DBSCAN algorithm with a similarity array."""
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
"""Tests the DBSCAN algorithm with a feature vector array."""
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_callable():
"""Tests the DBSCAN algorithm with a callable metric."""
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
"""Tests the DBSCAN algorithm with balltree for neighbor calculation."""
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
"""DBSCAN.fit should accept a list of lists."""
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
"""Test bad argument values: these should all raise ValueErrors"""
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
| bsd-3-clause |
isomerase/MyPyGLM | STA_grasshopper_ex.py | 2 | 5644 | """
.. _grasshopper:
=====================================
Auditory processing in grasshoppers
=====================================
Extracting the average time-series from one signal, time-locked to the
occurrence of some type of event in another signal is a very typical operation in
the analysis of time-series from neuroscience experiments. Therefore, we have
an additional example of this kind of analysis in :ref:`et-fmri`
In the following code-snippet, we demonstrate the calculation of the
spike-triggered average (STA). This is the average of the stimulus wave-form
preceding the emission of a spike in the neuron and can be thought of as the
stimulus 'preferred' by this neuron.
We start by importing the required modules:
"""
import os
import numpy as np
import nitime
import nitime.timeseries as ts
import nitime.analysis as tsa
import nitime.viz as viz
from matplotlib import pyplot as plt
"""
Two data files are used in this example. The first contains the times of action
potentials ('spikes'), recorded intra-cellularly from primary auditory
receptors in the grasshopper *Locusta Migratoria*.
We read in these times and initialize an Events object from them. The
spike-times are given in micro-seconds:
"""
data_path = os.path.join(nitime.__path__[0], 'data')
spike_times = np.loadtxt(os.path.join(data_path, 'grasshopper_spike_times1.txt'))
spike_ev = ts.Events(spike_times, time_unit='us')
"""
The second data file contains the stimulus that was played during the
recording. Briefly, the stimulus played was a pure-tone in the cell's preferred
frequency amplitude modulated by Gaussian white-noise, up to a cut-off
frequency (200 Hz in this case, for details on the experimental procedures and
the stimulus see [Rokem2006]_).
"""
stim = np.loadtxt(os.path.join(data_path, 'grasshopper_stimulus1.txt'))
"""
The stimulus needs to be transformed from Volts to dB:
"""
def volt2dB(stim, maxdB=100):
stim = (20 * 1 / np.log(10)) * (np.log(stim[:, 1] / 2.0e-5))
return maxdB - stim.max() + stim
stim = volt2dB(stim, maxdB=76.4286) # maxdB taken from the spike file header
"""
We create a time-series object for the stimulus, which was sampled at 20 kHz:
"""
stim_time_series = ts.TimeSeries(t0=0,
data=stim,
sampling_interval=50,
time_unit='us')
"""
Note that the time-representation will not change if we now convert the
time-unit into ms. The only thing this accomplishes is to use this time-unit in
subsequent visualization of the resulting time-series
"""
stim_time_series.time_unit = 'ms'
"""
Next, we initialize an EventRelatedAnalyzer:
"""
event_related = tsa.EventRelatedAnalyzer(stim_time_series,
spike_ev,
len_et=200,
offset=-200)
"""
The actual STA gets calculated in this line (the call to 'event_related.eta')
and the result gets input directly into the plotting function:
"""
fig01 = viz.plot_tseries(event_related.eta, ylabel='Amplitude (dB SPL)')
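"""
As a rough cross-check, the STA can also be sketched directly with numpy by
averaging the 200 stimulus samples (10 ms) preceding each spike. This is only
a sketch: it assumes every spike falls at least 200 samples after stimulus
onset and within the recorded stimulus.
"""

dt = 50  # stimulus sampling interval in microseconds, as above
spike_idx = (spike_times / dt).astype(int)
segments = [stim[i - 200:i] for i in spike_idx if 200 <= i <= stim.shape[0]]
sta_manual = np.mean(segments, axis=0)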
"""
We prettify the plot a bit by adding a dashed line at the mean of the stimulus
"""
ax = fig01.get_axes()[0]
xlim = ax.get_xlim()
ylim = ax.get_ylim()
mean_stim = np.mean(stim_time_series.data)
ax.plot([xlim[0], xlim[1]], [mean_stim, mean_stim], 'k--')
"""
.. image:: fig/grasshopper_01.png
In the following example, a second channel has been added to both the stimulus
and the spike-train time-series. This is the response of the same cell, to a
different stimulus, in which the frequency modulation has a higher frequency
cut-off (800 Hz).
"""
stim2 = np.loadtxt(os.path.join(data_path, 'grasshopper_stimulus2.txt'))
stim2 = volt2dB(stim2, maxdB=76.4286)
spike_times2 = np.loadtxt(os.path.join(data_path, 'grasshopper_spike_times2.txt'))
"""
We loop over the two spike-time events and stimulus time-series:
"""
et = []
means = []
for stim, spike in zip([stim, stim2], [spike_times, spike_times2]):
stim_time_series = ts.TimeSeries(t0=0, data=stim, sampling_interval=50,
time_unit='us')
stim_time_series.time_unit = 'ms'
spike_ev = ts.Events(spike, time_unit='us')
#Initialize the event-related analyzer
event_related = tsa.EventRelatedAnalyzer(stim_time_series,
spike_ev,
len_et=200,
offset=-200)
"""
This is the line which actually executes the analysis
"""
et.append(event_related.eta)
means.append(np.mean(stim_time_series.data))
"""
Stack the data from both time-series, initialize a new time-series and plot it:
"""
fig02 = viz.plot_tseries(
ts.TimeSeries(data=np.vstack([et[0].data, et[1].data]),
sampling_rate=et[0].sampling_rate, time_unit='ms'))
ax = fig02.get_axes()[0]
xlim = ax.get_xlim()
ax.plot([xlim[0], xlim[1]], [means[0], means[0]], 'b--')
ax.plot([xlim[0], xlim[1]], [means[1], means[1]], 'g--')
"""
.. image:: fig/grasshopper_02.png
plt.show() is called in order to display the figures
"""
plt.show()
"""
The data used in this example is also available on the `CRCNS data sharing
web-site <http://crcns.org/>`_.
.. [Rokem2006] Ariel Rokem, Sebastian Watzl, Tim Gollisch, Martin Stemmler,
Andreas V M Herz and Ines Samengo (2006). Spike-timing precision
underlies the coding efficiency of auditory receptor neurons. J
Neurophysiol, 95:2541--52
"""
| mit |
hansonzeng/DeloitteTMT | hello.py | 1 | 6370 | from cloudant import Cloudant
from flask import Flask, render_template, request, jsonify, flash, abort
import atexit
import cf_deployment_tracker
import os
import json
import pandas as pd
from watson_developer_cloud import VisualRecognitionV3
from flask_uploads import UploadSet, IMAGES, configure_uploads
from classifyAssociation import(classify, lookup, convert_category)
# Emit Bluemix deployment event
cf_deployment_tracker.track()
app = Flask(__name__)
db_name = 'mydb'
client = None
db = None
classifier = ["FullClassifier_1661818688", "default"]
api_key = "3392034336ce62e110d77c8ea9a2d32d372ba0aa"
#two instances created in this app
visual_recognition = VisualRecognitionV3('2016-05-20', api_key=api_key)
if 'VCAP_SERVICES' in os.environ:
vcap = json.loads(os.getenv('VCAP_SERVICES'))
print('Found VCAP_SERVICES')
if 'cloudantNoSQLDB' in vcap:
creds = vcap['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = creds['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
elif os.path.isfile('vcap-local.json'):
with open('vcap-local.json') as f:
vcap = json.load(f)
print('Found local VCAP_SERVICES')
creds = vcap['services']['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = creds['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
# On Bluemix, get the port number from the environment variable PORT
# When running this app on the local machine, default the port to 8000
port = int(os.getenv('PORT', 9000))
photos = UploadSet('photos', IMAGES)
app.config['UPLOADED_PHOTOS_DEST'] = 'static/img'
configure_uploads(app, photos)
#https://www.youtube.com/watch?v=Exf8RbgKmhM
@app.route('/upload', methods=['GET', 'POST'])
def upload():
if request.method == 'POST' and 'file' in request.files:
filename = photos.save(request.files['file'])
flash("Photo saved.")
return filename
return render_template('index.html')
@app.route("/results", methods=['POST'])
def recognise_image():
result_items = list()
x = visual_recognition
imgurl = request.form['imgurl']
# print imgurl
img = x.classify(images_url=imgurl, classifier_ids=classifier)
classes = img['images'][0]['classifiers'][0]['classes']
custom_classes = img['custom_classes']
images_processed = img['images_processed']
if img['images'][0]['source_url']:
source_url = img['images'][0]['source_url']
else:
source_url = ''
if img['images'][0]['resolved_url']:
resolved_url = img['images'][0]['resolved_url']
else:
resolved_url = ''
complete_response = json.dumps(img, sort_keys = True, indent = 4, separators = (',', ': '))
return render_template('show_results.html', json_resp=classes, custom_classes=custom_classes,
images_processed=images_processed, source_url=source_url, resolved_url=resolved_url, complete_response=complete_response)
@app.route("/mba_results", methods=['GET','POST'])
def mba_results():
    existing_images = os.listdir('static/img')
    num_imgs = len(existing_images)
if 'classify_image' in request.files:
save_path = 'static/img/classified_%s.jpg' % num_imgs
request.files['classify_image'].save(save_path)
passed_image = open(save_path, 'rb')
classification = classify(images_file=passed_image, classifier=classifier, min_score=0.4, api_key=api_key)
print("image classified as ")
print(classification)
# added cars because bikes isn't one of the groups in the MBA - still awaiting group confirmation from Kate
# This will also simulate if multiple classifications are returned
# classification.append('Cars')
print("Determining association rules")
mapped_categories = []
for i in classification:
mapped_categories.append(convert_category(i))
# parameter used to run this
lookup_values = mapped_categories
# Shows how to get some of the output
total_rules = []
times = len(lookup_values)
for i in range(0, times):
interest = lookup_values[i]
print(interest)
print("looking up")
print("top two association rules are...")
# gets rule set from rules table
rules = lookup(interest, rules_to_return=2, classResult=classification[i])
if rules is not None:
total_rules.append(rules)
# checks if lookup worked
if isinstance(rules, pd.DataFrame):
print(rules)
else:
print(rules)
print(type(total_rules))
return render_template('show_results_2.html', passed_image=save_path, rules_for_classes=total_rules)
@app.route('/photo/<id>')
def show(id):
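    # assumes a Photo document model (with load() and a filename attribute)
    # and a show.html template defined elsewhere; neither is provided here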
photo = Photo.load(id)
if photo is None:
abort(404)
url = photos.url(photo.filename)
return render_template('show.html', url=url, photo=photo)
@app.route('/')
def home():
return render_template('index.html')
# /**
# * Endpoint to get a JSON array of all the visitors in the database
# * REST API example:
# * <code>
# * GET http://localhost:8000/api/visitors
# * </code>
# *
# * Response:
# * [ "Bob", "Jane" ]
# * @return An array of all the visitor names
# */
@app.route('/api/visitors', methods=['GET'])
def get_visitor():
if client:
return jsonify(list(map(lambda doc: doc['name'], db)))
else:
print('No database')
return jsonify([])
# /* Endpoint to greet and add a new visitor to database.
# * Send a POST request to localhost:8000/api/visitors with body
# * {
# *     "name": "Bob"
# * }
# */
@app.route('/api/visitors', methods=['POST'])
def put_visitor():
user = request.json['name']
if client:
data = {'name':user}
db.create_document(data)
return 'Hello %s! I added you to the database.' % user
else:
print('No database')
return 'Hello %s!' % user
@atexit.register
def shutdown():
if client:
client.disconnect()
if __name__ == '__main__':
app.secret_key = 'super secret key'
app.config['SESSION_TYPE'] = 'filesystem'
app.run(host='0.0.0.0', port=port, debug=True)
| apache-2.0 |
sarathid/Python-works | Intro_to_ML/outliers/outlier_removal_regression.py | 11 | 2376 | #!/usr/bin/python
import random
import numpy
import matplotlib.pyplot as plt
import pickle
from outlier_cleaner import outlierCleaner
### load up some practice data with outliers in it
ages = pickle.load( open("practice_outliers_ages.pkl", "r") )
net_worths = pickle.load( open("practice_outliers_net_worths.pkl", "r") )
### ages and net_worths need to be reshaped into 2D numpy arrays
### second argument of reshape command is a tuple of integers: (n_rows, n_columns)
### by convention, n_rows is the number of data points
### and n_columns is the number of features
ages = numpy.reshape( numpy.array(ages), (len(ages), 1))
net_worths = numpy.reshape( numpy.array(net_worths), (len(net_worths), 1))
from sklearn.cross_validation import train_test_split
ages_train, ages_test, net_worths_train, net_worths_test = train_test_split(ages, net_worths, test_size=0.1, random_state=42)
### fill in a regression here! Name the regression object reg so that
### the plotting code below works, and you can see what your regression looks like
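### one possible fill-in (a sketch, not the graded solution): an ordinary
### least-squares fit on the training split, named reg as expected below
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(ages_train, net_worths_train)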
try:
plt.plot(ages, reg.predict(ages), color="blue")
except NameError:
pass
plt.scatter(ages, net_worths)
plt.show()
### identify and remove the most outlier-y points
cleaned_data = []
try:
predictions = reg.predict(ages_train)
cleaned_data = outlierCleaner( predictions, ages_train, net_worths_train )
except NameError:
print "your regression object doesn't exist, or isn't name reg"
print "can't make predictions to use in identifying outliers"
### only run this code if cleaned_data is returning data
if len(cleaned_data) > 0:
ages, net_worths, errors = zip(*cleaned_data)
ages = numpy.reshape( numpy.array(ages), (len(ages), 1))
net_worths = numpy.reshape( numpy.array(net_worths), (len(net_worths), 1))
### refit your cleaned data!
try:
reg.fit(ages, net_worths)
plt.plot(ages, reg.predict(ages), color="blue")
except NameError:
print "you don't seem to have regression imported/created,"
print " or else your regression object isn't named reg"
print " either way, only draw the scatter plot of the cleaned data"
plt.scatter(ages, net_worths)
plt.xlabel("ages")
plt.ylabel("net worths")
plt.show()
else:
print "outlierCleaner() is returning an empty list, no refitting to be done"
| gpl-3.0 |
lbishal/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 28 | 10384 | # Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
#Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float64)
assert_true(A.dtype == np.float64)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float32)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
maheshakya/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
henrynj/PMLMC | plot/euler_old/plot_vs.py | 1 | 7609 | #!/usr/bin/env python
##################################################
# PMLMC Plotting #
# #
# Jun Nie #
# Last modification: 31-01-2018 #
##################################################
import sys
import numpy as np
import matplotlib.pyplot as plt
from plot_helper import *
### plotting for various purposes
class plotting_total_cost:
"""
plotting total cost for different eps and different numbers of levels
"""
def __init__(self, eps, gamma):
self.eps = eps
self.gamma = gamma
self.cost_mc, self.cost_ml1, self.cost_ml2, self.cost_ml3 = [], [], [], []
self.styles = ['o--', 'x--', 'd--', '*--', 's--']
self.cal_cost(gamma)
def cal_cost(self, gamma):
# read variance from convergence_test
v_q, v_yl = read_variance()
# mc on different levels
for l, v in enumerate(v_q):
self.cost_mc.append(compute_mc_cost(v, self.eps, l, gamma))
# 2 levels
for l0 in range(0,3,1):
level = [l0, l0+1]
nm = calculate_optimal_number(level, v_yl[l0:l0+2], self.eps, gamma)
self.cost_ml1.append( compute_mlmc_cost(nm, gamma, l0) )
# 3 levels
for l0 in range(0,2,1):
level = [l0, l0+1, l0+2]
nm = calculate_optimal_number(level, v_yl[l0:l0+3], self.eps, gamma)
self.cost_ml2.append( compute_mlmc_cost(nm, gamma, l0) )
# 4 levels
for l0 in range(0,1,1):
level = [l0, l0+1, l0+2, l0+3]
nm = calculate_optimal_number(level, v_yl[l0:l0+4], self.eps, gamma)
self.cost_ml3.append( compute_mlmc_cost(nm, gamma, l0) )
def plot(self):
plt.figure(figsize=(10, 8))
plt.subplot(1, 1, 1)
plt.semilogy(range(4), self.cost_mc, self.styles[0], label='MC')
plt.semilogy(range(1,4,1), self.cost_ml1, self.styles[1], label='MLMC: 2 levels')
plt.semilogy(range(2,4,1), self.cost_ml2, self.styles[2], label='MLMC: 3 levels')
plt.semilogy(range(3,4,1), self.cost_ml3, self.styles[3], label='MLMC: 4 levels')
plt.xlim(-1, 3+1)
plt.xlabel('level $l$')
plt.ylabel('Standardised Cost')
plt.legend(loc='upper left', frameon=True)
plt.title('$eps=%.2f$' %self.eps)
plt.savefig('cost_for_eps_%.2f.pdf' %self.eps)
plt.close()
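# Illustrative usage sketch (not in the original script): the eps and gamma
# values are placeholders and assume plot_helper provides read_variance(),
# calculate_optimal_number() and the cost functions used above.
# ptc = plotting_total_cost(eps=5e-2, gamma=1.4)
# ptc.plot() # writes cost_for_eps_0.05.pdf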
### exploratory plot (not used elsewhere)
def plot_cost_vs_eps(gamma = 1.4):
epss = [1e-2,2e-2,5e-2]
v_q, v_yl = read_variance()
cost_mc = []
cost_ml = []
level = [0,1,2,3]
for eps in epss:
cost_mc.append( compute_mc_cost(v_q[-1], eps, 3, gamma)*eps**2 )
nm = calculate_optimal_number(level, v_yl[0:4], eps, gamma)
cost_ml.append( compute_mlmc_cost(nm, gamma, 0)*eps**2 )
styles = ['o--', 'x--', 'd--', '*--', 's--']
plt.figure(figsize=(10, 8))
plt.subplot(1, 1, 1)
plt.loglog(epss, cost_mc, styles[0], label='MC')
plt.loglog(epss, cost_ml, styles[1], label='MLMC')
#plt.xlim(-1, 3+1)
plt.xlabel('accuracy $\epsilon$')
plt.ylabel('Standardised Cost')
plt.legend(loc='upper left', frameon=True)
#plt.title('$eps=%.2f$' %eps)
plt.savefig('cost_vs_eps.pdf')
plt.close()
### plot number of samples per level
def plot_nm(eps = 2e-2, gamma = 1.4):
v_q, v_yl = read_variance()
nm_mc = int( np.ceil(2*v_q[-1]/(eps**2)) )
level = [2,3]
nm_ml2 = calculate_optimal_number(level, v_yl[2:4], eps, gamma)
level = [1,2,3]
nm_ml3 = calculate_optimal_number(level, v_yl[1:4], eps, gamma)
level = [0,1,2,3]
nm_ml4 = calculate_optimal_number(level, v_yl[0:4], eps, gamma)
### plotting
styles = ['o--', 'x--', 'd--', '*--', 's--']
plt.figure(figsize=(10, 8))
plt.subplot(1, 1, 1)
plt.semilogy(3, nm_mc, styles[0], label='MC')
plt.semilogy(range(2,4,1), nm_ml2, styles[1], label='MLMC: 2 levels')
plt.semilogy(range(1,4,1), nm_ml3, styles[2], label='MLMC: 3 levels')
plt.semilogy(range(0,4,1), nm_ml4, styles[3], label='MLMC: 4 levels')
plt.xlim(-1, 3+1)
plt.xlabel('level $l$')
plt.ylabel(r'$N_l$')
plt.legend(loc='upper right', frameon=True)
plt.title('$eps=%.2f$' %eps)
plt.savefig('nm_for_eps_%.2f.pdf' %eps)
plt.close()
print(nm_ml2, nm_ml3, nm_ml4)
def plot_nm_vs_eps(gamma=1.4):
v_q, v_yl = read_variance()
#nm_mc = int( np.ceil(2*v_q[-1]/(eps**2)) )
epss = [1e-2,2e-2,5e-2]
level = [0,1,2,3]
nm_ml2 = calculate_optimal_number(level, v_yl[0:4], epss[0], gamma)
nm_ml3 = calculate_optimal_number(level, v_yl[0:4], epss[1], gamma)
nm_ml4 = calculate_optimal_number(level, v_yl[0:4], epss[2], gamma)
### plotting
styles = ['o--', 'x--', 'd--', '*--', 's--']
plt.figure(figsize=(10, 8))
plt.subplot(1, 1, 1)
#plt.semilogy(3, nm_mc, styles[0], label='MC')
plt.semilogy(range(0,4,1), nm_ml2, styles[0], label='MLMC: eps=%.2f' %epss[0])
plt.semilogy(range(0,4,1), nm_ml3, styles[1], label='MLMC: eps=%.2f' %epss[1])
plt.semilogy(range(0,4,1), nm_ml4, styles[2], label='MLMC: eps=%.2f' %epss[2])
#plt.xlim(-1, 3+1)
plt.xlabel('level $l$')
plt.ylabel(r'$N_l$')
plt.legend(loc='upper right', frameon=True)
#plt.title('$eps=%.2f$' %eps)
plt.savefig('nm_vs_eps.pdf')
plt.close()
def plot():
epss = [1e-1, 5e-2, 2e-2, 1e-2]#, 2e-3]
gamma = 1.4
v_q, v_yl = read_variance()
cost_mc = []
cost_ml1 = []
cost_ml2 = []
cost_ml3 = []
for eps in epss:
cost_mc.append( compute_mc_cost( v_q[-1], eps, 3, gamma ) )
level = [2,3]
nm = calculate_optimal_number(level, v_yl[2:4], eps, gamma)
cost_ml1.append( compute_mlmc_cost(nm, gamma, 2) )
level = [1,2,3]
nm = calculate_optimal_number(level, v_yl[1:4], eps, gamma)
cost_ml2.append( compute_mlmc_cost(nm, gamma, 1) )
level = [0,1,2,3]
nm = calculate_optimal_number(level, v_yl[0:4], eps, gamma)
cost_ml3.append( compute_mlmc_cost(nm, gamma, 0) )
styles = ['o--', 'x--', 'd--', '*--', 's--']
plt.figure(figsize=(10, 8))
plt.subplot(1, 1, 1)
plt.loglog(cost_mc, epss, styles[0], label='MC')
plt.loglog(cost_ml1, epss, styles[1], label='MLMC: 2 levels')
plt.loglog(cost_ml2, epss, styles[2], label='MLMC: 3 levels')
plt.loglog(cost_ml3, epss, styles[3], label='MLMC: 4 levels')
saving = np.zeros(3)
saving[0] = cost_mc[2] / cost_ml1[2]
saving[1] = cost_mc[2] / cost_ml2[2]
saving[2] = cost_mc[2] / cost_ml3[2]
print(saving)
eps = estimate_error()
plt.axhline(y=eps, linewidth=.05, color='k')
eps1 = estimate_error(L=2)
plt.axhline(y=eps1, linewidth=.05, color='r')
#plt.ylim(2e-3, 2e-1)
plt.xlabel('Standardised Cost')
plt.ylabel('Standard deviation of estimator')
plt.legend(loc='upper right', frameon=True)
plt.savefig('eps_vs_cost.pdf')
plt.close()
if __name__ == '__main__':
eps = 5e-2; gamma = 1.4
#plot_cost()
#plot_cost_vs_eps()
#plot_nm()
#plot_nm_vs_eps()
plot_cost_vs_eps() | gpl-3.0 |
mjirik/lisa | tests/shape_model_test.py | 1 | 1313 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 mjirik <mjirik@hp-mjirik>
#
# Distributed under terms of the MIT license.
"""
"""
import unittest
# import pytest
import lisa.shape_model as shm
import numpy as np
class ShapeModelTest(unittest.TestCase):
# @pytest.mark.interactive
def test_shape_model(self):
"""
Run shape model
"""
sm = shm.ShapeModel()
sm.model_margin = [0, 0, 0]
# train with first model
sh0 = np.zeros([20, 21, 1])
sh0[13:19, 7:16] = 1
sh0[17:19, 12:16] = 0
sh0[13:15, 13:16] = 0
sh0_vs = [2, 1, 1]
sm.train_one(sh0, sh0_vs)
# train with second model
sh1 = np.zeros([40, 20, 1])
sh1[16:27, 7:13] = 1
sh1[23:27, 11:13] = 0
sh1_vs = [1, 1, 1]
sm.train_one(sh1, sh1_vs)
sm.get_model([[15, 25], [10, 25], [0, 1]], [30, 30, 1])
# print mdl.shape
# import matplotlib.pyplot as plt
# import ipdb; ipdb.set_trace()
# plt.imshow(np.squeeze(sh1))
# plt.imshow(np.squeeze(sm.model[:, :, 0]))
# plt.imshow(np.squeeze(mdl))
# plt.show()
# import sed3
# ed = sed3.sed3(sh1)
# ed.show()
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
PyPSA/PyPSA | pypsa/linopf.py | 1 | 45233 | ## Copyright 2019 Tom Brown (KIT), Fabian Hofmann (FIAS)
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Build optimisation problems from PyPSA networks without Pyomo.
Originally retrieved from nomopyomo ( -> 'no more Pyomo').
"""
from .pf import (_as_snapshots, get_switchable_as_dense as get_as_dense)
from .descriptors import (get_bounds_pu, get_extendable_i, get_non_extendable_i,
expand_series, nominal_attrs, additional_linkports, Dict)
from .linopt import (linexpr, write_bound, write_constraint, write_objective,
set_conref, set_varref, get_con, get_var, join_exprs,
run_and_read_cbc, run_and_read_gurobi, run_and_read_glpk,
run_and_read_cplex, run_and_read_xpress,
define_constraints, define_variables, define_binaries,
align_with_static_component)
import pandas as pd
import numpy as np
from numpy import inf
from distutils.version import LooseVersion
pd_version = LooseVersion(pd.__version__)
import gc, time, os, re, shutil
from tempfile import mkstemp
import logging
logger = logging.getLogger(__name__)
lookup = pd.read_csv(os.path.join(os.path.dirname(__file__), 'variables.csv'),
index_col=['component', 'variable'])
def define_nominal_for_extendable_variables(n, c, attr):
"""
Initializes variables for nominal capacities for a given component and a
given attribute.
Parameters
----------
n : pypsa.Network
c : str
network component of which the nominal capacity should be defined
attr : str
name of the variable, e.g. 'p_nom'
"""
ext_i = get_extendable_i(n, c)
if ext_i.empty: return
lower = n.df(c)[attr+'_min'][ext_i]
upper = n.df(c)[attr+'_max'][ext_i]
define_variables(n, lower, upper, c, attr)
def define_dispatch_for_extendable_and_committable_variables(n, sns, c, attr):
"""
Initializes variables for power dispatch for a given component and a
given attribute.
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
"""
ext_i = get_extendable_i(n, c)
if c == 'Generator':
ext_i = ext_i.union(n.generators.query('committable').index)
if ext_i.empty: return
define_variables(n, -inf, inf, c, attr, axes=[sns, ext_i], spec='ext')
def define_dispatch_for_non_extendable_variables(n, sns, c, attr):
"""
Initializes variables for power dispatch for a given component and a
given attribute.
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
"""
fix_i = get_non_extendable_i(n, c)
if c == 'Generator':
fix_i = fix_i.difference(n.generators.query('committable').index)
if fix_i.empty: return
nominal_fix = n.df(c)[nominal_attrs[c]][fix_i]
min_pu, max_pu = get_bounds_pu(n, c, sns, fix_i, attr)
lower = min_pu.mul(nominal_fix)
upper = max_pu.mul(nominal_fix)
axes = [sns, fix_i]
dispatch = define_variables(n, -inf, inf, c, attr, axes=axes, spec='non_ext')
dispatch = linexpr((1, dispatch))
define_constraints(n, dispatch, '>=', lower, c, 'mu_lower', spec='non_ext')
define_constraints(n, dispatch, '<=', upper, c, 'mu_upper', spec='non_ext')
def define_dispatch_for_extendable_constraints(n, sns, c, attr):
"""
Sets power dispatch constraints for extendable devices for a given
component and a given attribute.
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
"""
ext_i = get_extendable_i(n, c)
if ext_i.empty: return
min_pu, max_pu = get_bounds_pu(n, c, sns, ext_i, attr)
operational_ext_v = get_var(n, c, attr)[ext_i]
nominal_v = get_var(n, c, nominal_attrs[c])[ext_i]
rhs = 0
lhs, *axes = linexpr((max_pu, nominal_v), (-1, operational_ext_v),
return_axes=True)
define_constraints(n, lhs, '>=', rhs, c, 'mu_upper', axes=axes, spec=attr)
lhs, *axes = linexpr((min_pu, nominal_v), (-1, operational_ext_v),
return_axes=True)
define_constraints(n, lhs, '<=', rhs, c, 'mu_lower', axes=axes, spec=attr)
def define_fixed_variable_constraints(n, sns, c, attr, pnl=True):
"""
Sets constraints for fixing variables of a given component and attribute
to the corresponding values in n.df(c)[attr + '_set'] if pnl is True, or
n.pnl(c)[attr + '_set']
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
pnl : bool, default True
Whether variable which should be fixed is time-dependent
"""
if pnl:
if attr + '_set' not in n.pnl(c): return
fix = n.pnl(c)[attr + '_set'].unstack().dropna()
if fix.empty: return
lhs = linexpr((1, get_var(n, c, attr).unstack()[fix.index]), as_pandas=False)
constraints = write_constraint(n, lhs, '=', fix).unstack().T
else:
if attr + '_set' not in n.df(c): return
fix = n.df(c)[attr + '_set'].dropna()
if fix.empty: return
lhs = linexpr((1, get_var(n, c, attr)[fix.index]), as_pandas=False)
constraints = write_constraint(n, lhs, '=', fix)
set_conref(n, constraints, c, f'mu_{attr}_set')
def define_generator_status_variables(n, snapshots):
com_i = n.generators.query('committable').index
ext_i = get_extendable_i(n, 'Generator')
if not (ext_i.intersection(com_i)).empty:
logger.warning("The following generators have both investment optimisation"
f" and unit commitment:\n\n\t{', '.join((ext_i.intersection(com_i)))}\n\nCurrently PyPSA cannot "
"do both these functions, so PyPSA is choosing investment optimisation "
"for these generators.")
com_i = com_i.difference(ext_i)
if com_i.empty: return
define_binaries(n, (snapshots, com_i), 'Generator', 'status')
def define_committable_generator_constraints(n, snapshots):
c, attr = 'Generator', 'status'
com_i = n.df(c).query('committable and not p_nom_extendable').index
if com_i.empty: return
nominal = n.df(c)[nominal_attrs[c]][com_i]
min_pu, max_pu = get_bounds_pu(n, c, snapshots, com_i, 'p')
lower = min_pu.mul(nominal)
upper = max_pu.mul(nominal)
status = get_var(n, c, attr)
p = get_var(n, c, 'p')[com_i]
lhs = linexpr((lower, status), (-1, p))
define_constraints(n, lhs, '<=', 0, 'Generators', 'committable_lb')
lhs = linexpr((upper, status), (-1, p))
define_constraints(n, lhs, '>=', 0, 'Generators', 'committable_ub')
def define_ramp_limit_constraints(n, sns):
"""
Defines ramp limits for generators with a valid ramp limit
"""
c = 'Generator'
rup_i = n.df(c).query('ramp_limit_up == ramp_limit_up').index
rdown_i = n.df(c).query('ramp_limit_down == ramp_limit_down').index
if rup_i.empty & rdown_i.empty:
return
fix_i = get_non_extendable_i(n, c)
ext_i = get_extendable_i(n, c)
com_i = n.df(c).query('committable').index.difference(ext_i)
p = get_var(n, c, 'p').loc[sns[1:]]
p_prev = get_var(n, c, 'p').shift(1).loc[sns[1:]]
# fix up
gens_i = rup_i.intersection(fix_i)
if not gens_i.empty:
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
rhs = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom')
define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', spec='nonext.')
# ext up
gens_i = rup_i.intersection(ext_i)
if not gens_i.empty:
limit_pu = n.df(c)['ramp_limit_up'][gens_i]
p_nom = get_var(n, c, 'p_nom')[gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (-limit_pu, p_nom))
define_constraints(n, lhs, '<=', 0, c, 'mu_ramp_limit_up', spec='ext.')
# com up
gens_i = rup_i.intersection(com_i)
if not gens_i.empty:
limit_start = n.df(c).loc[gens_i].eval('ramp_limit_start_up * p_nom')
limit_up = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom')
status = get_var(n, c, 'status').loc[sns[1:], gens_i]
status_prev = get_var(n, c, 'status').shift(1).loc[sns[1:], gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]),
(limit_start - limit_up, status_prev),
(- limit_start, status))
define_constraints(n, lhs, '<=', 0, c, 'mu_ramp_limit_up', spec='com.')
# fix down
gens_i = rdown_i.intersection(fix_i)
if not gens_i.empty:
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
rhs = n.df(c).loc[gens_i].eval('-1 * ramp_limit_down * p_nom')
define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', spec='nonext.')
# ext down
gens_i = rdown_i.intersection(ext_i)
if not gens_i.empty:
limit_pu = n.df(c)['ramp_limit_down'][gens_i]
p_nom = get_var(n, c, 'p_nom')[gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (limit_pu, p_nom))
define_constraints(n, lhs, '>=', 0, c, 'mu_ramp_limit_down', spec='ext.')
# com down
gens_i = rdown_i.intersection(com_i)
if not gens_i.empty:
limit_shut = n.df(c).loc[gens_i].eval('ramp_limit_shut_down * p_nom')
limit_down = n.df(c).loc[gens_i].eval('ramp_limit_down * p_nom')
status = get_var(n, c, 'status').loc[sns[1:], gens_i]
status_prev = get_var(n, c, 'status').shift(1).loc[sns[1:], gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]),
(limit_down - limit_shut, status),
(limit_shut, status_prev))
define_constraints(n, lhs, '>=', 0, c, 'mu_ramp_limit_down', spec='com.')
def define_nominal_constraints_per_bus_carrier(n, sns):
for carrier in n.carriers.index:
for bound, sense in [("max", "<="), ("min", ">=")]:
col = f'nom_{bound}_{carrier}'
if col not in n.buses.columns: continue
rhs = n.buses[col].dropna()
lhs = pd.Series('', rhs.index)
for c, attr in nominal_attrs.items():
if c not in n.one_port_components: continue
attr = nominal_attrs[c]
if (c, attr) not in n.variables.index: continue
nominals = get_var(n, c, attr)[n.df(c).carrier == carrier]
if nominals.empty: continue
per_bus = linexpr((1, nominals)).groupby(n.df(c).bus).sum()
lhs += per_bus.reindex(lhs.index, fill_value='')
if bound == 'max':
lhs = lhs[lhs != '']
rhs = rhs.reindex(lhs.index)
else:
assert (lhs != '').all(), (
f'No extendable components of carrier {carrier} on bus '
f'{list(lhs[lhs == ""].index)}')
define_constraints(n, lhs, sense, rhs, 'Bus', 'mu_' + col)
def define_nodal_balance_constraints(n, sns):
"""
Defines nodal balance constraint.
"""
def bus_injection(c, attr, groupcol='bus', sign=1):
# additional sign only necessary for branches in reverse direction
if 'sign' in n.df(c):
sign = sign * n.df(c).sign
expr = linexpr((sign, get_var(n, c, attr))).rename(columns=n.df(c)[groupcol])
# drop empty bus2, bus3 if multiline link
if c == 'Link':
expr.drop(columns='', errors='ignore', inplace=True)
return expr
# one might reduce this a bit by using n.branches and lookup
args = [['Generator', 'p'], ['Store', 'p'], ['StorageUnit', 'p_dispatch'],
['StorageUnit', 'p_store', 'bus', -1], ['Line', 's', 'bus0', -1],
['Line', 's', 'bus1', 1], ['Transformer', 's', 'bus0', -1],
['Transformer', 's', 'bus1', 1], ['Link', 'p', 'bus0', -1],
['Link', 'p', 'bus1', get_as_dense(n, 'Link', 'efficiency', sns)]]
args = [arg for arg in args if not n.df(arg[0]).empty]
for i in additional_linkports(n):
eff = get_as_dense(n, 'Link', f'efficiency{i}', sns)
args.append(['Link', 'p', f'bus{i}', eff])
kwargs = dict(numeric_only=False) if pd_version >= "1.3" else {}
lhs = (pd.concat([bus_injection(*arg) for arg in args], axis=1)
.groupby(axis=1, level=0)
.sum(**kwargs)
.reindex(columns=n.buses.index, fill_value=''))
sense = '='
rhs = ((- get_as_dense(n, 'Load', 'p_set', sns) * n.loads.sign)
.groupby(n.loads.bus, axis=1).sum()
.reindex(columns=n.buses.index, fill_value=0))
define_constraints(n, lhs, sense, rhs, 'Bus', 'marginal_price')
def define_kirchhoff_constraints(n, sns):
"""
Defines Kirchhoff voltage constraints
"""
comps = n.passive_branch_components & set(n.variables.index.levels[0])
if len(comps) == 0: return
branch_vars = pd.concat({c:get_var(n, c, 's') for c in comps}, axis=1)
def cycle_flow(ds):
ds = ds[lambda ds: ds!=0.].dropna()
vals = linexpr((ds, branch_vars[ds.index]), as_pandas=False)
return vals.sum(1)
constraints = []
for sub in n.sub_networks.obj:
branches = sub.branches()
C = pd.DataFrame(sub.C.todense(), index=branches.index)
if C.empty:
continue
carrier = n.sub_networks.carrier[sub.name]
weightings = branches.x_pu_eff if carrier == 'AC' else branches.r_pu_eff
C_weighted = 1e5 * C.mul(weightings, axis=0)
cycle_sum = C_weighted.apply(cycle_flow)
cycle_sum.index = sns
con = write_constraint(n, cycle_sum, '=', 0)
constraints.append(con)
if len(constraints) == 0: return
constraints = pd.concat(constraints, axis=1, ignore_index=True)
set_conref(n, constraints, 'SubNetwork', 'mu_kirchhoff_voltage_law')
def define_storage_unit_constraints(n, sns):
"""
Defines state of charge (soc) constraints for storage units. In principle
the constraint states:
previous_soc + p_store - p_dispatch + inflow - spill == soc
"""
sus_i = n.storage_units.index
if sus_i.empty: return
c = 'StorageUnit'
# spillage
upper = get_as_dense(n, c, 'inflow', sns).loc[:, lambda df: df.max() > 0]
spill = write_bound(n, 0, upper)
set_varref(n, spill, 'StorageUnit', 'spill')
eh = expand_series(n.snapshot_weightings.stores[sns], sus_i) #elapsed hours
eff_stand = expand_series(1-n.df(c).standing_loss, sns).T.pow(eh)
eff_dispatch = expand_series(n.df(c).efficiency_dispatch, sns).T
eff_store = expand_series(n.df(c).efficiency_store, sns).T
soc = get_var(n, c, 'state_of_charge')
cyclic_i = n.df(c).query('cyclic_state_of_charge').index
noncyclic_i = n.df(c).query('~cyclic_state_of_charge').index
prev_soc_cyclic = soc.shift().fillna(soc.loc[sns[-1]])
coeff_var = [(-1, soc),
(-1/eff_dispatch * eh, get_var(n, c, 'p_dispatch')),
(eff_store * eh, get_var(n, c, 'p_store'))]
lhs, *axes = linexpr(*coeff_var, return_axes=True)
def masked_term(coeff, var, cols):
return linexpr((coeff[cols], var[cols]))\
.reindex(index=axes[0], columns=axes[1], fill_value='').values
if ('StorageUnit', 'spill') in n.variables.index:
lhs += masked_term(-eh, get_var(n, c, 'spill'), spill.columns)
lhs += masked_term(eff_stand, prev_soc_cyclic, cyclic_i)
lhs += masked_term(eff_stand.loc[sns[1:]], soc.shift().loc[sns[1:]], noncyclic_i)
rhs = -get_as_dense(n, c, 'inflow', sns).mul(eh)
rhs.loc[sns[0], noncyclic_i] -= n.df(c).state_of_charge_initial[noncyclic_i]
define_constraints(n, lhs, '==', rhs, c, 'mu_state_of_charge')
def define_store_constraints(n, sns):
"""
Defines energy balance constraints for stores. In principle this states:
previous_e - p == e
"""
stores_i = n.stores.index
if stores_i.empty: return
c = 'Store'
variables = write_bound(n, -inf, inf, axes=[sns, stores_i])
set_varref(n, variables, c, 'p')
eh = expand_series(n.snapshot_weightings.stores[sns], stores_i) #elapsed hours
eff_stand = expand_series(1-n.df(c).standing_loss, sns).T.pow(eh)
e = get_var(n, c, 'e')
cyclic_i = n.df(c).query('e_cyclic').index
noncyclic_i = n.df(c).query('~e_cyclic').index
previous_e_cyclic = e.shift().fillna(e.loc[sns[-1]])
coeff_var = [(-eh, get_var(n, c, 'p')), (-1, e)]
lhs, *axes = linexpr(*coeff_var, return_axes=True)
def masked_term(coeff, var, cols):
return linexpr((coeff[cols], var[cols]))\
.reindex(index=axes[0], columns=axes[1], fill_value='').values
lhs += masked_term(eff_stand, previous_e_cyclic, cyclic_i)
lhs += masked_term(eff_stand.loc[sns[1:]], e.shift().loc[sns[1:]], noncyclic_i)
rhs = pd.DataFrame(0, sns, stores_i)
rhs.loc[sns[0], noncyclic_i] -= n.df(c)['e_initial'][noncyclic_i]
define_constraints(n, lhs, '==', rhs, c, 'mu_state_of_charge')
def define_global_constraints(n, sns):
"""
Defines global constraints for the optimization. Possible types are
1. primary_energy
Use this to constraint the byproducts of primary energy sources as
CO2
2. transmission_volume_expansion_limit
Use this to set a limit for line volume expansion. Possible carriers
are 'AC' and 'DC'
3. transmission_expansion_cost_limit
Use this to set a limit for line expansion costs. Possible carriers
are 'AC' and 'DC'
"""
glcs = n.global_constraints.query('type == "primary_energy"')
for name, glc in glcs.iterrows():
rhs = glc.constant
lhs = ''
carattr = glc.carrier_attribute
emissions = n.carriers.query(f'{carattr} != 0')[carattr]
if emissions.empty: continue
# generators
gens = n.generators.query('carrier in @emissions.index')
if not gens.empty:
em_pu = gens.carrier.map(emissions)/gens.efficiency
em_pu = n.snapshot_weightings.generators[sns].to_frame('weightings') @\
em_pu.to_frame('weightings').T
vals = linexpr((em_pu, get_var(n, 'Generator', 'p')[gens.index]),
as_pandas=False)
lhs += join_exprs(vals)
# storage units
sus = n.storage_units.query('carrier in @emissions.index and '
'not cyclic_state_of_charge')
sus_i = sus.index
if not sus.empty:
coeff_val = (-sus.carrier.map(emissions), get_var(n, 'StorageUnit',
'state_of_charge').loc[sns[-1], sus_i])
vals = linexpr(coeff_val, as_pandas=False)
lhs = lhs + '\n' + join_exprs(vals)
rhs -= sus.carrier.map(emissions) @ sus.state_of_charge_initial
# stores (copy to avoid over-writing existing carrier attribute)
stores = n.stores.copy()
stores['carrier'] = stores.bus.map(n.buses.carrier)
stores = stores.query('carrier in @emissions.index and not e_cyclic')
if not stores.empty:
coeff_val = (-stores.carrier.map(emissions), get_var(n, 'Store', 'e')
.loc[sns[-1], stores.index])
vals = linexpr(coeff_val, as_pandas=False)
lhs = lhs + '\n' + join_exprs(vals)
rhs -= stores.carrier.map(emissions) @ stores.e_initial
con = write_constraint(n, lhs, glc.sense, rhs, axes=pd.Index([name]))
set_conref(n, con, 'GlobalConstraint', 'mu', name)
# for the next two we need a line carrier
if len(n.global_constraints) > len(glcs):
n.lines['carrier'] = n.lines.bus0.map(n.buses.carrier)
# expansion limits
glcs = n.global_constraints.query('type == '
'"transmission_volume_expansion_limit"')
substr = lambda s: re.sub('[\[\]\(\)]', '', s)
for name, glc in glcs.iterrows():
car = [substr(c.strip()) for c in glc.carrier_attribute.split(',')]
lhs = ''
for c, attr in (('Line', 's_nom'), ('Link', 'p_nom')):
if n.df(c).empty: continue
ext_i = n.df(c).query(f'carrier in @car and {attr}_extendable').index
if ext_i.empty: continue
v = linexpr((n.df(c).length[ext_i], get_var(n, c, attr)[ext_i]),
as_pandas=False)
lhs += '\n' + join_exprs(v)
if lhs == '': continue
sense = glc.sense
rhs = glc.constant
con = write_constraint(n, lhs, sense, rhs, axes=pd.Index([name]))
set_conref(n, con, 'GlobalConstraint', 'mu', name)
# expansion cost limits
glcs = n.global_constraints.query('type == '
'"transmission_expansion_cost_limit"')
for name, glc in glcs.iterrows():
car = [substr(c.strip()) for c in glc.carrier_attribute.split(',')]
lhs = ''
for c, attr in (('Line', 's_nom'), ('Link', 'p_nom')):
ext_i = n.df(c).query(f'carrier in @car and {attr}_extendable').index
if ext_i.empty: continue
v = linexpr((n.df(c).capital_cost[ext_i], get_var(n, c, attr)[ext_i]),
as_pandas=False)
lhs += '\n' + join_exprs(v)
if lhs == '': continue
sense = glc.sense
rhs = glc.constant
con = write_constraint(n, lhs, sense, rhs, axes=pd.Index([name]))
set_conref(n, con, 'GlobalConstraint', 'mu', name)
def define_objective(n, sns):
"""
Defines and writes out the objective function
"""
# constant for already done investment
nom_attr = nominal_attrs.items()
constant = 0
for c, attr in nom_attr:
ext_i = get_extendable_i(n, c)
constant += n.df(c)[attr][ext_i] @ n.df(c).capital_cost[ext_i]
object_const = write_bound(n, constant, constant)
write_objective(n, linexpr((-1, object_const), as_pandas=False)[0])
n.objective_constant = constant
for c, attr in lookup.query('marginal_cost').index:
cost = (get_as_dense(n, c, 'marginal_cost', sns)
.loc[:, lambda ds: (ds != 0).all()]
.mul(n.snapshot_weightings.objective[sns], axis=0))
if cost.empty: continue
terms = linexpr((cost, get_var(n, c, attr).loc[sns, cost.columns]))
write_objective(n, terms)
# investment
for c, attr in nominal_attrs.items():
cost = n.df(c)['capital_cost'][get_extendable_i(n, c)]
if cost.empty: continue
terms = linexpr((cost, get_var(n, c, attr)[cost.index]))
write_objective(n, terms)
def prepare_lopf(n, snapshots=None, keep_files=False, skip_objective=False,
extra_functionality=None, solver_dir=None):
"""
Sets up the linear problem and writes it out to an lp file.
Returns
-------
Tuple (fdp, problem_fn) indicating the file descriptor and the file name of
the lp file
"""
n._xCounter, n._cCounter = 1, 1
n.vars, n.cons = Dict(), Dict()
cols = ['component', 'name', 'pnl', 'specification']
n.variables = pd.DataFrame(columns=cols).set_index(cols[:2])
n.constraints = pd.DataFrame(columns=cols).set_index(cols[:2])
snapshots = n.snapshots if snapshots is None else snapshots
start = time.time()
tmpkwargs = dict(text=True, dir=solver_dir)
# mkstemp(suffix, prefix, **tmpkwargs)
fdo, objective_fn = mkstemp('.txt', 'pypsa-objective-', **tmpkwargs)
fdc, constraints_fn = mkstemp('.txt', 'pypsa-constraints-', **tmpkwargs)
fdb, bounds_fn = mkstemp('.txt', 'pypsa-bounds-', **tmpkwargs)
fdi, binaries_fn = mkstemp('.txt', 'pypsa-binaries-', **tmpkwargs)
fdp, problem_fn = mkstemp('.lp', 'pypsa-problem-', **tmpkwargs)
n.objective_f = open(objective_fn, mode='w')
n.constraints_f = open(constraints_fn, mode='w')
n.bounds_f = open(bounds_fn, mode='w')
n.binaries_f = open(binaries_fn, mode='w')
n.objective_f.write('\* LOPF *\n\nmin\nobj:\n')
n.constraints_f.write("\n\ns.t.\n\n")
n.bounds_f.write("\nbounds\n")
n.binaries_f.write("\nbinary\n")
for c, attr in lookup.query('nominal and not handle_separately').index:
define_nominal_for_extendable_variables(n, c, attr)
# define_fixed_variable_constraints(n, snapshots, c, attr, pnl=False)
for c, attr in lookup.query('not nominal and not handle_separately').index:
define_dispatch_for_non_extendable_variables(n, snapshots, c, attr)
define_dispatch_for_extendable_and_committable_variables(n, snapshots, c, attr)
align_with_static_component(n, c, attr)
define_dispatch_for_extendable_constraints(n, snapshots, c, attr)
# define_fixed_variable_constraints(n, snapshots, c, attr)
define_generator_status_variables(n, snapshots)
define_nominal_constraints_per_bus_carrier(n, snapshots)
# consider only state_of_charge_set for the moment
define_fixed_variable_constraints(n, snapshots, 'StorageUnit', 'state_of_charge')
define_fixed_variable_constraints(n, snapshots, 'Store', 'e')
define_committable_generator_constraints(n, snapshots)
define_ramp_limit_constraints(n, snapshots)
define_storage_unit_constraints(n, snapshots)
define_store_constraints(n, snapshots)
define_kirchhoff_constraints(n, snapshots)
define_nodal_balance_constraints(n, snapshots)
define_global_constraints(n, snapshots)
if skip_objective:
logger.info("The argument `skip_objective` is set to True. Expecting a "
"custom objective to be build via `extra_functionality`.")
else:
define_objective(n, snapshots)
if extra_functionality is not None:
extra_functionality(n, snapshots)
n.binaries_f.write("end\n")
# explicit closing with file descriptor is necessary for windows machines
for f, fd in (('bounds_f', fdb), ('constraints_f', fdc),
('objective_f', fdo), ('binaries_f', fdi)):
getattr(n, f).close(); delattr(n, f); os.close(fd)
# concatenate files
with open(problem_fn, 'wb') as wfd:
for f in [objective_fn, constraints_fn, bounds_fn, binaries_fn]:
with open(f,'rb') as fd:
shutil.copyfileobj(fd, wfd)
if not keep_files:
os.remove(f)
logger.info(f'Total preparation time: {round(time.time()-start, 2)}s')
return fdp, problem_fn
def assign_solution(n, sns, variables_sol, constraints_dual,
keep_references=False, keep_shadowprices=None):
"""
Helper function. Assigns the solution of a successful optimization to the
network.
"""
def set_from_frame(pnl, attr, df):
if attr not in pnl: #use this for subnetworks_t
pnl[attr] = df.reindex(n.snapshots)
elif pnl[attr].empty:
pnl[attr] = df.reindex(n.snapshots)
else:
pnl[attr].loc[sns, :] = df.reindex(columns=pnl[attr].columns)
pop = not keep_references
def map_solution(c, attr):
variables = get_var(n, c, attr, pop=pop)
predefined = True
if (c, attr) not in lookup.index:
predefined = False
n.sols[c] = n.sols[c] if c in n.sols else Dict(df=pd.DataFrame(), pnl={})
n.solutions.at[(c, attr), 'in_comp'] = predefined
if isinstance(variables, pd.DataFrame):
# case that variables are timedependent
n.solutions.at[(c, attr), 'pnl'] = True
pnl = n.pnl(c) if predefined else n.sols[c].pnl
values = variables.stack().map(variables_sol).unstack()
if c in n.passive_branch_components and attr == "s":
set_from_frame(pnl, 'p0', values)
set_from_frame(pnl, 'p1', - values)
elif c == 'Link' and attr == "p":
set_from_frame(pnl, 'p0', values)
for i in ['1'] + additional_linkports(n):
i_eff = '' if i == '1' else i
eff = get_as_dense(n, 'Link', f'efficiency{i_eff}', sns)
set_from_frame(pnl, f'p{i}', - values * eff)
pnl[f'p{i}'].loc[sns, n.links.index[n.links[f'bus{i}'] == ""]] = \
n.component_attrs['Link'].loc[f'p{i}','default']
else:
set_from_frame(pnl, attr, values)
else:
# case that variables are static
n.solutions.at[(c, attr), 'pnl'] = False
sol = variables.map(variables_sol)
if predefined:
non_ext = n.df(c)[attr]
n.df(c)[attr + '_opt'] = sol.reindex(non_ext.index).fillna(non_ext)
else:
n.sols[c].df[attr] = sol
n.sols = Dict()
n.solutions = pd.DataFrame(index=n.variables.index, columns=['in_comp', 'pnl'])
for c, attr in n.variables.index:
map_solution(c, attr)
# if nominal capacity was no variable set optimal value to nominal
for c, attr in lookup.query('nominal').index.difference(n.variables.index):
n.df(c)[attr+'_opt'] = n.df(c)[attr]
# recalculate storageunit net dispatch
if not n.df('StorageUnit').empty:
c = 'StorageUnit'
n.pnl(c)['p'] = n.pnl(c)['p_dispatch'] - n.pnl(c)['p_store']
# duals
if keep_shadowprices == False:
keep_shadowprices = []
sp = n.constraints.index
if isinstance(keep_shadowprices, list):
sp = sp[sp.isin(keep_shadowprices, level=0)]
def map_dual(c, attr):
# If c is a pypsa component name the dual is stored at n.pnl(c)
# or n.df(c). For the second case the index of the constraints have to
# be a subset of n.df(c).index otherwise the dual is stored at
# n.duals[c].df
constraints = get_con(n, c, attr, pop=pop)
is_pnl = isinstance(constraints, pd.DataFrame)
# TODO: setting the sign is not very clear
sign = 1 if 'upper' in attr or attr == 'marginal_price' else -1
n.dualvalues.at[(c, attr), 'pnl'] = is_pnl
to_component = c in n.all_components
if is_pnl:
n.dualvalues.at[(c, attr), 'in_comp'] = to_component
duals = constraints.stack().map(sign * constraints_dual).unstack()
if c not in n.duals and not to_component:
n.duals[c] = Dict(df=pd.DataFrame(), pnl={})
pnl = n.pnl(c) if to_component else n.duals[c].pnl
set_from_frame(pnl, attr, duals)
else:
# here to_component can change
duals = constraints.map(sign * constraints_dual)
if to_component:
to_component = (duals.index.isin(n.df(c).index).all())
n.dualvalues.at[(c, attr), 'in_comp'] = to_component
if c not in n.duals and not to_component:
n.duals[c] = Dict(df=pd.DataFrame(), pnl={})
df = n.df(c) if to_component else n.duals[c].df
df[attr] = duals
n.duals = Dict()
n.dualvalues = pd.DataFrame(index=sp, columns=['in_comp', 'pnl'])
# extract shadow prices attached to components
for c, attr in sp:
map_dual(c, attr)
# correct prices for snapshot weightings
n.buses_t.marginal_price.loc[sns] = (
n.buses_t.marginal_price.loc[sns].divide(
n.snapshot_weightings.objective.loc[sns],axis=0))
# discard remaining if wanted
if not keep_references:
for c, attr in n.constraints.index.difference(sp):
get_con(n, c, attr, pop)
# load
if len(n.loads):
set_from_frame(n.pnl('Load'), 'p', get_as_dense(n, 'Load', 'p_set', sns))
# clean up vars and cons
for c in list(n.vars):
if n.vars[c].df.empty and n.vars[c].pnl == {}: n.vars.pop(c)
for c in list(n.cons):
if n.cons[c].df.empty and n.cons[c].pnl == {}: n.cons.pop(c)
# recalculate injection
ca = [('Generator', 'p', 'bus' ), ('Store', 'p', 'bus'),
('Load', 'p', 'bus'), ('StorageUnit', 'p', 'bus'),
('Link', 'p0', 'bus0'), ('Link', 'p1', 'bus1')]
for i in additional_linkports(n):
ca.append(('Link', f'p{i}', f'bus{i}'))
sign = lambda c: n.df(c).sign if 'sign' in n.df(c) else -1 #sign for 'Link'
n.buses_t.p = pd.concat(
[n.pnl(c)[attr].mul(sign(c)).rename(columns=n.df(c)[group])
for c, attr, group in ca], axis=1).groupby(level=0, axis=1).sum()\
.reindex(columns=n.buses.index, fill_value=0)
def v_ang_for_(sub):
buses_i = sub.buses_o
if len(buses_i) == 1:
return pd.DataFrame(0, index=sns, columns=buses_i)
sub.calculate_B_H(skip_pre=True)
Z = pd.DataFrame(np.linalg.pinv((sub.B).todense()), buses_i, buses_i)
Z -= Z[sub.slack_bus]
return n.buses_t.p.reindex(columns=buses_i) @ Z
n.buses_t.v_ang = (pd.concat([v_ang_for_(sub) for sub in n.sub_networks.obj],
axis=1)
.reindex(columns=n.buses.index, fill_value=0))
def network_lopf(n, snapshots=None, solver_name="cbc",
solver_logfile=None, extra_functionality=None, skip_objective=False,
skip_pre=False, extra_postprocessing=None, formulation="kirchhoff",
keep_references=False, keep_files=False,
keep_shadowprices=['Bus', 'Line', 'Transformer', 'Link', 'GlobalConstraint'],
solver_options=None, warmstart=False, store_basis=False,
solver_dir=None):
"""
Linear optimal power flow for a group of snapshots.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
solver_name : string
Must be a solver name that pyomo recognises and that is
installed, e.g. "glpk", "gurobi"
solver_logfile : None|string
If not None, sets the logfile option of the solver.
solver_options : dictionary
A dictionary with additional options that get passed to the solver.
(e.g. {'threads':2} tells gurobi to use only 2 cpus)
solver_dir : str, default None
Path to directory where necessary files are written, default None leads
to the default temporary directory used by tempfile.mkstemp().
keep_files : bool, default False
Keep the files that pyomo constructs from OPF problem
construction, e.g. .lp file - useful for debugging
formulation : string
Formulation of the linear power flow equations to use; must be
one of ["angles","cycles","kirchhoff","ptdf"]
extra_functionality : callable function
This function must take two arguments
`extra_functionality(network,snapshots)` and is called after
the model building is complete, but before it is sent to the
solver. It allows the user to
add/change constraints and add/change the objective function.
skip_pre : bool, default False
Skip the preliminary steps of computing topology.
skip_objective : bool, default False
Skip writing the default objective function. If False, a custom
objective has to be defined via extra_functionality.
extra_postprocessing : callable function
This function must take three arguments
`extra_postprocessing(network,snapshots,duals)` and is called after
the model has solved and the results are extracted. It allows the user
to extract further information about the solution, such as additional
shadow prices.
warmstart : bool or string, default False
Use this to warmstart the optimization. Pass a string which gives
the path to the basis file. If set to True, a path to
a basis file must be given in network.basis_fn.
store_basis : bool, default False
Whether to store the basis of the optimization results. If True,
the path to the basis file is saved in network.basis_fn. Note that
a basis can only be stored if simplex, dual-simplex, or barrier
*with* crossover is used for solving.
keep_references : bool, default False
Keep the references of variable and constraint names within the
network. These can be looked up in `n.vars` and `n.cons` after solving.
keep_shadowprices : bool or list of component names
Keep shadow prices for all constraints, if set to True. If a list
is passed the shadow prices will only be parsed for those constraint
names. Defaults to ['Bus', 'Line', 'GlobalConstraint'].
After solving, the shadow prices can be retrieved using
:func:`pypsa.linopt.get_dual` with corresponding name
"""
supported_solvers = ["cbc", "gurobi", 'glpk', 'cplex', 'xpress']
if solver_name not in supported_solvers:
raise NotImplementedError(f"Solver {solver_name} not in "
f"supported solvers: {supported_solvers}")
if formulation != "kirchhoff":
raise NotImplementedError("Only the kirchhoff formulation is supported")
if n.generators.committable.any():
logger.warning("Unit commitment is not yet completely implemented for "
"optimising without pyomo. Thus minimum up time, minimum down time, "
"start up costs, shut down costs will be ignored.")
snapshots = _as_snapshots(n, snapshots)
if not skip_pre:
n.calculate_dependent_values()
n.determine_network_topology()
logger.info("Prepare linear problem")
fdp, problem_fn = prepare_lopf(n, snapshots, keep_files, skip_objective,
extra_functionality, solver_dir)
fds, solution_fn = mkstemp(prefix='pypsa-solve', suffix='.sol', dir=solver_dir)
if warmstart == True:
warmstart = n.basis_fn
logger.info("Solve linear problem using warmstart")
else:
logger.info(f"Solve linear problem using {solver_name.title()} solver")
solve = eval(f'run_and_read_{solver_name}')
res = solve(n, problem_fn, solution_fn, solver_logfile,
solver_options, warmstart, store_basis)
status, termination_condition, variables_sol, constraints_dual, obj = res
if not keep_files:
os.close(fdp); os.remove(problem_fn)
os.close(fds); os.remove(solution_fn)
if status == "ok" and termination_condition == "optimal":
logger.info('Optimization successful. Objective value: {:.2e}'.format(obj))
elif status == "warning" and termination_condition == "suboptimal":
logger.warning('Optimization solution is sub-optimal. '
'Objective value: {:.2e}'.format(obj))
else:
logger.warning(f'Optimization failed with status {status} and '
f'termination condition {termination_condition}')
return status, termination_condition
n.objective = obj
assign_solution(n, snapshots, variables_sol, constraints_dual,
keep_references=keep_references,
keep_shadowprices=keep_shadowprices)
gc.collect()
return status,termination_condition
def ilopf(n, snapshots=None, msq_threshold=0.05, min_iterations=1,
max_iterations=100, track_iterations=False, **kwargs):
'''
Iterative linear optimization updating the line parameters for passive
AC and DC lines. This is helpful when line expansion is enabled. After each
successful solve, line impedances and line resistances are recalculated
based on the optimization result. If warmstart is possible, it uses the
result from the previous iteration to speed up the optimization.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
msq_threshold: float, default 0.05
Maximal mean square difference between optimized line capacity of
the current and the previous iteration. As soon as this threshold is
undercut, and the number of iterations is bigger than 'min_iterations'
the iterative optimization stops
min_iterations : integer, default 1
Minimal number of iterations to run regardless of whether the msq_threshold
is already undercut
max_iterations : integer, default 100
Maximal number of iterations to run regardless of whether msq_threshold
is already undercut
track_iterations: bool, default False
If True, the intermediate branch capacities and values of the
objective function are recorded for each iteration. The values of
iteration 0 represent the initial state.
**kwargs
Keyword arguments of the lopf function which runs at each iteration
'''
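# Typical call (keyword values are illustrative; remaining kwargs such as
# solver_name are passed through to network_lopf):
# ilopf(n, msq_threshold=0.05, max_iterations=10, solver_name="cbc")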
n.lines['carrier'] = n.lines.bus0.map(n.buses.carrier)
ext_i = get_extendable_i(n, 'Line')
typed_i = n.lines.query('type != ""').index
ext_untyped_i = ext_i.difference(typed_i)
ext_typed_i = ext_i.intersection(typed_i)
base_s_nom = (np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom))
n.lines.loc[ext_typed_i, 'num_parallel'] = (n.lines.s_nom/base_s_nom)[ext_typed_i]
def update_line_params(n, s_nom_prev):
factor = n.lines.s_nom_opt / s_nom_prev
for attr, carrier in (('x', 'AC'), ('r', 'DC')):
ln_i = (n.lines.query('carrier == @carrier').index.intersection(ext_untyped_i))
n.lines.loc[ln_i, attr] /= factor[ln_i]
ln_i = ext_i.intersection(typed_i)
n.lines.loc[ln_i, 'num_parallel'] = (n.lines.s_nom_opt/base_s_nom)[ln_i]
def msq_diff(n, s_nom_prev):
lines_err = np.sqrt((s_nom_prev - n.lines.s_nom_opt).pow(2).mean()) / \
n.lines['s_nom_opt'].mean()
logger.info(f"Mean square difference after iteration {iteration} is "
f"{lines_err}")
return lines_err
def save_optimal_capacities(n, iteration, status):
for c, attr in pd.Series(nominal_attrs)[n.branch_components].items():
n.df(c)[f'{attr}_opt_{iteration}'] = n.df(c)[f'{attr}_opt']
setattr(n, f"status_{iteration}", status)
setattr(n, f"objective_{iteration}", n.objective)
n.iteration = iteration
n.global_constraints = n.global_constraints.rename(columns={'mu': f'mu_{iteration}'})
if track_iterations:
for c, attr in pd.Series(nominal_attrs)[n.branch_components].items():
n.df(c)[f'{attr}_opt_0'] = n.df(c)[f'{attr}']
iteration = 1
kwargs['store_basis'] = True
diff = msq_threshold
while diff >= msq_threshold or iteration < min_iterations:
if iteration > max_iterations:
logger.info(f'Iteration {iteration} beyond max_iterations '
f'{max_iterations}. Stopping ...')
break
s_nom_prev = n.lines.s_nom_opt.copy() if iteration else n.lines.s_nom.copy()
kwargs['warmstart'] = bool(iteration and ('basis_fn' in n.__dir__()))
status, termination_condition = network_lopf(n, snapshots, **kwargs)
assert status == 'ok', (f'Optimization failed with status {status}'
f'and termination {termination_condition}')
if track_iterations:
save_optimal_capacities(n, iteration, status)
update_line_params(n, s_nom_prev)
diff = msq_diff(n, s_nom_prev)
iteration += 1
logger.info('Running last lopf with fixed branches (HVDC links and HVAC lines)')
ext_dc_links_b = n.links.p_nom_extendable & (n.links.carrier == "DC")
s_nom_orig = n.lines.s_nom.copy()
p_nom_orig = n.links.p_nom.copy()
n.lines.loc[ext_i, ['s_nom', 's_nom_extendable']] = n.lines.loc[ext_i, 's_nom_opt'], False
n.links.loc[ext_dc_links_b, ["p_nom", "p_nom_extendable"]] = n.links.loc[ext_dc_links_b, "p_nom_opt"], False
kwargs['warmstart'] = False
network_lopf(n, snapshots, **kwargs)
n.lines.loc[ext_i, ['s_nom', 's_nom_extendable']] = s_nom_orig.loc[ext_i], True
n.links.loc[ext_dc_links_b, ['p_nom', 'p_nom_extendable']] = p_nom_orig.loc[ext_dc_links_b], True
## add costs of additional infrastructure to objective value of last iteration
obj_links = n.links[ext_dc_links_b].eval("capital_cost * (p_nom_opt - p_nom_min)").sum()
obj_lines = n.lines.eval("capital_cost * (s_nom_opt - s_nom_min)").sum()
n.objective += obj_links + obj_lines
n.objective_constant -= (obj_links + obj_lines)
| gpl-3.0 |
pvlib/pvlib-python | pvlib/tests/iotools/test_crn.py | 2 | 3259 | import pandas as pd
import numpy as np
from numpy import dtype, nan
import pytest
from pvlib.iotools import crn
from ..conftest import DATA_DIR, assert_frame_equal
@pytest.fixture
def columns():
return [
'WBANNO', 'UTC_DATE', 'UTC_TIME', 'LST_DATE', 'LST_TIME', 'CRX_VN',
'longitude', 'latitude', 'temp_air', 'PRECIPITATION', 'ghi',
'ghi_flag',
'SURFACE_TEMPERATURE', 'ST_TYPE', 'ST_FLAG', 'relative_humidity',
'relative_humidity_flag', 'SOIL_MOISTURE_5', 'SOIL_TEMPERATURE_5',
'WETNESS', 'WET_FLAG', 'wind_speed', 'wind_speed_flag']
@pytest.fixture
def dtypes():
return [
dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64'),
dtype('int64'), dtype('O'), dtype('float64'), dtype('float64'),
dtype('float64'), dtype('float64'), dtype('float64'),
dtype('int64'), dtype('float64'), dtype('O'), dtype('int64'),
dtype('float64'), dtype('int64'), dtype('float64'),
dtype('float64'), dtype('int64'), dtype('int64'), dtype('float64'),
dtype('int64')]
@pytest.fixture
def testfile():
return DATA_DIR / 'CRNS0101-05-2019-AZ_Tucson_11_W.txt'
@pytest.fixture
def testfile_problems():
return DATA_DIR / 'CRN_with_problems.txt'
def test_read_crn(testfile, columns, dtypes):
index = pd.DatetimeIndex(['2019-01-01 16:10:00',
'2019-01-01 16:15:00',
'2019-01-01 16:20:00',
'2019-01-01 16:25:00'],
freq=None).tz_localize('UTC')
values = np.array([
[53131, 20190101, 1610, 20190101, 910, 3, -111.17, 32.24, nan,
0.0, 296.0, 0, 4.4, 'C', 0, 90.0, 0, nan, nan, 24, 0, 0.78, 0],
[53131, 20190101, 1615, 20190101, 915, 3, -111.17, 32.24, 3.3,
0.0, 183.0, 0, 4.0, 'C', 0, 87.0, 0, nan, nan, 1182, 0, 0.36, 0],
[53131, 20190101, 1620, 20190101, 920, 3, -111.17, 32.24, 3.5,
0.0, 340.0, 0, 4.3, 'C', 0, 83.0, 0, nan, nan, 1183, 0, 0.53, 0],
[53131, 20190101, 1625, 20190101, 925, 3, -111.17, 32.24, 4.0,
0.0, 393.0, 0, 4.8, 'C', 0, 81.0, 0, nan, nan, 1223, 0, 0.64, 0]])
expected = pd.DataFrame(values, columns=columns, index=index)
for (col, _dtype) in zip(expected.columns, dtypes):
expected[col] = expected[col].astype(_dtype)
out = crn.read_crn(testfile)
assert_frame_equal(out, expected)
def test_read_crn_problems(testfile_problems, columns, dtypes):
# GH1025
index = pd.DatetimeIndex(['2020-07-06 12:00:00',
'2020-07-06 13:10:00'],
freq=None).tz_localize('UTC')
values = np.array([
[92821, 20200706, 1200, 20200706, 700, '3', -80.69, 28.62, 24.9,
0.0, 190.0, 0, 25.5, 'C', 0, 93.0, 0, nan, nan, 990, 0, 1.57, 0],
[92821, 20200706, 1310, 20200706, 810, '2.623', -80.69, 28.62,
26.9, 0.0, 430.0, 0, 30.2, 'C', 0, 87.0, 0, nan, nan, 989, 0,
1.64, 0]])
expected = pd.DataFrame(values, columns=columns, index=index)
for (col, _dtype) in zip(expected.columns, dtypes):
expected[col] = expected[col].astype(_dtype)
out = crn.read_crn(testfile_problems)
assert_frame_equal(out, expected)
| bsd-3-clause |
kenji0x02/srcnn-from-scratch | core/resume_train_srcnn.py | 1 | 1279 | # coding: utf-8
from load_image import load_image
import numpy as np
import matplotlib.pyplot as plt
from srcnn import SRCNN
from common.trainer import Trainer
(x_train, t_train), (x_test, t_test) = load_image()
print('resume training...')
start_epoch = 100
max_epochs = 50000
network = SRCNN(params={'f1': 9, 'f2': 1, 'f3': 5, 'n1': 64, 'n2': 32, 'channel': 3},
weight_init_std=0.01)
start_epoch_file = "./result/params_epoch_" + "{0:06d}".format(start_epoch) + ".pkl"
network.load_params(start_epoch_file)
trainer = Trainer(network, x_train, t_train, x_test, t_test,
epochs=max_epochs, mini_batch_size=50,
optimizer='Adam', optimizer_param={'lr': 0.01},
evaluate_sample_num_per_epoch=50,
start_epoch=start_epoch)
trainer.train()
# Save the parameters
network.save_params("./result/srcnn_params.pkl")
print("Saved Network Parameters!")
# Plot the results
markers = {'train': 'o', 'test': 's'}
x = np.arange(max_epochs)
plt.plot(x, trainer.train_acc_list, marker='o', label='train', markevery=2)
plt.plot(x, trainer.test_acc_list, marker='s', label='test', markevery=2)
plt.xlabel("epochs")
plt.ylabel("PSNR")
plt.legend(loc='lower right')
plt.show()
| mit |
cancan101/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io_test.py | 18 | 5832 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
| apache-2.0 |
hainm/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters at the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
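# Hedged variation (not run by default): the percolation effect described in the
# docstring above is easier to see with a sparser graph, e.g.
#   sparse_graph = kneighbors_graph(X, 5, include_self=False)
# passed below in place of knn_graph.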
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
pb-pravin/data-science-from-scratch | code/recommender_systems.py | 60 | 6291 | from __future__ import division
import math, random
from collections import defaultdict, Counter
from linear_algebra import dot
users_interests = [
["Hadoop", "Big Data", "HBase", "Java", "Spark", "Storm", "Cassandra"],
["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"],
["Python", "scikit-learn", "scipy", "numpy", "statsmodels", "pandas"],
["R", "Python", "statistics", "regression", "probability"],
["machine learning", "regression", "decision trees", "libsvm"],
["Python", "R", "Java", "C++", "Haskell", "programming languages"],
["statistics", "probability", "mathematics", "theory"],
["machine learning", "scikit-learn", "Mahout", "neural networks"],
["neural networks", "deep learning", "Big Data", "artificial intelligence"],
["Hadoop", "Java", "MapReduce", "Big Data"],
["statistics", "R", "statsmodels"],
["C++", "deep learning", "artificial intelligence", "probability"],
["pandas", "R", "Python"],
["databases", "HBase", "Postgres", "MySQL", "MongoDB"],
["libsvm", "regression", "support vector machines"]
]
popular_interests = Counter(interest
for user_interests in users_interests
for interest in user_interests).most_common()
def most_popular_new_interests(user_interests, max_results=5):
suggestions = [(interest, frequency)
for interest, frequency in popular_interests
if interest not in user_interests]
return suggestions[:max_results]
#
# user-based filtering
#
def cosine_similarity(v, w):
return dot(v, w) / math.sqrt(dot(v, v) * dot(w, w))
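# Note: for the 0/1 interest vectors built below, this value lies in [0, 1] --
# 1 when two users have identical interest sets, 0 when they share none.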
unique_interests = sorted(list({ interest
for user_interests in users_interests
for interest in user_interests }))
def make_user_interest_vector(user_interests):
"""given a list of interests, produce a vector whose i-th element is 1
if unique_interests[i] is in the list, 0 otherwise"""
return [1 if interest in user_interests else 0
for interest in unique_interests]
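# e.g. if unique_interests were ["Big Data", "Hadoop", "Java"] (hypothetical order),
# make_user_interest_vector(["Hadoop", "Java"]) would return [0, 1, 1]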
user_interest_matrix = map(make_user_interest_vector, users_interests)
user_similarities = [[cosine_similarity(interest_vector_i, interest_vector_j)
for interest_vector_j in user_interest_matrix]
for interest_vector_i in user_interest_matrix]
def most_similar_users_to(user_id):
pairs = [(other_user_id, similarity) # find other
for other_user_id, similarity in # users with
enumerate(user_similarities[user_id]) # nonzero
if user_id != other_user_id and similarity > 0] # similarity
return sorted(pairs, # sort them
key=lambda (_, similarity): similarity, # most similar
reverse=True) # first
def user_based_suggestions(user_id, include_current_interests=False):
# sum up the similarities
suggestions = defaultdict(float)
for other_user_id, similarity in most_similar_users_to(user_id):
for interest in users_interests[other_user_id]:
suggestions[interest] += similarity
# convert them to a sorted list
suggestions = sorted(suggestions.items(),
key=lambda (_, weight): weight,
reverse=True)
# and (maybe) exclude already-interests
if include_current_interests:
return suggestions
else:
return [(suggestion, weight)
for suggestion, weight in suggestions
if suggestion not in users_interests[user_id]]
#
# Item-Based Collaborative Filtering
#
interest_user_matrix = [[user_interest_vector[j]
for user_interest_vector in user_interest_matrix]
for j, _ in enumerate(unique_interests)]
interest_similarities = [[cosine_similarity(user_vector_i, user_vector_j)
for user_vector_j in interest_user_matrix]
for user_vector_i in interest_user_matrix]
def most_similar_interests_to(interest_id):
similarities = interest_similarities[interest_id]
pairs = [(unique_interests[other_interest_id], similarity)
for other_interest_id, similarity in enumerate(similarities)
if interest_id != other_interest_id and similarity > 0]
return sorted(pairs,
key=lambda (_, similarity): similarity,
reverse=True)
def item_based_suggestions(user_id, include_current_interests=False):
suggestions = defaultdict(float)
user_interest_vector = user_interest_matrix[user_id]
for interest_id, is_interested in enumerate(user_interest_vector):
if is_interested == 1:
similar_interests = most_similar_interests_to(interest_id)
for interest, similarity in similar_interests:
suggestions[interest] += similarity
suggestions = sorted(suggestions.items(),
key=lambda (_, similarity): similarity,
reverse=True)
if include_current_interests:
return suggestions
else:
return [(suggestion, weight)
for suggestion, weight in suggestions
if suggestion not in users_interests[user_id]]
if __name__ == "__main__":
print "Popular Interests"
print popular_interests
print
print "Most Popular New Interests"
print "already like:", ["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"]
print most_popular_new_interests(["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"])
print
print "already like:", ["R", "Python", "statistics", "regression", "probability"]
print most_popular_new_interests(["R", "Python", "statistics", "regression", "probability"])
print
print "User based similarity"
print "most similar to 0"
print most_similar_users_to(0)
print "Suggestions for 0"
print user_based_suggestions(0)
print
print "Item based similarity"
print "most similar to 'Big Data'"
print most_similar_interests_to(0)
print
print "suggestions for user 0"
print item_based_suggestions(0)
| unlicense |
mailhexu/pyDFTutils | pyDFTutils/unfolding/unfolding/DDB_unfolder.py | 1 | 7020 | #!/usr/bin/env python
import abipy.abilab as abilab
import numpy as np
from ase.build import bulk
from ase.dft.kpoints import get_special_points, bandpath
from pyDFTutils.unfolding.phonon_unfolder import phonon_unfolder
from pyDFTutils.phonon.plotphon import plot_band_weight
import matplotlib.pyplot as plt
atomic_masses = np.array([
1., 1.008, 4.002602, 6.94, 9.0121831, 10.81, 12.011, 14.007, 15.999,
18.99840316, 20.1797, 22.98976928, 24.305, 26.9815385, 28.085, 30.973762,
32.06, 35.45, 39.948, 39.0983, 40.078, 44.955908, 47.867, 50.9415, 51.9961,
54.938044, 55.845, 58.933194, 58.6934, 63.546, 65.38, 69.723, 72.63,
74.921595, 78.971, 79.904, 83.798, 85.4678, 87.62, 88.90584, 91.224,
92.90637, 95.95, 97.90721, 101.07, 102.9055, 106.42, 107.8682, 112.414,
114.818, 118.71, 121.76, 127.6, 126.90447, 131.293, 132.90545196, 137.327,
138.90547, 140.116, 140.90766, 144.242, 144.91276, 150.36, 151.964, 157.25,
158.92535, 162.5, 164.93033, 167.259, 168.93422, 173.054, 174.9668, 178.49,
180.94788, 183.84, 186.207, 190.23, 192.217, 195.084, 196.966569, 200.592,
204.38, 207.2, 208.9804, 208.98243, 209.98715, 222.01758, 223.01974,
226.02541, 227.02775, 232.0377, 231.03588, 238.02891, 237.04817, 244.06421,
243.06138, 247.07035, 247.07031, 251.07959, 252.083, 257.09511, 258.09843,
259.101, 262.11, 267.122, 268.126, 271.134, 270.133, 269.1338, 278.156,
281.165, 281.166, 285.177, 286.182, 289.19, 289.194, 293.204, 293.208,
294.214
])
def kpath():
#DDB = abilab.abiopen('out_DDB')
#struct = DDB.structure
#atoms = DDB.structure.to_ase_atoms()
atoms = bulk('Cu','fcc')
points = get_special_points('fcc', atoms.cell, eps=0.01)
GXW = [points[k] for k in 'GXWGL']
kpts, x, X = bandpath(GXW, atoms.cell, 700)
names = ['$\Gamma$', 'X', 'W', '$\Gamma$', 'L']
return kpts, x, X, names, GXW
def displacement_cart_to_evec(displ_cart, masses, scaled_positions, qpoint=None, add_phase=True):
"""
    displ_cart: Cartesian displacement. (atom1_x, atom1_y, atom1_z, atom2_x, ...)
    masses: masses of atoms.
    scaled_positions: scaled positions of atoms.
qpoint: if phase needs to be added, qpoint must be given.
add_phase: whether to add phase to the eigenvectors.
"""
if add_phase and qpoint is None:
raise ValueError('qpoint must be given if adding phase is needed')
m = np.sqrt(np.kron(masses,[1,1,1]))
evec=displ_cart *m
if add_phase:
phase = [np.exp(-2j*np.pi*np.dot(pos,qpoint)) for pos in scaled_positions]
phase = np.kron(phase,[1,1,1])
evec*=phase
evec /= np.linalg.norm(evec)
return evec
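# Hedged usage sketch (hypothetical two-atom cell; all values are illustrative):
#   displ = np.array([0.1, 0., 0., -0.1, 0., 0.], dtype=complex)
#   masses = [22.98976928, 35.45]                  # e.g. Na and Cl
#   positions = [[0., 0., 0.], [0.5, 0.5, 0.5]]
#   evec = displacement_cart_to_evec(displ, masses, positions,
#                                    qpoint=[0., 0., 0.5], add_phase=True)
# The result is mass-weighted, phased and normalized (np.linalg.norm(evec) == 1).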
def DDB_unfolder(DDB_fname, kpath_bounds,sc_mat):
DDB = abilab.abiopen(DDB_fname)
struct = DDB.structure
atoms = DDB.structure.to_ase_atoms()
scaled_positions = struct.frac_coords
cell = struct.lattice_vectors()
numbers = struct.atomic_numbers
masses = [atomic_masses[i] for i in numbers]
#print numbers
#print cell
#print scaled_positions
#print kpath_bounds
phbst, phdos = DDB.anaget_phbst_and_phdos_files(
nqsmall=2,
asr=1,
chneut=1,
dipdip=0,
verbose=1,
lo_to_splitting=False,
qptbounds=kpath_bounds,
)
#phbst.plot_phbands()
qpoints = phbst.qpoints.frac_coords
nqpts = len(qpoints)
nbranch = 3 * len(numbers)
evals = np.zeros([nqpts, nbranch])
evecs = np.zeros([nqpts, nbranch, nbranch], dtype='complex128')
m = np.sqrt(np.kron(masses,[1,1,1]))
#positions=np.kron(scaled_positions,[1,1,1])
for iqpt, qpt in enumerate(qpoints):
for ibranch in range(nbranch):
phmode = phbst.get_phmode(qpt, ibranch)
evals[iqpt, ibranch] = phmode.freq
#evec=phmode.displ_cart *m
#phase = [np.exp(-2j*np.pi*np.dot(pos,qpt)) for pos in scaled_positions]
#phase = np.kron(phase,[1,1,1])
#evec*=phase
#evec /= np.linalg.norm(evec)
evec=displacement_cart_to_evec(phmode.displ_cart, masses, scaled_positions, qpoint=qpt, add_phase=True)
evecs[iqpt,:,ibranch] = evec
uf = phonon_unfolder(atoms,sc_mat,evecs,qpoints,phase=False)
weights = uf.get_weights()
x=np.arange(nqpts)
freqs=evals
names = ['$\Gamma$', 'X', 'W', '$\Gamma$', 'L']
#ax=plot_band_weight([list(x)]*freqs.shape[1],freqs.T*33.356,weights[:,:].T*0.98+0.01,xticks=[names,X],axis=ax)
ax=plot_band_weight([list(x)]*freqs.shape[1],freqs.T*27*33.356,weights[:,:].T*0.98+0.01,xticks=[names,[1,2,3,4,5]],style='alpha')
plt.show()
def nc_unfolder(fname, sc_mat, kx=None, knames=None ,ghost_atoms=None):
ncfile=abilab.abiopen(fname)
struct = ncfile.structure
atoms = ncfile.structure.to_ase_atoms()
scaled_positions = struct.frac_coords
cell = struct.lattice_vectors()
numbers = struct.atomic_numbers
masses = [atomic_masses[i] for i in numbers]
#print numbers
#print cell
#print scaled_positions
#print kpath_bounds
phbst = ncfile.phbands
#phbst.plot_phbands()
qpoints = phbst.qpoints.frac_coords
nqpts = len(qpoints)
nbranch = 3 * len(numbers)
evals = np.zeros([nqpts, nbranch])
evecs = np.zeros([nqpts, nbranch, nbranch], dtype='complex128')
m = np.sqrt(np.kron(masses,[1,1,1]))
#positions=np.kron(scaled_positions,[1,1,1])
freqs=phbst.phfreqs
displ_carts=phbst.phdispl_cart
for iqpt, qpt in enumerate(qpoints):
print(iqpt, qpt)
for ibranch in range(nbranch):
#phmode = ncfile.get_phmode(qpt, ibranch)
#print(2)
evals[iqpt, ibranch] = freqs[iqpt, ibranch]
#evec=phmode.displ_cart *m
#phase = [np.exp(-2j*np.pi*np.dot(pos,qpt)) for pos in scaled_positions]
#phase = np.kron(phase,[1,1,1])
#evec*=phase
#evec /= np.linalg.norm(evec)
evec=displacement_cart_to_evec(displ_carts[iqpt, ibranch,: ], masses, scaled_positions, qpoint=qpt, add_phase=True)
evecs[iqpt,:,ibranch] = evec
uf = phonon_unfolder(atoms,sc_mat,evecs,qpoints,phase=False, ghost_atoms=ghost_atoms)
weights = uf.get_weights()
x=np.arange(nqpts)
freqs=evals
#names = ['$\Gamma$', 'X', 'W', '$\Gamma$', 'L']
#ax=plot_band_weight([list(x)]*freqs.shape[1],freqs.T*33.356,weights[:,:].T*0.98+0.01,xticks=[names,X],axis=ax)
ax=plot_band_weight([list(x)]*freqs.shape[1],freqs.T*8065.6,weights[:,:].T*0.98+0.01,xticks=[knames, kx],style='alpha')
#plt.show()
return ax
def main():
#sc_mat = np.linalg.inv((np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]) / 2.0))
#sc_mat=np.array([[0,1,1],[1,0,1],[1,1,0]])
sc_mat=np.eye(3)
#points = kpath()[-1]
points=np.array([(0,0,0),(0,.5,0),(0.5,0.5,0),[.5,.5,.5],[0,0,0]])
DDB_unfolder(DDB_fname='out_DDB', kpath_bounds = [np.dot(k, sc_mat) for k in points],sc_mat=sc_mat)
#main()
| lgpl-3.0 |
mavlab2015/paparazzi | sw/airborne/test/stabilization/compare_ref_quat.py | 38 | 1206 | #! /usr/bin/env python
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib.pyplot as plt
import ref_quat_float
import ref_quat_int
steps = 512 * 2
ref_eul_res = np.zeros((steps, 3))
ref_quat_res = np.zeros((steps, 3))
ref_quat_float.init()
ref_quat_int.init()
# reset psi and update_ref_quat_from_eulers
ref_quat_float.enter()
ref_quat_int.enter()
q_sp = np.array([0.92387956, 0.38268346, 0., 0.])
ref_quat_float.sp_quat.array = q_sp
ref_quat_int.sp_quat.array = q_sp * (1 << 15)
for i in range(0, steps):
ref_quat_float.update()
ref_eul_res[i, :] = ref_quat_float.ref_euler.array
ref_quat_int.update()
ref_quat_res[i, :] = ref_quat_int.ref_euler.array / (1 << 20)
plt.figure(1)
plt.subplot(311)
plt.title("reference in euler angles")
plt.plot(np.degrees(ref_eul_res[:, 0]), 'g')
plt.plot(np.degrees(ref_quat_res[:, 0]), 'r')
plt.ylabel("phi [deg]")
plt.subplot(312)
plt.plot(np.degrees(ref_eul_res[:, 1]), 'g')
plt.plot(np.degrees(ref_quat_res[:, 1]), 'r')
plt.ylabel("theta [deg]")
plt.subplot(313)
plt.plot(np.degrees(ref_eul_res[:, 2]), 'g')
plt.plot(np.degrees(ref_quat_res[:, 2]), 'r')
plt.ylabel("psi [deg]")
plt.show()
| gpl-2.0 |
Eric89GXL/scikit-learn | examples/linear_model/plot_ols_3d.py | 8 | 2024 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import pylab as pl
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = pl.figure(fig_num, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
pl.show()
| bsd-3-clause |
AlexRobson/nilmtk | nilmtk/dataset_converters/eco/convert_eco.py | 6 | 7138 | import pandas as pd
import numpy as np
import sys
from os import listdir, getcwd
from os.path import isdir, join, dirname, abspath
from pandas.tools.merge import concat
from nilmtk.utils import get_module_directory, check_directory_exists
from nilmtk.datastore import Key
from nilmtk.measurement import LEVEL_NAMES
from nilm_metadata import convert_yaml_to_hdf5
from inspect import currentframe, getfile, getsourcefile
from sys import getfilesystemencoding
"""
DATASET STRUCTURE:
------------------
After extracting the dataset, you should arrive at a directory structure like the
one described below.
The ECO dataset has one '<i>_sm_csv' and one '<i>_plug_csv' folder per building, where i is the building number.
<i>_sm_csv has a single folder 01.
<i>_plug_csv has folders 01, 02, ..., <n>, where n is the number of plugs.
Each folder has one CSV file per day, with each day's CSV file containing
86400 entries.
"""
plugs_column_name = {1: ('power', 'active')}
def convert_eco(dataset_loc, hdf_filename, timezone):
"""
Parameters:
-----------
dataset_loc: str
The root directory where the dataset is located.
hdf_filename: str
        Path of the HDF5 file to write. It must include the
        file name itself, not just the directory, for the
        converter to work.
timezone: str
specifies the timezone of the dataset.
"""
# Creating a new HDF File
store = pd.HDFStore(hdf_filename, 'w', complevel=9, complib='blosc')
check_directory_exists(dataset_loc)
directory_list = [i for i in listdir(dataset_loc) if '.txt' not in i]
directory_list.sort()
print directory_list
# Traversing every folder
for folder in directory_list:
if folder[0] == '.' or folder[-3:] == '.h5':
print 'Skipping ', folder
continue
print 'Computing for folder',folder
#Building number and meter_flag
building_no = int(folder[:2])
meter_flag = 'sm' if 'sm_csv' in folder else 'plugs'
dir_list = [i for i in listdir(join(dataset_loc, folder)) if isdir(join(dataset_loc,folder,i))]
dir_list.sort()
print 'Current dir list:',dir_list
for fl in dir_list:
print 'Computing for folder ',fl
fl_dir_list = [i for i in listdir(join(dataset_loc,folder,fl)) if '.csv' in i]
fl_dir_list.sort()
if meter_flag == 'sm':
for fi in fl_dir_list:
df = pd.read_csv(join(dataset_loc,folder,fl,fi), names=[i for i in range(1,17)], dtype=np.float32)
for phase in range(1,4):
key = str(Key(building=building_no, meter=phase))
df_phase = df.ix[:,[1+phase, 5+phase, 8+phase, 13+phase]]
# get reactive power
power = df_phase.as_matrix([1+phase, 13+phase])
reactive = power[:,0] * np.tan(power[:,1] * np.pi / 180)
df_phase['Q'] = reactive
df_phase.index = pd.DatetimeIndex(start=fi[:-4], freq='s', periods=86400, tz='GMT')
df_phase = df_phase.tz_convert(timezone)
sm_column_name = {1+phase:('power', 'active'),
5+phase:('current', ''),
8+phase:('voltage', ''),
13+phase:('phase_angle', ''),
'Q': ('power', 'reactive'),
};
df_phase.rename(columns=sm_column_name, inplace=True)
tmp_before = np.size(df_phase.power.active)
df_phase = df_phase[df_phase.power.active != -1]
tmp_after = np.size(df_phase.power.active)
if (tmp_before != tmp_after):
print('Removed missing measurements - Size before: ' + str(tmp_before) + ', size after: ' + str(tmp_after))
df_phase.columns.set_names(LEVEL_NAMES, inplace=True)
if not key in store:
store.put(key, df_phase, format='Table')
else:
store.append(key, df_phase, format='Table')
store.flush()
print 'Building',building_no,', Meter no.',phase,'=> Done for ',fi[:-4]
else:
#Meter number to be used in key
meter_num = int(fl) + 3
key = str(Key(building=building_no, meter=meter_num))
#Getting dataframe for each csv file seperately
for fi in fl_dir_list:
df = pd.read_csv(join(dataset_loc,folder,fl ,fi), names=[1], dtype=np.float64)
df.index = pd.DatetimeIndex(start=fi[:-4], freq='s', periods=86400, tz = 'GMT')
df.rename(columns=plugs_column_name, inplace=True)
df = df.tz_convert(timezone)
df.columns.set_names(LEVEL_NAMES, inplace=True)
tmp_before = np.size(df.power.active)
df = df[df.power.active != -1]
tmp_after = np.size(df.power.active)
if (tmp_before != tmp_after):
print('Removed missing measurements - Size before: ' + str(tmp_before) + ', size after: ' + str(tmp_after))
# If table not present in hdf5, create or else append to existing data
if not key in store:
store.put(key, df, format='Table')
print 'Building',building_no,', Meter no.',meter_num,'=> Done for ',fi[:-4]
else:
store.append(key, df, format='Table')
store.flush()
print 'Building',building_no,', Meter no.',meter_num,'=> Done for ',fi[:-4]
print "Data storage completed."
store.close()
# Adding the metadata to the HDF5file
print "Proceeding to Metadata conversion..."
meta_path = join(_get_module_directory(), 'metadata')
convert_yaml_to_hdf5(meta_path, hdf_filename)
print "Completed Metadata conversion."
def _get_module_directory():
# Taken from http://stackoverflow.com/a/6098238/732596
path_to_this_file = dirname(getfile(currentframe()))
if not isdir(path_to_this_file):
encoding = getfilesystemencoding()
path_to_this_file = dirname(unicode(__file__, encoding))
if not isdir(path_to_this_file):
        path_to_this_file = dirname(abspath(getsourcefile(lambda _: None)))
if not isdir(path_to_this_file):
path_to_this_file = getcwd()
assert isdir(path_to_this_file), path_to_this_file + ' is not a directory'
return path_to_this_file
| apache-2.0 |
kstensbo/perceptron | python/make_dataset.py | 1 | 1816 | import numpy as np
from sklearn.datasets import make_blobs, make_circles, make_classification, \
make_moons
import matplotlib.pyplot as plt
import seaborn as sns
seapal = sns.color_palette('Paired', 12)
sns.set_style('ticks')
from IPython import embed
if __name__ == "__main__":
info = {}
X, y = make_blobs(30, n_features=2, centers=2, cluster_std=2,
random_state=42)
info["Blobs"] = {'X': X, 'y': y}
X, y = make_circles(100, noise=0.1, factor=0.4, random_state=42)
info["Circles"] = {'X': X, 'y': y}
X, y = make_moons(100, noise=0.1, random_state=42)
info["Moons"] = {'X': X, 'y': y}
X, y = make_classification(100, 2, n_informative=1, n_redundant=0,
n_clusters_per_class=1, random_state=1)
info["Classification"] = {'X': X, 'y': y}
for key, val in info.items():
with open("../Data/{}.hs".format(key), 'w') as f:
X = val['X']
y = val['y']
y[y == 0] = -1
f.write("module Data.{}\n( x\n, y\n) where\n\n".format(key))
f.write("x = [")
for xi in X[:-1]:
f.write("[{:6.4f}, {:6.4f}], ".format(xi[0], xi[1]))
f.write("[{:6.4f}, {:6.4f}]]\n".format(X[-1,0], X[-1,1]))
f.write("\ny :: [Double]")
f.write("\ny = [")
for yi in y[:-1]:
f.write("{:d}, ".format(yi))
f.write("{:d}]\n".format(y[-1]))
#fig, ax = plt.subplots()
## Plot data points
#scolour = [seapal[1] if i==-1 else seapal[3] for i in y]
#ax.scatter(X[:,0], X[:,1], c=scolour, s=42,
# edgecolors=plt.cm.binary(0), alpha=0.8, zorder=100)
#ax.set_xlabel(r'$x_1$', fontsize=14)
#ax.set_ylabel(r'$x_2$', fontsize=14)
#plt.show()
| mit |
Curly-Mo/mir-tools | cross_validate.py | 1 | 7202 | #!/usr/bin/env python
"""Script to run Cross-Validation"""
import logging
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
import argparse
from time import time
import scipy
import sklearn
from sklearn.externals import joblib
from sklearn import cross_validation
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import numpy as np
import feature_extraction
import machine_learning
import util
def report(actual, predicted, plot=False, save=False):
labels = np.unique(np.concatenate([actual, predicted]))
confusion = confusion_matrix(actual, predicted, labels)
confusion_string = machine_learning.confusion_str(confusion, labels)
scores = classification_report(actual, predicted, target_names=labels)
return '{}\n{}'.format(confusion_string, scores)
def sample_report(tracks, plot=False, save=False):
actual, predicted = [], []
for track in tracks:
predicted.extend(track['sample_predictions'])
actual.extend([track['label']] * len(track['sample_predictions']))
return report(actual, predicted, plot, save)
def track_report(tracks, plot=False, save=False):
actual, predicted = [], []
for track in tracks:
actual.append(track['label'])
predicted.append(track['prediction'])
return report(actual, predicted, plot, save)
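# Both report helpers expect track dicts shaped roughly like this (hypothetical
# labels), which is what kfold() below fills in:
#   {'label': 'violin', 'prediction': 'violin',
#    'sample_predictions': ['violin', 'violin', 'flute', ...]}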
def best_svm(tracks, feature_names, n_iter=200, save=False):
clf = machine_learning.Classifier('rbfsvm')
X, Y = machine_learning.shape_features(tracks, feature_names)
param_dist = {
'C': scipy.stats.expon(scale=1000),
'class_weight': ['auto'],
#'loss': ['squared_hinge'],
#'penalty': ['l2'],
#'dual': [False],
'tol': scipy.stats.expon(scale=0.1),
}
logging.info('Optimizing parameters: {}'.format(param_dist))
random_search = sklearn.grid_search.RandomizedSearchCV(
clf.clf,
param_distributions=param_dist,
n_iter=n_iter,
verbose=10,
)
random_search.fit(X, Y)
for score in random_search.grid_scores_:
print(score)
print('Best Score: {}'.format(random_search.best_score_))
print('Best Params: {}'.format(random_search.best_params_))
if save:
logging.info('Saving classifier to disk...')
joblib.dump(random_search.best_estimator_, save, compress=True)
return random_search.best_estimator_
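# A classifier saved this way can later be restored with joblib, e.g.
#   clf = joblib.load('path/to/classifier.pkl')   # path is illustrative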
def cross_val_score(tracks, feature_names, folds=5):
X, Y = machine_learning.shape_features(tracks, feature_names)
clf = sklearn.svm.LinearSVC(class_weight='auto')
scores = cross_validation.cross_val_score(
clf,
X,
Y,
cv=folds,
scoring='f1_weighted'
)
return scores
def kfold(tracks, feature_names, folds=5, shuffle=True, **kwargs):
labels = [track['label'] for track in tracks]
kf = cross_validation.StratifiedKFold(labels, n_folds=folds, shuffle=shuffle)
for train, test in kf:
train_tracks = [tracks[i] for i in train]
test_tracks = [tracks[i] for i in test]
clf = machine_learning.Classifier(**kwargs)
clf = machine_learning.train_tracks(clf, train_tracks, feature_names)
predicted_all = []
Y_test_all = []
for track in test_tracks:
X_test, Y_test = machine_learning.shape_features([track], feature_names)
predicted = machine_learning.predict(X_test, clf)
track['sample_predictions'] = predicted
track['prediction'], track['predictions'] = util.most_common(predicted)
predicted_all.extend(predicted)
Y_test_all.extend(Y_test)
yield test_tracks
def main(**kwargs):
start = time()
tracks, args = feature_extraction.load_tracks(**kwargs)
if kwargs['action'] == 'kfold':
folds = kfold(tracks, **kwargs)
for tracks in folds:
scores = sample_report(tracks, plot=True)
print(scores)
scores = track_report(tracks, plot=True)
print(scores)
elif kwargs['action'] == 'cross_val_score':
scores = cross_val_score(tracks, args['feature_names'], folds=args.folds)
print(scores)
elif kwargs['action'] == 'optimize':
clf = best_svm(tracks, args['feature_names'], save=kwargs['save_classifier'])
print(clf)
end = time()
logging.info('Elapsed time: {}'.format(end - start))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Extract instrument stems from medleydb")
parser.add_argument('action', type=str,
choices={'kfold', 'cross_val_score', 'optimize'},
help='Action to take')
parser.add_argument('label', type=str,
choices={'instrument', 'genre'},
help='Track label')
parser.add_argument('-s', '--save_features', type=str, default=None,
help='Location to save pickled features to')
parser.add_argument('-l', '--load_features', type=str, default=None,
help='Location to load pickled features from')
parser.add_argument('-m', '--min_sources', type=int, default=10,
help='Min sources required for instrument selection')
parser.add_argument('-i', '--instruments', nargs='*', default=None,
help='List of instruments to extract')
parser.add_argument('-g', '--genres', nargs='*', default=None,
help='List of genres to extract')
parser.add_argument('-c', '--count', type=int, default=None,
help='Max number of tracks for each label')
parser.add_argument('-r', '--rm_silence', action='store_true',
help='Remove silence from audio files')
parser.add_argument('-t', '--trim', type=int, default=None,
help='Trim audio files to this length (in seconds)')
parser.add_argument('-k', '--folds', type=int, default=5,
help='Number of folds in kfold cross validation')
parser.add_argument('-n', '--n_fft', type=int, default=2048,
help='FFT size of MFCCs')
parser.add_argument('--hop_length', type=int, default=1024,
help='Hop size of MFCCs')
parser.add_argument('-a', '--average', type=int, default=None,
help='Number of seconds to average features over')
parser.add_argument('--normalize', action='store_true',
help='Normalize MFCC feature vectors between 0 and 1')
parser.add_argument('-f', '--feature_names', nargs='+', default=None,
choices=['mfcc', 'mfcc_delta', 'mfcc_delta_delta'],
help='List of features names to use')
parser.add_argument('--save_classifier', type=str, default=None,
help='Location to save pickled classifier to')
parser.add_argument('--classifier', type=str, default='svm',
choices=['linearsvm', 'rbfsvm', 'adaboost'],
help='Type of classifier to use.')
args = parser.parse_args()
main(**vars(args))
| mit |
xzh86/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
wschenck/nest-simulator | pynest/nest/tests/test_spatial/test_plotting.py | 12 | 5748 | # -*- coding: utf-8 -*-
#
# test_plotting.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for basic spatial plotting functions.
"""
import unittest
import nest
import numpy as np
try:
import matplotlib.pyplot as plt
tmp_fig = plt.figure() # make sure we can open a window; DISPLAY may not be set
plt.close(tmp_fig)
PLOTTING_POSSIBLE = True
except:
PLOTTING_POSSIBLE = False
@unittest.skipIf(not PLOTTING_POSSIBLE,
'Plotting impossible because matplotlib or display missing')
class PlottingTestCase(unittest.TestCase):
def test_PlotLayer(self):
"""Test plotting layer."""
nest.ResetKernel()
l = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[3, 3],
extent=[2., 2.],
edge_wrap=True))
nest.PlotLayer(l)
plotted_datapoints = plt.gca().collections[-1].get_offsets().data
reference_datapoints = nest.GetPosition(l)
self.assertTrue(np.allclose(plotted_datapoints, reference_datapoints))
def test_PlotTargets(self):
"""Test plotting targets."""
delta = 0.05
mask = {'rectangular': {'lower_left': [-delta, -2/3 - delta], 'upper_right': [2/3 + delta, delta]}}
cdict = {'rule': 'pairwise_bernoulli', 'p': 1.,
'mask': mask}
sdict = {'synapse_model': 'stdp_synapse'}
nest.ResetKernel()
l = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[3, 3],
extent=[2., 2.],
edge_wrap=True))
# connect l -> l
nest.Connect(l, l, cdict, sdict)
ctr = nest.FindCenterElement(l)
fig = nest.PlotTargets(ctr, l)
fig.gca().set_title('Plain call')
plotted_datapoints = plt.gca().collections[0].get_offsets().data
eps = 0.01
pos = np.array(nest.GetPosition(l))
pos_xmask = pos[np.where(pos[:, 0] > -eps)]
reference_datapoints = pos_xmask[np.where(pos_xmask[:, 1] < eps)][::-1]
self.assertTrue(np.array_equal(np.sort(plotted_datapoints, axis=0), np.sort(reference_datapoints, axis=0)))
fig = nest.PlotTargets(ctr, l, mask=mask)
ax = fig.gca()
ax.set_title('Call with mask')
self.assertGreaterEqual(len(ax.patches), 1)
def test_plot_probability_kernel(self):
"""Plot parameter probability"""
nest.ResetKernel()
plot_shape = [10, 10]
plot_edges = [-0.5, 0.5, -0.5, 0.5]
def probability_calculation(distance):
return 1 - 1.5*distance
l = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid([10, 10], edge_wrap=False))
source = l[25]
source_pos = np.array(nest.GetPosition(source))
source_x, source_y = source_pos
# Calculate reference values
ref_probability = np.zeros(plot_shape[::-1])
for i, x in enumerate(np.linspace(plot_edges[0], plot_edges[1], plot_shape[0])):
positions = np.array([[x, y] for y in np.linspace(plot_edges[2], plot_edges[3], plot_shape[1])])
ref_distances = np.sqrt((positions[:, 0] - source_x)**2 + (positions[:, 1] - source_y)**2)
values = probability_calculation(ref_distances)
ref_probability[:, i] = np.maximum(np.minimum(np.array(values), 1.0), 0.0)
# Create the parameter
parameter = probability_calculation(nest.spatial.distance)
fig, ax = plt.subplots()
nest.PlotProbabilityParameter(source, parameter, ax=ax, shape=plot_shape, edges=plot_edges)
self.assertEqual(len(ax.images), 1)
img = ax.images[0]
img_data = img.get_array().data
self.assertTrue(np.array_equal(img_data, ref_probability))
def test_plot_probability_kernel_with_mask(self):
"""Plot parameter probability with mask"""
nest.ResetKernel()
plot_shape = [10, 10]
plot_edges = [-0.5, 0.5, -0.5, 0.5]
l = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid([10, 10], edge_wrap=False))
parameter = 1 - 1.5*nest.spatial.distance
source = l[25]
masks = [{'circular': {'radius': 0.4}},
{'doughnut': {'inner_radius': 0.2, 'outer_radius': 0.45}},
{'rectangular': {'lower_left': [-.3, -.3], 'upper_right': [0.3, 0.3]}},
{'elliptical': {'major_axis': 0.8, 'minor_axis': 0.4}}]
fig, axs = plt.subplots(2, 2)
for mask, ax in zip(masks, axs.flatten()):
nest.PlotProbabilityParameter(source, parameter, mask=mask, ax=ax, shape=plot_shape, edges=plot_edges)
self.assertEqual(len(ax.images), 1)
self.assertGreaterEqual(len(ax.patches), 1)
def suite():
suite = unittest.makeSuite(PlottingTestCase, 'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
plt.show()
| gpl-2.0 |
wzbozon/statsmodels | statsmodels/sandbox/tsa/diffusion.py | 31 | 18732 | '''getting started with diffusions, continuous time stochastic processes
Author: josef-pktd
License: BSD
References
----------
An Algorithmic Introduction to Numerical Simulation of Stochastic Differential
Equations
Author(s): Desmond J. Higham
Source: SIAM Review, Vol. 43, No. 3 (Sep., 2001), pp. 525-546
Published by: Society for Industrial and Applied Mathematics
Stable URL: http://www.jstor.org/stable/3649798
http://www.sitmo.com/ especially the formula collection
Notes
-----
OU process: use same trick for ARMA with constant (non-zero mean) and drift
some of the processes have easy multivariate extensions
*Open Issues*
include xzero in returned sample or not? currently not
*TODOS*
* Milstein from Higham paper, for which processes does it apply
* Maximum Likelihood estimation
* more statistical properties (useful for tests)
* helper functions for display and MonteCarlo summaries (also for testing/checking)
* more processes for the menagerie (e.g. from empirical papers)
* characteristic functions
* transformations, non-linear e.g. log
* special estimators, e.g. Ait Sahalia, empirical characteristic functions
* fft examples
* check naming of methods, "simulate", "sample", "simexact", ... ?
stochastic volatility models: estimation unclear
finance applications ? option pricing, interest rate models
'''
from __future__ import print_function
import numpy as np
from scipy import stats, signal
import matplotlib.pyplot as plt
#np.random.seed(987656789)
class Diffusion(object):
'''Wiener Process, Brownian Motion with mu=0 and sigma=1
'''
def __init__(self):
pass
def simulateW(self, nobs=100, T=1, dt=None, nrepl=1):
'''generate sample of Wiener Process
'''
dt = T*1.0/nobs
t = np.linspace(dt, 1, nobs)
dW = np.sqrt(dt)*np.random.normal(size=(nrepl, nobs))
W = np.cumsum(dW,1)
self.dW = dW
return W, t
def expectedsim(self, func, nobs=100, T=1, dt=None, nrepl=1):
'''get expectation of a function of a Wiener Process by simulation
initially test example from
'''
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
U = func(t, W)
Umean = U.mean(0)
return U, Umean, t
class AffineDiffusion(Diffusion):
'''
differential equation:
:math::
dx_t = f(t,x)dt + \sigma(t,x)dW_t
integral:
:math::
x_T = x_0 + \\int_{0}^{T}f(t,S)dt + \\int_0^T \\sigma(t,S)dW_t
TODO: check definition, affine, what about jump diffusion?
'''
def __init__(self):
pass
def sim(self, nobs=100, T=1, dt=None, nrepl=1):
# this doesn't look correct if drift or sig depend on x
# see arithmetic BM
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
dx = self._drift() + self._sig() * W
x = np.cumsum(dx,1)
xmean = x.mean(0)
return x, xmean, t
def simEM(self, xzero=None, nobs=100, T=1, dt=None, nrepl=1, Tratio=4):
'''
from Higham 2001
TODO: reverse parameterization to start with final nobs and DT
TODO: check if I can skip the loop using my way from exactprocess
problem might be Winc (reshape into 3d and sum)
TODO: (later) check memory efficiency for large simulations
'''
#TODO: reverse parameterization to start with final nobs and DT
nobs = nobs * Tratio # simple way to change parameter
# maybe wrong parameterization,
# drift too large, variance too small ? which dt/Dt
# _drift, _sig independent of dt is wrong
if xzero is None:
xzero = self.xzero
if dt is None:
dt = T*1.0/nobs
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
dW = self.dW
t = np.linspace(dt, 1, nobs)
Dt = Tratio*dt;
L = nobs/Tratio; # L EM steps of size Dt = R*dt
Xem = np.zeros((nrepl,L)); # preallocate for efficiency
Xtemp = xzero
Xem[:,0] = xzero
for j in np.arange(1,L):
#Winc = np.sum(dW[:,Tratio*(j-1)+1:Tratio*j],1)
Winc = np.sum(dW[:,np.arange(Tratio*(j-1)+1,Tratio*j)],1)
#Xtemp = Xtemp + Dt*lamda*Xtemp + mu*Xtemp*Winc;
Xtemp = Xtemp + self._drift(x=Xtemp) + self._sig(x=Xtemp) * Winc
#Dt*lamda*Xtemp + mu*Xtemp*Winc;
Xem[:,j] = Xtemp
return Xem
'''
R = 4; Dt = R*dt; L = N/R; % L EM steps of size Dt = R*dt
Xem = zeros(1,L); % preallocate for efficiency
Xtemp = Xzero;
for j = 1:L
Winc = sum(dW(R*(j-1)+1:R*j));
Xtemp = Xtemp + Dt*lambda*Xtemp + mu*Xtemp*Winc;
Xem(j) = Xtemp;
end
'''
class ExactDiffusion(AffineDiffusion):
'''Diffusion that has an exact integral representation
this is currently mainly for geometric, log processes
'''
def __init__(self):
pass
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''ddt : discrete delta t
should be the same as an AR(1)
not tested yet
'''
t = np.linspace(ddt, nobs*ddt, nobs)
#expnt = np.exp(-self.lambd * t)
expddt = np.exp(-self.lambd * ddt)
normrvs = np.random.normal(size=(nrepl,nobs))
#do I need lfilter here AR(1) ? if mean reverting lag-coeff<1
#lfilter doesn't handle 2d arrays, it does?
inc = self._exactconst(expddt) + self._exactstd(expddt) * normrvs
return signal.lfilter([1.], [1.,-expddt], inc)
def exactdist(self, xzero, t):
expnt = np.exp(-self.lambd * t)
meant = xzero * expnt + self._exactconst(expnt)
stdt = self._exactstd(expnt)
return stats.norm(loc=meant, scale=stdt)
class ArithmeticBrownian(AffineDiffusion):
'''
:math::
dx_t &= \\mu dt + \\sigma dW_t
'''
def __init__(self, xzero, mu, sigma):
self.xzero = xzero
self.mu = mu
self.sigma = sigma
def _drift(self, *args, **kwds):
return self.mu
def _sig(self, *args, **kwds):
return self.sigma
def exactprocess(self, nobs, xzero=None, ddt=1., nrepl=2):
'''ddt : discrete delta t
not tested yet
'''
if xzero is None:
xzero = self.xzero
t = np.linspace(ddt, nobs*ddt, nobs)
normrvs = np.random.normal(size=(nrepl,nobs))
        # drift and diffusion over one discrete step of length ddt
        inc = self.mu * ddt + self.sigma * np.sqrt(ddt) * normrvs
#return signal.lfilter([1.], [1.,-1], inc)
return xzero + np.cumsum(inc,1)
def exactdist(self, xzero, t):
        meant = xzero + self.mu * t
        stdt = self.sigma * np.sqrt(t)
return stats.norm(loc=meant, scale=stdt)
class GeometricBrownian(AffineDiffusion):
'''Geometric Brownian Motion
:math::
dx_t &= \\mu x_t dt + \\sigma x_t dW_t
$x_t $ stochastic process of Geometric Brownian motion,
$\mu $ is the drift,
$\sigma $ is the Volatility,
$W$ is the Wiener process (Brownian motion).
'''
def __init__(self, xzero, mu, sigma):
self.xzero = xzero
self.mu = mu
self.sigma = sigma
def _drift(self, *args, **kwds):
x = kwds['x']
return self.mu * x
def _sig(self, *args, **kwds):
x = kwds['x']
return self.sigma * x
class OUprocess(AffineDiffusion):
'''Ornstein-Uhlenbeck
:math::
dx_t&=\\lambda(\\mu - x_t)dt+\\sigma dW_t
mean reverting process
TODO: move exact higher up in class hierarchy
'''
def __init__(self, xzero, mu, lambd, sigma):
self.xzero = xzero
self.lambd = lambd
self.mu = mu
self.sigma = sigma
def _drift(self, *args, **kwds):
x = kwds['x']
return self.lambd * (self.mu - x)
def _sig(self, *args, **kwds):
        # the SDE in the class docstring has additive noise, so the diffusion term is constant
        return self.sigma
def exact(self, xzero, t, normrvs):
#TODO: aggregate over time for process with observations for all t
# i.e. exact conditional distribution for discrete time increment
# -> exactprocess
#TODO: for single t, return stats.norm -> exactdist
expnt = np.exp(-self.lambd * t)
return (xzero * expnt + self.mu * (1-expnt) +
self.sigma * np.sqrt((1-expnt*expnt)/2./self.lambd) * normrvs)
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''ddt : discrete delta t
should be the same as an AR(1)
not tested yet
# after writing this I saw the same use of lfilter in sitmo
'''
t = np.linspace(ddt, nobs*ddt, nobs)
expnt = np.exp(-self.lambd * t)
expddt = np.exp(-self.lambd * ddt)
normrvs = np.random.normal(size=(nrepl,nobs))
#do I need lfilter here AR(1) ? lfilter doesn't handle 2d arrays, it does?
from scipy import signal
#xzero * expnt
inc = ( self.mu * (1-expddt) +
self.sigma * np.sqrt((1-expddt*expddt)/2./self.lambd) * normrvs )
return signal.lfilter([1.], [1.,-expddt], inc)
def exactdist(self, xzero, t):
#TODO: aggregate over time for process with observations for all t
#TODO: for single t, return stats.norm
expnt = np.exp(-self.lambd * t)
meant = xzero * expnt + self.mu * (1-expnt)
stdt = self.sigma * np.sqrt((1-expnt*expnt)/2./self.lambd)
from scipy import stats
return stats.norm(loc=meant, scale=stdt)
def fitls(self, data, dt):
'''assumes data is 1d, univariate time series
formula from sitmo
'''
# brute force, no parameter estimation errors
nobs = len(data)-1
exog = np.column_stack((np.ones(nobs), data[:-1]))
parest, res, rank, sing = np.linalg.lstsq(exog, data[1:])
const, slope = parest
errvar = res/(nobs-2.)
lambd = -np.log(slope)/dt
sigma = np.sqrt(-errvar * 2.*np.log(slope)/ (1-slope**2)/dt)
mu = const / (1-slope)
return mu, lambd, sigma
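# Hedged check (illustrative, not part of the original module): simulate a path
# with known parameters and recover them with fitls, e.g.
#   ou = OUprocess(xzero=0., mu=1., lambd=0.5, sigma=0.1)
#   path = ou.exactprocess(0., 5000, ddt=0.1, nrepl=1)[0]
#   ou.fitls(path, dt=0.1)   # should come back as roughly (1., 0.5, 0.1)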
class SchwartzOne(ExactDiffusion):
'''the Schwartz type 1 stochastic process
:math::
dx_t = \\kappa (\\mu - \\ln x_t) x_t dt + \\sigma x_tdW \\
The Schwartz type 1 process is a log of the Ornstein-Uhlenbeck stochastic
process.
'''
def __init__(self, xzero, mu, kappa, sigma):
self.xzero = xzero
self.mu = mu
self.kappa = kappa
self.lambd = kappa #alias until I fix exact
self.sigma = sigma
def _exactconst(self, expnt):
return (1-expnt) * (self.mu - self.sigma**2 / 2. /self.kappa)
def _exactstd(self, expnt):
return self.sigma * np.sqrt((1-expnt*expnt)/2./self.kappa)
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''uses exact solution for log of process
'''
lnxzero = np.log(xzero)
lnx = super(self.__class__, self).exactprocess(xzero, nobs, ddt=ddt, nrepl=nrepl)
return np.exp(lnx)
def exactdist(self, xzero, t):
expnt = np.exp(-self.lambd * t)
#TODO: check this is still wrong, just guessing
meant = np.log(xzero) * expnt + self._exactconst(expnt)
stdt = self._exactstd(expnt)
return stats.lognorm(loc=meant, scale=stdt)
def fitls(self, data, dt):
'''assumes data is 1d, univariate time series
formula from sitmo
'''
# brute force, no parameter estimation errors
nobs = len(data)-1
exog = np.column_stack((np.ones(nobs),np.log(data[:-1])))
parest, res, rank, sing = np.linalg.lstsq(exog, np.log(data[1:]))
const, slope = parest
        errvar = res/(nobs-2.)  # check denominator; the estimate of sigma comes out too low
kappa = -np.log(slope)/dt
sigma = np.sqrt(errvar * kappa / (1-np.exp(-2*kappa*dt)))
mu = const / (1-np.exp(-kappa*dt)) + sigma**2/2./kappa
if np.shape(mu)== (1,): mu = mu[0] # how to remove scalar array ?
if np.shape(sigma)== (1,): sigma = sigma[0]
#mu, kappa are good, sigma too small
return mu, kappa, sigma
class BrownianBridge(object):
def __init__(self):
pass
def simulate(self, x0, x1, nobs, nrepl=1, ddt=1., sigma=1.):
nobs=nobs+1
dt = ddt*1./nobs
t = np.linspace(dt, ddt-dt, nobs)
t = np.linspace(dt, ddt, nobs)
wm = [t/ddt, 1-t/ddt]
#wmi = wm[1]
#wm1 = x1*wm[0]
wmi = 1-dt/(ddt-t)
wm1 = x1*(dt/(ddt-t))
su = sigma* np.sqrt(t*(1-t)/ddt)
s = sigma* np.sqrt(dt*(ddt-t-dt)/(ddt-t))
x = np.zeros((nrepl, nobs))
x[:,0] = x0
rvs = s*np.random.normal(size=(nrepl,nobs))
for i in range(1,nobs):
x[:,i] = x[:,i-1]*wmi[i] + wm1[i] + rvs[:,i]
return x, t, su
class CompoundPoisson(object):
'''nobs iid compound poisson distributions, not a process in time
'''
def __init__(self, lambd, randfn=np.random.normal):
if len(lambd) != len(randfn):
raise ValueError('lambd and randfn need to have the same number of elements')
self.nobj = len(lambd)
self.randfn = randfn
self.lambd = np.asarray(lambd)
def simulate(self, nobs, nrepl=1):
nobj = self.nobj
x = np.zeros((nrepl, nobs, nobj))
N = np.random.poisson(self.lambd[None,None,:], size=(nrepl,nobs,nobj))
for io in range(nobj):
randfnc = self.randfn[io]
nc = N[:,:,io]
#print nrepl,nobs,nc
#xio = randfnc(size=(nrepl,nobs,np.max(nc))).cumsum(-1)[np.arange(nrepl)[:,None],np.arange(nobs),nc-1]
rvs = randfnc(size=(nrepl,nobs,np.max(nc)))
print('rvs.sum()', rvs.sum(), rvs.shape)
xio = rvs.cumsum(-1)[np.arange(nrepl)[:,None],np.arange(nobs),nc-1]
#print xio.shape
x[:,:,io] = xio
x[N==0] = 0
return x, N
'''
randn('state',100) % set the state of randn
T = 1; N = 500; dt = T/N; t = [dt:dt:1];
M = 1000; % M paths simultaneously
dW = sqrt(dt)*randn(M,N); % increments
W = cumsum(dW,2); % cumulative sum
U = exp(repmat(t,[M 1]) + 0.5*W);
Umean = mean(U);
plot([0,t],[1,Umean],'b-'), hold on % plot mean over M paths
plot([0,t],[ones(5,1),U(1:5,:)],'r--'), hold off % plot 5 individual paths
xlabel('t','FontSize',16)
ylabel('U(t)','FontSize',16,'Rotation',0,'HorizontalAlignment','right')
legend('mean of 1000 paths','5 individual paths',2)
averr = norm((Umean - exp(9*t/8)),'inf') % sample error
'''
if __name__ == '__main__':
doplot = 1
nrepl = 1000
examples = []#['all']
if 'all' in examples:
w = Diffusion()
# Wiener Process
# ^^^^^^^^^^^^^^
ws = w.simulateW(1000, nrepl=nrepl)
if doplot:
plt.figure()
tmp = plt.plot(ws[0].T)
tmp = plt.plot(ws[0].mean(0), linewidth=2)
plt.title('Standard Brownian Motion (Wiener Process)')
func = lambda t, W: np.exp(t + 0.5*W)
us = w.expectedsim(func, nobs=500, nrepl=nrepl)
if doplot:
plt.figure()
tmp = plt.plot(us[0].T)
tmp = plt.plot(us[1], linewidth=2)
plt.title('Brownian Motion - exp')
#plt.show()
averr = np.linalg.norm(us[1] - np.exp(9*us[2]/8.), np.inf)
print(averr)
#print us[1][:10]
#print np.exp(9.*us[2][:10]/8.)
# Geometric Brownian
# ^^^^^^^^^^^^^^^^^^
gb = GeometricBrownian(xzero=1., mu=0.01, sigma=0.5)
gbs = gb.simEM(nobs=100, nrepl=100)
if doplot:
plt.figure()
tmp = plt.plot(gbs.T)
tmp = plt.plot(gbs.mean(0), linewidth=2)
plt.title('Geometric Brownian')
plt.figure()
tmp = plt.plot(np.log(gbs).T)
tmp = plt.plot(np.log(gbs.mean(0)), linewidth=2)
plt.title('Geometric Brownian - log-transformed')
ab = ArithmeticBrownian(xzero=1, mu=0.05, sigma=1)
abs = ab.simEM(nobs=100, nrepl=100)
if doplot:
plt.figure()
tmp = plt.plot(abs.T)
tmp = plt.plot(abs.mean(0), linewidth=2)
plt.title('Arithmetic Brownian')
# Ornstein-Uhlenbeck
# ^^^^^^^^^^^^^^^^^^
ou = OUprocess(xzero=2, mu=1, lambd=0.5, sigma=0.1)
ous = ou.simEM()
oue = ou.exact(1, 1, np.random.normal(size=(5,10)))
ou.exact(0, np.linspace(0,10,10/0.1), 0)
ou.exactprocess(0,10)
print(ou.exactprocess(0,10, ddt=0.1,nrepl=10).mean(0))
#the following looks good, approaches mu
oues = ou.exactprocess(0,100, ddt=0.1,nrepl=100)
if doplot:
plt.figure()
tmp = plt.plot(oues.T)
tmp = plt.plot(oues.mean(0), linewidth=2)
plt.title('Ornstein-Uhlenbeck')
# SchwartsOne
# ^^^^^^^^^^^
so = SchwartzOne(xzero=0, mu=1, kappa=0.5, sigma=0.1)
sos = so.exactprocess(0,50, ddt=0.1,nrepl=100)
print(sos.mean(0))
print(np.log(sos.mean(0)))
doplot = 1
if doplot:
plt.figure()
tmp = plt.plot(sos.T)
tmp = plt.plot(sos.mean(0), linewidth=2)
plt.title('Schwartz One')
print(so.fitls(sos[0,:],dt=0.1))
sos2 = so.exactprocess(0,500, ddt=0.1,nrepl=5)
print('true: mu=1, kappa=0.5, sigma=0.1')
for i in range(5):
print(so.fitls(sos2[i],dt=0.1))
# Brownian Bridge
# ^^^^^^^^^^^^^^^
bb = BrownianBridge()
#bbs = bb.sample(x0, x1, nobs, nrepl=1, ddt=1., sigma=1.)
bbs, t, wm = bb.simulate(0, 0.5, 99, nrepl=500, ddt=1., sigma=0.1)
if doplot:
plt.figure()
tmp = plt.plot(bbs.T)
tmp = plt.plot(bbs.mean(0), linewidth=2)
plt.title('Brownian Bridge')
plt.figure()
plt.plot(wm,'r', label='theoretical')
plt.plot(bbs.std(0), label='simulated')
plt.title('Brownian Bridge - Variance')
plt.legend()
# Compound Poisson
# ^^^^^^^^^^^^^^^^
cp = CompoundPoisson([1,1], [np.random.normal,np.random.normal])
cps = cp.simulate(nobs=20000,nrepl=3)
print(cps[0].sum(-1).sum(-1))
print(cps[0].sum())
print(cps[0].mean(-1).mean(-1))
print(cps[0].mean())
print(cps[1].size)
print(cps[1].sum())
#Note Y = sum^{N} X is compound poisson of iid x, then
#E(Y) = E(N)*E(X) eg. eq. (6.37) page 385 in http://ee.stanford.edu/~gray/sp.html
#plt.show()
| bsd-3-clause |
zbanga/trading-with-python | cookbook/reconstructVXX/reconstructVXX.py | 77 | 3574 | # -*- coding: utf-8 -*-
"""
Reconstructing VXX from futures data
author: Jev Kuznetsov
License : BSD
"""
from __future__ import division
from pandas import *
import numpy as np
import os
class Future(object):
""" vix future class, used to keep data structures simple """
def __init__(self,series,code=None):
""" code is optional, example '2010_01' """
self.series = series.dropna() # price data
self.settleDate = self.series.index[-1]
self.dt = len(self.series) # roll period (this is default, should be recalculated)
self.code = code # string code 'YYYY_MM'
def monthNr(self):
""" get month nr from the future code """
return int(self.code.split('_')[1])
def dr(self,date):
""" days remaining before settlement, on a given date """
return(sum(self.series.index>date))
def price(self,date):
""" price on a date """
return self.series.get_value(date)
def returns(df):
""" daily return """
return (df/df.shift(1)-1)
def reconstructVXX():
"""
calculate VXX returns
needs a previously preprocessed file vix_futures.csv
"""
dataDir = os.path.expanduser('~')+'/twpData'
X = DataFrame.from_csv(dataDir+'/vix_futures.csv') # raw data table
# build end dates list & futures classes
futures = []
codes = X.columns
endDates = []
for code in codes:
f = Future(X[code],code=code)
print code,':', f.settleDate
endDates.append(f.settleDate)
futures.append(f)
endDates = np.array(endDates)
# set roll period of each future
for i in range(1,len(futures)):
futures[i].dt = futures[i].dr(futures[i-1].settleDate)
# Y is the result table
idx = X.index
Y = DataFrame(index=idx, columns=['first','second','days_left','w1','w2',
'ret','30days_avg'])
# W is the weight matrix
W = DataFrame(data = np.zeros(X.values.shape),index=idx,columns = X.columns)
# for VXX calculation see http://www.ipathetn.com/static/pdf/vix-prospectus.pdf
# page PS-20
for date in idx:
i =np.nonzero(endDates>=date)[0][0] # find first not exprired future
first = futures[i] # first month futures class
second = futures[i+1] # second month futures class
dr = first.dr(date) # number of remaining dates in the first futures contract
dt = first.dt #number of business days in roll period
W.set_value(date,codes[i],100*dr/dt)
W.set_value(date,codes[i+1],100*(dt-dr)/dt)
# this is all just debug info
p1 = first.price(date)
p2 = second.price(date)
w1 = 100*dr/dt
w2 = 100*(dt-dr)/dt
Y.set_value(date,'first',p1)
Y.set_value(date,'second',p2)
Y.set_value(date,'days_left',first.dr(date))
Y.set_value(date,'w1',w1)
Y.set_value(date,'w2',w2)
Y.set_value(date,'30days_avg',(p1*w1+p2*w2)/100)
valCurr = (X*W.shift(1)).sum(axis=1) # value on day N
valYest = (X.shift(1)*W.shift(1)).sum(axis=1) # value on day N-1
Y['ret'] = valCurr/valYest-1 # index return on day N
return Y
##-------------------Main script---------------------------
if __name__=="__main__":
Y = recounstructVXX()
print Y.head(30)#
Y.to_csv('reconstructedVXX.csv')
| bsd-3-clause |
devanshdalal/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 108 | 2026 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
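# Aside (optional, not needed for the plot): the fitted model can also score
# points by log-density, e.g. log_dens = kde.score_samples(data[:5])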
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
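# As a small optional illustration, the fitted model can also score how
# "digit-like" new points are: score_samples returns the log-density under the
# learned KDE in the 15-dimensional PCA space.
log_dens = kde.score_samples(data[:5])
print("log-density of the first 5 projected digits: {0}".format(log_dens))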
| bsd-3-clause |
Clyde-fare/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
nesterione/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
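# The curves below trace (one quadrant of) the unit balls of each penalty:
# points with |w0| + |w1| = 1 for L1 and w0**2 + w1**2 = 1 for L2, with el()
# giving the elastic-net boundary as a blend of the two controlled by z.
# Quick sanity check: l1(np.array([0.25])) -> [0.75], l2(np.array([0.6])) -> [0.8].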
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # slightly above 0.5; alpha = 0.5 exactly would divide by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
HyukjinKwon/spark | python/pyspark/sql/pandas/typehints.py | 26 | 6324 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql.pandas.utils import require_minimum_pandas_version
def infer_eval_type(sig):
"""
Infers the evaluation type in :class:`pyspark.rdd.PythonEvalType` from
:class:`inspect.Signature` instance.
"""
from pyspark.sql.pandas.functions import PandasUDFType
require_minimum_pandas_version()
import pandas as pd
annotations = {}
for param in sig.parameters.values():
if param.annotation is not param.empty:
annotations[param.name] = param.annotation
# Check if all arguments have type hints
parameters_sig = [annotations[parameter] for parameter
in sig.parameters if parameter in annotations]
if len(parameters_sig) != len(sig.parameters):
raise ValueError(
"Type hints for all parameters should be specified; however, got %s" % sig)
# Check if the return has a type hint
return_annotation = sig.return_annotation
if sig.empty is return_annotation:
raise ValueError(
"Type hint for the return type should be specified; however, got %s" % sig)
# Series, Frame or Union[DataFrame, Series], ... -> Series or Frame
is_series_or_frame = (
all(a == pd.Series or # Series
a == pd.DataFrame or # DataFrame
check_union_annotation( # Union[DataFrame, Series]
a,
parameter_check_func=lambda na: na == pd.Series or na == pd.DataFrame)
for a in parameters_sig) and
(return_annotation == pd.Series or return_annotation == pd.DataFrame))
    # Iterator[Tuple[Series, Frame or Union[DataFrame, Series], ...]] -> Iterator[Series or Frame]
is_iterator_tuple_series_or_frame = (
len(parameters_sig) == 1 and
check_iterator_annotation( # Iterator
parameters_sig[0],
parameter_check_func=lambda a: check_tuple_annotation( # Tuple
a,
parameter_check_func=lambda ta: (
ta == Ellipsis or # ...
ta == pd.Series or # Series
ta == pd.DataFrame or # DataFrame
check_union_annotation( # Union[DataFrame, Series]
ta,
parameter_check_func=lambda na: (
na == pd.Series or na == pd.DataFrame))))) and
check_iterator_annotation(
return_annotation,
parameter_check_func=lambda a: a == pd.DataFrame or a == pd.Series))
# Iterator[Series, Frame or Union[DataFrame, Series]] -> Iterator[Series or Frame]
is_iterator_series_or_frame = (
len(parameters_sig) == 1 and
check_iterator_annotation(
parameters_sig[0],
parameter_check_func=lambda a: (
a == pd.Series or # Series
a == pd.DataFrame or # DataFrame
check_union_annotation( # Union[DataFrame, Series]
a,
parameter_check_func=lambda ua: ua == pd.Series or ua == pd.DataFrame))) and
check_iterator_annotation(
return_annotation,
parameter_check_func=lambda a: a == pd.DataFrame or a == pd.Series))
# Series, Frame or Union[DataFrame, Series], ... -> Any
is_series_or_frame_agg = (
all(a == pd.Series or # Series
a == pd.DataFrame or # DataFrame
check_union_annotation( # Union[DataFrame, Series]
a,
parameter_check_func=lambda ua: ua == pd.Series or ua == pd.DataFrame)
for a in parameters_sig) and (
# It's tricky to include only types which pd.Series constructor can take.
            # Simply exclude common types used here for now (which become object
# types Spark can't recognize).
return_annotation != pd.Series and
return_annotation != pd.DataFrame and
not check_iterator_annotation(return_annotation) and
not check_tuple_annotation(return_annotation)
))
if is_series_or_frame:
return PandasUDFType.SCALAR
elif is_iterator_tuple_series_or_frame or is_iterator_series_or_frame:
return PandasUDFType.SCALAR_ITER
elif is_series_or_frame_agg:
return PandasUDFType.GROUPED_AGG
else:
raise NotImplementedError("Unsupported signature: %s." % sig)
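# For illustration, example signatures and the eval type inferred above
# (pd is pandas; Iterator and Tuple come from typing):
#   (s: pd.Series) -> pd.Series                                   => SCALAR
#   (it: Iterator[pd.Series]) -> Iterator[pd.Series]              => SCALAR_ITER
#   (it: Iterator[Tuple[pd.Series, pd.DataFrame]]) -> Iterator[pd.Series]
#                                                                  => SCALAR_ITER
#   (s: pd.Series) -> float                                        => GROUPED_AGG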
def check_tuple_annotation(annotation, parameter_check_func=None):
# Python 3.6 has `__name__`. Python 3.7 and 3.8 have `_name`.
# Check if the name is Tuple first. After that, check the generic types.
name = getattr(annotation, "_name", getattr(annotation, "__name__", None))
return name == "Tuple" and (
parameter_check_func is None or all(map(parameter_check_func, annotation.__args__)))
def check_iterator_annotation(annotation, parameter_check_func=None):
name = getattr(annotation, "_name", getattr(annotation, "__name__", None))
return name == "Iterator" and (
parameter_check_func is None or all(map(parameter_check_func, annotation.__args__)))
def check_union_annotation(annotation, parameter_check_func=None):
import typing
# Note that we cannot rely on '__origin__' in other type hints as it has changed from version
# to version. For example, it's abc.Iterator in Python 3.7 but typing.Iterator in Python 3.6.
origin = getattr(annotation, "__origin__", None)
return origin == typing.Union and (
parameter_check_func is None or all(map(parameter_check_func, annotation.__args__)))
| apache-2.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/indexes/period/test_arithmetic.py | 2 | 4488 | import numpy as np
import pytest
import pandas as pd
from pandas import PeriodIndex, period_range
import pandas.util.testing as tm
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# PeriodIndex.shift is used by __add__ and __sub__
def test_pi_shift_ndarray(self):
idx = PeriodIndex(
["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
)
result = idx.shift(np.array([1, 2, 3, 4]))
expected = PeriodIndex(
["2011-02", "2011-04", "NaT", "2011-08"], freq="M", name="idx"
)
tm.assert_index_equal(result, expected)
result = idx.shift(np.array([1, -2, 3, -4]))
expected = PeriodIndex(
["2011-02", "2010-12", "NaT", "2010-12"], freq="M", name="idx"
)
tm.assert_index_equal(result, expected)
def test_shift(self):
pi1 = period_range(freq="A", start="1/1/2001", end="12/1/2009")
pi2 = period_range(freq="A", start="1/1/2002", end="12/1/2010")
tm.assert_index_equal(pi1.shift(0), pi1)
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = period_range(freq="A", start="1/1/2001", end="12/1/2009")
pi2 = period_range(freq="A", start="1/1/2000", end="12/1/2008")
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
pi1 = period_range(freq="M", start="1/1/2001", end="12/1/2009")
pi2 = period_range(freq="M", start="2/1/2001", end="1/1/2010")
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = period_range(freq="M", start="1/1/2001", end="12/1/2009")
pi2 = period_range(freq="M", start="12/1/2000", end="11/1/2009")
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
pi1 = period_range(freq="D", start="1/1/2001", end="12/1/2009")
pi2 = period_range(freq="D", start="1/2/2001", end="12/2/2009")
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = period_range(freq="D", start="1/1/2001", end="12/1/2009")
pi2 = period_range(freq="D", start="12/31/2000", end="11/30/2009")
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
def test_shift_corner_cases(self):
# GH#9903
idx = pd.PeriodIndex([], name="xxx", freq="H")
with pytest.raises(TypeError):
# period shift doesn't accept freq
idx.shift(1, freq="H")
tm.assert_index_equal(idx.shift(0), idx)
tm.assert_index_equal(idx.shift(3), idx)
idx = pd.PeriodIndex(
["2011-01-01 10:00", "2011-01-01 11:00", "2011-01-01 12:00"],
name="xxx",
freq="H",
)
tm.assert_index_equal(idx.shift(0), idx)
exp = pd.PeriodIndex(
["2011-01-01 13:00", "2011-01-01 14:00", "2011-01-01 15:00"],
name="xxx",
freq="H",
)
tm.assert_index_equal(idx.shift(3), exp)
exp = pd.PeriodIndex(
["2011-01-01 07:00", "2011-01-01 08:00", "2011-01-01 09:00"],
name="xxx",
freq="H",
)
tm.assert_index_equal(idx.shift(-3), exp)
def test_shift_nat(self):
idx = PeriodIndex(
["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
)
result = idx.shift(1)
expected = PeriodIndex(
["2011-02", "2011-03", "NaT", "2011-05"], freq="M", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
def test_shift_gh8083(self):
# test shift for PeriodIndex
# GH#8083
drange = pd.period_range("20130101", periods=5, freq="D")
result = drange.shift(1)
expected = PeriodIndex(
["2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05", "2013-01-06"],
freq="D",
)
tm.assert_index_equal(result, expected)
def test_shift_periods(self):
# GH #22458 : argument 'n' was deprecated in favor of 'periods'
idx = period_range(freq="A", start="1/1/2001", end="12/1/2009")
tm.assert_index_equal(idx.shift(periods=0), idx)
tm.assert_index_equal(idx.shift(0), idx)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=True):
tm.assert_index_equal(idx.shift(n=0), idx)
| apache-2.0 |
LiaoPan/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
of the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either
    a kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
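# Minimal usage sketch (illustrative, with synthetic data): two interleaved
# half-moons form non-convex clusters that a centroid-based method struggles
# with, but a nearest-neighbors affinity plus spectral clustering separates them.
if __name__ == "__main__":
    from sklearn.datasets import make_moons
    X, _ = make_moons(n_samples=200, noise=0.05, random_state=0)
    model = SpectralClustering(n_clusters=2, affinity="nearest_neighbors",
                               n_neighbors=10, random_state=0)
    labels = model.fit_predict(X)
    print("cluster sizes: %s" % np.bincount(labels))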
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/examples/realtime/ftclient_rt_average.py | 14 | 2841 | """
========================================================
Compute real-time evoked responses with FieldTrip client
========================================================
This example demonstrates how to connect the MNE real-time
system to the Fieldtrip buffer using FieldTripClient class.
This example was tested in simulation mode
neuromag2ft --file MNE-sample-data/MEG/sample/sample_audvis_raw.fif
using a modified version of neuromag2ft available at
http://neuro.hut.fi/~mainak/neuromag2ft-2.0.0.zip
to run the FieldTrip buffer. Then running this example acquires the
data on the client side.
Since the Fieldtrip buffer does not contain all the
measurement information required by the MNE real-time processing
pipeline, an info dictionary must be provided to instantiate FieldTripClient.
Alternatively, the MNE-Python script will try to guess the missing
measurement info from the Fieldtrip Header object.
Together with RtEpochs, this can be used to compute evoked
responses using moving averages.
"""
# Author: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.viz import plot_events
from mne.realtime import FieldTripClient, RtEpochs
print(__doc__)
# select the left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
# user must provide list of bad channels because
# FieldTrip header object does not provide that
bads = ['MEG 2443', 'EEG 053']
plt.ion() # make plot interactive
_, ax = plt.subplots(2, 1, figsize=(8, 8)) # create subplots
with FieldTripClient(host='localhost', port=1972,
tmax=150, wait_max=10) as rt_client:
# get measurement info guessed by MNE-Python
raw_info = rt_client.get_measurement_info()
# select gradiometers
picks = mne.pick_types(raw_info, meg='grad', eeg=False, eog=True,
stim=True, exclude=bads)
# create the real-time epochs object
rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax,
stim_channel='STI 014', picks=picks,
reject=dict(grad=4000e-13, eog=150e-6),
decim=1, isi_max=10.0, proj=None)
# start the acquisition
rt_epochs.start()
for ii, ev in enumerate(rt_epochs.iter_evoked()):
print("Just got epoch %d" % (ii + 1))
if ii == 0:
evoked = ev
else:
evoked += ev
ax[0].cla()
ax[1].cla() # clear axis
plot_events(rt_epochs.events[-5:], sfreq=ev.info['sfreq'],
first_samp=-rt_client.tmin_samp, axes=ax[0])
evoked.plot(axes=ax[1]) # plot on second subplot
ax[1].set_title('Evoked response for gradiometer channels'
'(event_id = %d)' % event_id)
plt.pause(0.05)
plt.draw()
plt.close()
| bsd-3-clause |
aleaf/pest_tools | load_jco.py | 2 | 2515 | import numpy as np
import pandas as pd
import struct
def load_jco(file_name, return_par = True, return_obs = True):
'''Read PEST Jacobian matrix file (binary) into Pandas data frame
Parameters
----------
file_name : string
File name for .jco (binary) produced by PEST
return_par : {True, False}, optional
If True (default) return list of parameters
return_obs : {True, False}, optional
If True (default) return list of observations
Returns
-------
jco_df : Pandas DataFrame
DataFrame of the Jacobian Matrix. Index entries of DataFrame are
observations (rows). Columns are parameters
par_names : list
List of parmeter names. Returned if return_par = True
ob_names : list
List of observation names. Returned if return_obs = True
'''
f = open(file_name,'rb')
#--the header data type
npar = abs(struct.unpack('i', f.read(4))[0])
nobs = abs(struct.unpack('i', f.read(4))[0])
count = abs(struct.unpack('i', f.read(4))[0])
x = np.zeros((nobs, npar))
#--read all data records
for record in range(count):
if count > 1000000:
if record % 1000000 == 0:
percent = (float(record) / count) *100
print '%.1f Percent; Record %s of %s \r' % (percent, record, count)
j = struct.unpack('i', f.read(4))[0]
col = ((j-1) / nobs) + 1
row = j - ((col - 1) * nobs)
data = struct.unpack('d', f.read(8))[0]
x[row-1, col-1] = data
#--read parameter names
par_names = []
for i in range(npar):
par_name = struct.unpack('12s', f.read(12))[0].strip().lower()
par_names.append(par_name)
#print 'par:',pn
#--read obs names
obs_names = []
for i in range(nobs):
ob_name = struct.unpack('20s', f.read(20))[0].strip().lower()
obs_names.append(ob_name)
#print 'obs:',on
f.close()
jco_df = pd.DataFrame(x, index = obs_names, columns = par_names)
# Clean Up
del(x)
if return_par == True and return_obs == True:
return jco_df, par_names, obs_names
if return_par == True and return_obs == False:
return jco_df, par_names
if return_par == False and return_obs == True:
return jco_df, obs_names
if return_par == False and return_obs == False:
return jco_df
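if __name__ == '__main__':
    # Illustrative usage only; 'pest_run.jco' is a placeholder file name.
    jco_df, par_names, obs_names = load_jco('pest_run.jco')
    print 'Jacobian shape (observations x parameters):', jco_df.shape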
| mit |
nwjs/chromium.src | tools/perf/cli_tools/soundwave/tables/timeseries.py | 10 | 5908 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
from cli_tools.soundwave import pandas_sqlite
from core.external_modules import pandas
TABLE_NAME = 'timeseries'
COLUMN_TYPES = (
# Index columns.
('test_suite', str), # benchmark name ('loading.mobile')
('measurement', str), # metric name ('timeToFirstContentfulPaint')
('bot', str), # master/builder name ('ChromiumPerf.android-nexus5')
('test_case', str), # story name ('Wikipedia')
('point_id', 'int64'), # monotonically increasing id for time series axis
# Other columns.
('value', 'float64'), # value recorded for test_path at given point_id
('timestamp', 'datetime64[ns]'), # when the value got stored on dashboard
('commit_pos', 'int64'), # chromium commit position
('chromium_rev', str), # git hash of chromium revision
('clank_rev', str), # git hash of clank revision
('trace_url', str), # URL to a sample trace.
('units', str), # unit of measurement (e.g. 'ms', 'bytes')
('improvement_direction', str), # good direction ('up', 'down', 'unknown')
)
COLUMNS = tuple(c for c, _ in COLUMN_TYPES)
INDEX = COLUMNS[:5]
# Copied from https://goo.gl/DzGYpW.
_CODE_TO_IMPROVEMENT_DIRECTION = {
0: 'up',
1: 'down',
}
TEST_PATH_PARTS = (
'master', 'builder', 'test_suite', 'measurement', 'test_case')
# Query template to find all data points of a given test_path (i.e. fixed
# test_suite, measurement, bot, and test_case values).
_QUERY_TIME_SERIES = (
'SELECT * FROM %s WHERE %s'
% (TABLE_NAME, ' AND '.join('%s=?' % c for c in INDEX[:-1])))
# Required columns to request from /timeseries2 API.
_TIMESERIES2_COLS = [
'revision',
'revisions',
'avg',
'timestamp',
'annotations']
class Key(collections.namedtuple('Key', INDEX[:-1])):
"""Uniquely identifies a single timeseries."""
@classmethod
def FromDict(cls, *args, **kwargs):
kwargs = dict(*args, **kwargs)
kwargs.setdefault('test_case', '') # test_case is optional.
return cls(**kwargs)
def AsDict(self):
return dict(zip(self._fields, self))
def AsApiParams(self):
"""Return a dict with params for a /timeseries2 API request."""
params = self.AsDict()
if not params['test_case']:
del params['test_case'] # test_case is optional.
params['columns'] = ','.join(_TIMESERIES2_COLS)
return params
def DataFrame(rows=None):
return pandas_sqlite.DataFrame(COLUMN_TYPES, index=INDEX, rows=rows)
def _ParseIntValue(value, on_error=-1):
# Try to parse as int and, in case of error, return a pre-defined value.
try:
return int(value)
except StandardError:
return on_error
def _ParseConfigFromTestPath(test_path):
if isinstance(test_path, Key):
return test_path.AsDict()
values = test_path.split('/', len(TEST_PATH_PARTS) - 1)
if len(values) < len(TEST_PATH_PARTS):
values.append('') # Possibly missing test_case.
if len(values) != len(TEST_PATH_PARTS):
raise ValueError(test_path)
config = dict(zip(TEST_PATH_PARTS, values))
config['bot'] = '%s/%s' % (config.pop('master'), config.pop('builder'))
return config
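# For example, the test path
#   'ChromiumPerf/android-nexus5/loading.mobile/timeToFirstContentfulPaint/Wikipedia'
# is parsed into
#   {'bot': 'ChromiumPerf/android-nexus5', 'test_suite': 'loading.mobile',
#    'measurement': 'timeToFirstContentfulPaint', 'test_case': 'Wikipedia'}.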
def DataFrameFromJson(test_path, data):
if isinstance(test_path, Key):
return _DataFrameFromJsonV2(test_path, data)
else:
# TODO(crbug.com/907121): Remove when we can switch entirely to v2.
return _DataFrameFromJsonV1(test_path, data)
def _DataFrameFromJsonV2(ts_key, data):
rows = []
for point in data['data']:
point = dict(zip(_TIMESERIES2_COLS, point))
rows.append(ts_key + (
point['revision'], # point_id
point['avg'], # value
point['timestamp'], # timestamp
_ParseIntValue(point['revisions']['r_commit_pos']), # commit_pos
point['revisions'].get('r_chromium'), # chromium_rev
point['revisions'].get('r_clank'), # clank_rev
point['annotations'].get('a_tracing_uri'), # trace_url
data['units'], # units
data['improvement_direction'], # improvement_direction
))
return DataFrame(rows)
def _DataFrameFromJsonV1(test_path, data):
# The dashboard API returns an empty list if there is no recent data for the
# timeseries.
if not data:
return DataFrame()
assert test_path == data['test_path']
config = _ParseConfigFromTestPath(data['test_path'])
config['improvement_direction'] = _CODE_TO_IMPROVEMENT_DIRECTION.get(
data['improvement_direction'], 'unknown')
timeseries = data['timeseries']
# The first element in timeseries list contains header with column names.
header = timeseries[0]
rows = []
# Remaining elements contain the values for each row.
for values in timeseries[1:]:
row = config.copy()
row.update(zip(header, values))
row['point_id'] = row['revision']
row['commit_pos'] = _ParseIntValue(row['r_commit_pos'])
row['chromium_rev'] = row.get('r_chromium')
row['clank_rev'] = row.get('r_clank', None)
rows.append(tuple(row.get(k) for k in COLUMNS))
return DataFrame(rows)
def GetTimeSeries(con, test_path, extra_cond=None):
"""Get the records for all data points on the given test_path.
Returns:
A pandas.DataFrame with all records found.
"""
config = _ParseConfigFromTestPath(test_path)
params = tuple(config[c] for c in INDEX[:-1])
query = _QUERY_TIME_SERIES
if extra_cond is not None:
query = ' '.join([query, extra_cond])
return pandas.read_sql(query, con, params=params, parse_dates=['timestamp'])
def GetMostRecentPoint(con, test_path):
"""Find the record for the most recent data point on the given test_path.
Returns:
A pandas.Series with the record if found, or None otherwise.
"""
df = GetTimeSeries(con, test_path, 'ORDER BY timestamp DESC LIMIT 1')
return df.iloc[0] if not df.empty else None
| bsd-3-clause |
pradyu1993/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 3 | 1845 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
tmhm/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
qiudebo/13learn | code/matplotlib/aqy/aqy_radar_chart3.py | 2 | 2100 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'qiudebo'
import matplotlib.pyplot as plt
from math import pi
if __name__ == '__main__':
data1 = (8.5, 8.2, 7.7, 7.8, 7.9, 8.4, 8.1, 8.3, 8.1, 8.1, 10.0, 8.5)
data2 = (8.9, 8.5, 8.2, 8.0, 7.9, 8.3, 8.0, 8.1, 7.9, 8.0, 9.3, 8.9)
labels = (u"白羊座", u"双鱼座", u"水平座", u"摩羯座", u"射手座", u"天蝎座",
u"天秤座", u"处女座", u"狮子座", u"巨蟹座", u"双子座", u"金牛座")
N = len(labels)
x_as = [n / float(N) * 2 * pi for n in range(N)]
data1 += data1[:1]
x_as += x_as[:1]
data2 += data2[:1]
plt.rc('axes', linewidth=0.5, edgecolor="#888888")
ax = plt.subplot(111, polar=True)
ax.set_theta_offset(pi / 2)
ax.set_theta_direction(-1)
ax.set_rlabel_position(0)
ax.xaxis.grid(True, color="#888888", linestyle='solid', linewidth=0.5)
ax.yaxis.grid(True, color="#888888", linestyle='solid', linewidth=0.5)
plt.xticks(x_as[:-1], [])
plt.yticks([2, 4, 6, 8, 10], ["2", "4", "6", "8", "10"])
ax.plot(x_as, data1, linewidth=0, linestyle='solid', zorder=3, label=u'我的前半生')
ax.plot(x_as, data2, linewidth=0, linestyle='solid', zorder=3, label=u'三生三世十里桃花')
ax.fill(x_as, data1, 'b', alpha=0.3)
ax.fill(x_as, data2, 'g', alpha=0.3)
plt.ylim(0, 10)
for i in range(N):
angle_rad = i / float(N) * 2 * pi
if angle_rad == 0:
ha, distance_ax = "center", 10
elif 0 < angle_rad < pi:
ha, distance_ax = "left", 1
elif angle_rad == pi:
ha, distance_ax = "center", 1
else:
ha, distance_ax = "right", 1
ax.text(angle_rad, 10 + distance_ax, labels[i],
size=10, horizontalalignment=ha, verticalalignment="center")
    plt.rcParams['font.sans-serif'] = ['SimHei'] # so the Chinese labels display correctly
    plt.rcParams['axes.unicode_minus'] = False # so the minus sign displays correctly
# plt.legend()
plt.show() | mit |
edwinschrauwen/interpi | rcv.py | 1 | 1963 | #!/usr/bin/python3
from datetime import datetime
#import matplotlib.pyplot as pyplot
import RPi.GPIO as GPIO
#RECEIVED_SIGNAL = [[], []] #[[time of reading], [signal reading]]
#sigs = [ [], [] ]
sigs = []
MAX_DURATION = 2
RECEIVE_PIN = 14
short_delay = 100
long_delay = 180
extended_delay = 1000
message_delay = 8000
terminator_delay = 1000
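# Decoding scheme used below (timings in microseconds): when the input changes
# state, the duration of the previous HIGH pulse selects the bit value (longer
# than short_delay -> 1, longer than long_delay -> 0). Any period longer than
# extended_delay is stored as a space separator, and after 34 separators a
# newline is appended so the captured bits print as framed lines.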
if __name__ == '__main__':
GPIO.setmode(GPIO.BCM)
GPIO.setup(RECEIVE_PIN, GPIO.IN)
cumulative_time = 0
beginning_time = datetime.now()
previous_sig = -1
previous_time = datetime.now()
print('**Started recording**')
sigcount = 0
while cumulative_time < MAX_DURATION:
time_delta = datetime.now() - beginning_time
sig = GPIO.input(RECEIVE_PIN)
if sig != previous_sig:
sigtime = (datetime.now() - previous_time).microseconds
if sigtime > 0:
print( ' ' + str( sigtime ) + '* ' + str( previous_sig ) )
#print previous_sig,
if sigtime > extended_delay:
print( '-' )
sigs.append(' ')
sigcount += 1
if sigcount >= 34:
sigcount = 0
sigs.append('\n')
if previous_sig == 1:
if sigtime > short_delay:
actualsig = 1
if sigtime > long_delay:
actualsig = 0
#print actualsig,
sigs.append(str(actualsig))
#print str( sigtime ) + ' ' + str( actualsig )
previous_time = datetime.now()
previous_sig = sig
cumulative_time = time_delta.seconds
print( '**Ended recording**' )
#print len(RECEIVED_SIGNAL[0]), 'samples recorded'
GPIO.cleanup()
print( '**Processing results**' )
for i in range(len(sigs)):
# timestamp = sigs[0][i]
sig = sigs[i]
print( str(sig), end='' )
| gpl-3.0 |
rosswhitfield/mantid | qt/applications/workbench/workbench/plotting/plotscriptgenerator/test/test_plotscriptgeneratorfitting.py | 3 | 4406 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
import ast
import re
import json
import unittest
from unittest.mock import Mock
from matplotlib.backend_bases import FigureManagerBase
from mantidqt.widgets.fitpropertybrowser import FitPropertyBrowser
from workbench.plotting.plotscriptgenerator.fitting import get_fit_cmds, BASE_FIT_COMMAND, BASE_FIT_INCLUDE
EXAMPLE_FIT_PROPERTIES = {'EndX': 1018,
"Function": "name=Lorentzian,Amplitude=22078.3,"
"PeakCentre=492.226,FWHM=56.265",
"InputWorkspace": "TestWorkspace",
"MaxIterations": 5000,
"Normalise": True,
"Output": "TestWorkspaceOutput",
"OutputCompositeMembers": True, "StartX": 5.6,
"WorkspaceIndex": 4}
# This is an example of whats returned by the method getFitAlgorithmParameters in the FitPropertyBrowser
EXAMPLE_FIT_ALG_PROPERTIES = json.dumps({'name': 'Fit', 'properties': EXAMPLE_FIT_PROPERTIES, 'version': 1})
class PlotScriptGeneratorFittingTest(unittest.TestCase):
def setUp(self):
self.mock_fig = Mock()
self.mock_browser = Mock(spec=FitPropertyBrowser)
self.mock_fig.canvas.manager.fit_browser = self.mock_browser
def test_fit_commands_empty_if_no_fit(self):
self.mock_browser.fit_result_ws_name = ""
commands, headers = get_fit_cmds(self.mock_fig)
self.assertEqual(commands, [])
self.assertEqual(headers, [])
def test_returns_empty_commands_if_no_fit_browser(self):
mock_fig = Mock()
# Figure manager base has no fit browser
mock_fig.canvas.manager = Mock(spec=FigureManagerBase)
commands, headers = get_fit_cmds(mock_fig)
self.assertEqual(commands, [])
self.assertEqual(headers, [])
def test_get_fit_cmds_returns_expected_commands(self):
self.mock_browser.fit_result_ws_name = "TestWorkspace"
self.mock_browser.getFitAlgorithmParameters.return_value = EXAMPLE_FIT_ALG_PROPERTIES
commands, headers = get_fit_cmds(self.mock_fig)
self.assertEqual(headers[0], BASE_FIT_INCLUDE)
# Should be the 9 algorithm properties + the final call to Fit
# Check each is its expected value
self.assertEqual(len(commands), len(EXAMPLE_FIT_PROPERTIES) + 1)
for i in range(len(commands) - 1):
fit_property, fit_value = tuple(commands[i].split('=', 1))
self.assertEqual(EXAMPLE_FIT_PROPERTIES[fit_property], ast.literal_eval(fit_value))
def test_get_fit_commands_returns_expected_fit_algorithm_call(self):
self.mock_browser.fit_result_ws_name = "TestWorkspace"
self.mock_browser.getFitAlgorithmParameters.return_value = EXAMPLE_FIT_ALG_PROPERTIES
commands, headers = get_fit_cmds(self.mock_fig)
fit_command = commands[-1]
alg_call = re.sub(r'\([^)]*\)', '', fit_command)
fit_args = re.findall(r'\((.*?)\)', fit_command)[0].split(',')
# check that the alg call is the same
# Check correct args are passed into the Fit call
self.assertEqual(alg_call, BASE_FIT_COMMAND)
self.assertEqual(len(fit_args), len(EXAMPLE_FIT_PROPERTIES))
fit_arg_list = [fit_arg.split('=')[0] for fit_arg in fit_args]
self.assertEqual(fit_arg_list, list(EXAMPLE_FIT_PROPERTIES.keys()))
def test_get_fit_commands_returns_expected_order(self):
self.mock_browser.fit_result_ws_name = "TestWorkspace"
self.mock_browser.getFitAlgorithmParameters.return_value = EXAMPLE_FIT_ALG_PROPERTIES
commands, headers = get_fit_cmds(self.mock_fig)
fit_properties = [fit_command.split('=')[0] for fit_command in commands]
# Test some of the properties are in the correct place
self.assertEqual(fit_properties[0], 'Function')
self.assertEqual(fit_properties[2], 'WorkspaceIndex')
self.assertEqual(fit_properties[5], 'EndX')
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
openwns/wrowser | openwns/wrowser/Tools.py | 1 | 9632 | ###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2007
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 16, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: [email protected]
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from PyQt4 import QtCore, QtGui
def classAndInstanceDict(instance):
"""Join class and instance attribute dicts.
"""
return dict(instance.__class__.__dict__.items() + instance.__dict__.items())
def uniqElements(probeNames):
def numCommonElements(probeNames):
for i in range(len(probeNames[0].split(';')[0].split('.'))) :
prefix = probeNames[0].split('.')[0:i]
for probe in probeNames :
if prefix != probe.split('.')[0:i] :
return i-1
i+=1
return i-1
uniqParts = []
uniqStart = numCommonElements(probeNames)
for probe in probeNames :
probeNameParts=len(probe.split('.'))
uniqParts.append(".".join(probe.split('.')[uniqStart:]))
return uniqParts
class ParameterWriter:
out = None
def __init__(self, outstream):
self.out = outstream
def write(self, name, value, comment=''):
if len(comment)>0:
comment = " #"+comment
if type(value) == str :
self.out.write(" "+name + " = \'" + value + "\'")
else:
self.out.write(" "+name + " = " + str(value))
self.out.write(comment+"\n")
class Chameleon:
"""Class with variable attributes.
On instantiation of Chameleon, the object will get as attributes all keyword parameters
you specify.
"""
def __init__(self, **attribs):
for name, value in attribs.items():
setattr(self, name, value)
def dict2string(dic, separator = "; ", displayNoneValue = False):
"""Convert a dict to a nice string.
"""
s = ""
for key, value in dic.items():
if len(s) > 0:
s += separator
if key != None or displayNoneValue:
s += str(key)
if value != None or displayNoneValue:
s += ": " + str(value)
return s
class ObjectFilterError(Exception):
"""Raised, if the stringexpression could not be evaluated.
"""
def __init__(self, stringexpression):
self.stringexpression = stringexpression
def __str__(self):
return "Could not evaluate '" + self.stringexpression + "'"
def objectFilter(stringexpression, objectList, viewGetter = classAndInstanceDict):
"""Return all objects in 'objectList' for which 'stringexpression' applies.
'stringexpression' must be a string containing a valid python expression that
can be evaluated against the entries in the dict returned by 'viewGetter'
for all instances in 'objectList'. 'viewGetter' defaults to returning the
attributes of each object. If you want to specify that 'stringexpression'
should be checked against the dict attribute foo, use 'viewGetter = lambda x: x.foo'.
Or if you want to check against the attributes of the attribute foo (which
then is a class instance), use 'viewGetter = lambda x: classAndInstanceDict(x.foo)'.
"""
instanceList = []
for instance in objectList:
try:
if eval(stringexpression,
# we don't want globals...
{},
viewGetter(instance)):
instanceList.append(instance)
except NameError:
instanceList.append(instance)
except:
raise ObjectFilterError(stringexpression)
return instanceList
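# Illustrative usage (hypothetical attribute names, not part of this module):
# nodes = [Chameleon(rate=5.0), Chameleon(rate=0.5)]
# fast = objectFilter("rate > 1.0", nodes) # -> [nodes[0]]
# fast = objectFilter("r > 1.0", nodes, viewGetter=lambda n: {"r": n.rate})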
def convert(expression):
"""Convert the string 'expression' to its best python representation.
convert('1') will return an int, convert('2.3') will return a float and so on.
If 'expression' cannot be converted returns 'expression' as string.
"""
try:
return eval(expression, {}, {})
except:
return expression
class Observable(object):
def __init__(self):
self.emitter = QtCore.QObject()
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
self.emitter.emit(QtCore.SIGNAL("changed"), name, value, self)
self.emitter.emit(QtCore.SIGNAL(name + "_changed"), value)
class Observing:
def observe(self, callable, subject, attribName = ""):
if attribName != "":
self.connect(subject.emitter, QtCore.SIGNAL(attribName + "_changed"), callable)
else:
self.connect(subject.emitter, QtCore.SIGNAL("changed"), callable)
class URI(Observable):
def __init__(self,
scheme = "",
user = "",
password = "",
host = "",
port = "",
database = "",
parameters = ""):
Observable.__init__(self)
self.scheme = scheme
self.user = user
self.password = password
self.host = host
self.port = port
self.database = database
self.parameters = parameters
def toString(self, withPassword = False):
uri = self.scheme + "://"
if len(self.user) > 0:
uri += self.user
if withPassword and len(self.password) > 0:
uri += ":" + self.password
uri += "@"
if len(self.host) > 0:
uri += self.host
if len(self.port) > 0:
uri +=":" + self.port
if not self.database.startswith("/"):
uri += "/"
uri += self.database
if len(self.parameters) > 0:
uri += "?" + self.parameters
return uri
def __str__(self):
return self.toString()
def parse(self, uri):
def split(s, sep, minsplit, maxsplit):
l = s.split(sep, maxsplit - 1)
while len(l) < minsplit:
l += [""]
return l
self.scheme, location = split(uri, "://", 2, 2)
loginHostPort, databaseParameters = split(location, "/", 2, 2)
self.database, self.parameters = split(databaseParameters, "?", 2, 2)
if "@" in loginHostPort:
login, hostPort = split(loginHostPort, "@", 2, 2)
else:
login = ""
hostPort = loginHostPort
if ":" in login:
self.user, self.password = split(login, ":", 2, 2)
else:
self.user = login
self.host, self.port = split(hostPort, ":", 2, 2)
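# Example with hypothetical values: parse("postgres://wns:secret@dbhost:5432/results?charset=utf8")
# sets scheme="postgres", user="wns", password="secret", host="dbhost",
# port="5432", database="results", parameters="charset=utf8"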
def renderLineSampleImage(line, width, dpi = 100):
import Debug
# Debug.printCall(None, (line, width))
from matplotlib.backends.backend_agg import RendererAgg as Renderer
from matplotlib.lines import Line2D
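# Older matplotlib releases expect the dpi wrapped in a Value object; newer ones
# take a plain number, hence the useValue switch set up below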
useValue = True
try:
from matplotlib.transforms import Value
except ImportError:
useValue = False
from PyQt4 import QtGui
attributes = ["antialiased", "color", "dash_capstyle", "dash_joinstyle",
"linestyle", "linewidth", "marker", "markeredgecolor",
"markeredgewidth", "markerfacecolor", "markersize",
"solid_capstyle", "solid_joinstyle"]
lineAttributes = dict()
for attribute in attributes:
lineAttributes[attribute] = getattr(line, "get_" + attribute)()
height = max(lineAttributes["linewidth"], lineAttributes["markersize"] * 1.4 + 2 + 2*lineAttributes["markeredgewidth"])
pixmapWidth = int(width + lineAttributes["markersize"] * 1.4 + 2 + 2*lineAttributes["markeredgewidth"]) + 1
markerSize = lineAttributes["markersize"]
if(useValue):
renderer = Renderer(pixmapWidth, height, Value(dpi))
else:
renderer = Renderer(pixmapWidth, height, dpi)
linePos = int(height / 2 + 1)
sampleLine = Line2D([markerSize, pixmapWidth - markerSize], [linePos, linePos], **lineAttributes)
sampleLine.draw(renderer)
lineImageStr = renderer.tostring_argb()
lineARGB = [map(ord, lineImageStr[i:i+4]) for i in xrange(0, len(lineImageStr), 4)]
image = QtGui.QImage(pixmapWidth, height, QtGui.QImage.Format_ARGB32)
for x in xrange(pixmapWidth):
for y in xrange(int(height)):
argb = lineARGB[x + y * pixmapWidth]
image.setPixel(x, y, QtGui.qRgba(argb[1], argb[2], argb[3], argb[0]))
return image
class ProbeFilterValidator(QtGui.QValidator):
def __init__(self, probesModel, parent):
QtGui.QValidator.__init__(self, parent)
self.probesModel = probesModel
def validate(self, input, pos):
if len(self.probesModel.getProbeNamesFilteredBy(str(input))) == 0:
return (QtGui.QValidator.Intermediate, pos)
else:
return (QtGui.QValidator.Acceptable, pos)
| gpl-2.0 |
medifle/python_6.00.1x | HashGraphs/HashGraphs.py | 1 | 1819 | import matplotlib.pyplot as plt
import string
def loadWords():
'''Returns a list containing the words from PSET 4'''
fin = open('/Users/bolo/githubRepo/python_6.00.1x/HashGraphs/words.txt', 'r')
wordList = []
for line in fin:
wordList.append(line.strip().lower())
fin.close()
return wordList
##--------------------------------------------------------
## The following three hash functions were given to us.
def firstHashFunction(s):
return string.ascii_lowercase.index(s[0])
def secondHashFunction(s):
return string.ascii_lowercase.index(s[-1])
def thirdHashFunction(s):
total = 0
for char in s:
total += string.ascii_lowercase.index(char)
return total % 26
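# e.g. thirdHashFunction("abc") == (0 + 1 + 2) % 26 == 3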
##----------------------------------------------------------
def doHashing(hashFunction):
'''Takes as argument one of the three hash functions defined.
Returns a dictionary containing a frequency distribution of all integers
returned by the hash function.'''
wordList = loadWords()
hashedDict = {}
for w in wordList:
#Convert each word in the wordList to its corresponding hash.
hashedWord = hashFunction(w)
#Create a frequency distribution of all the integers returned by the hash function.
hashedDict[hashedWord] = hashedDict.get(hashedWord, 0) + 1
return hashedDict
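# Illustrative call (assumes words.txt exists at the hard-coded path above):
# doHashing(firstHashFunction) -> {0: <count of words starting with 'a'>, 1: ...}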
def plotHash(hashFunction):
''' Takes as argument one of the hash functions defined. Plots frequency distribution
of all the integers returned by the hash function.'''
hashedDict = doHashing(hashFunction)
plt.bar(hashedDict.keys(), hashedDict.values(), facecolor='#9999ff', edgecolor='white')
plt.ylabel('Number of words in wordlist')
plt.xlabel('Hash')
plt.title(hashFunction)
plt.axis([0, 26, 0, 10000])
plt.show()
| mit |
apacha/MusicSymbolClassifier | ModelTrainer/reporting/test_classification.py | 1 | 9765 | from sklearn import datasets
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics.tests.test_classification import make_prediction
from sklearn.utils.testing import assert_warns_message, assert_equal
from reporting.sklearn_reporting import classification_report
import numpy as np
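# These tests exercise the custom classification_report from reporting.sklearn_reporting,
# which adds explicit averaging modes (weighted/samples/micro/macro/binary) to the
# per-class precision/recall/f1 table produced by scikit-learn.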
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
weighted avg 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
weighted avg 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
weighted avg 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
weighted avg 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
weighted avg 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
weighted avg 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
weighted avg 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_long_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array(["blue", "green" * 5, "red"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
greengreengreengreengreen 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
weighted avg 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_labels_target_names_unequal_length():
y_true = [0, 0, 2, 0, 0]
y_pred = [0, 2, 2, 0, 0]
target_names = ['class 0', 'class 1', 'class 2']
assert_warns_message(UserWarning,
"labels size, 2, does not "
"match size of target_names, 3",
classification_report,
y_true, y_pred, target_names=target_names)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
weighted avg 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_classification_report_with_samples_averaging():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
samples avg 0.46 0.42 0.40 104
"""
report = classification_report(y_true, y_pred, average='samples')
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_micro_averaging():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
micro avg 0.53 0.53 0.53 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, average='micro')
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_macro_averaging():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
macro avg 0.53 0.60 0.51 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, average='macro')
assert_equal(report, expected_report)
def test_classification_report_binary_averaging():
y_true = [0, 1, 1, 1, 0, 1, 1, 0]
y_pred = [0, 0, 1, 1, 1, 0, 1, 0]
# print classification report with class names
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 3
1 0.75 0.60 0.67 5
binary avg 0.75 0.60 0.67 8
"""
report = classification_report(y_true, y_pred, average='binary')
assert_equal(report, expected_report)
| mit |
hsiaoching/streethunt-matcher | streethunt.py | 2 | 8126 | # -*- coding: utf-8 -*-
import ParsePy
import numpy as np
import cv2
import sys, os, urllib, csv, json, time, shutil
import geopy
from math import floor
from util import *
from geopy import distance
from operator import itemgetter
import matplotlib.pyplot as plt
"""
Data Model:
db_dict = {
'file0.jpg':{
'lat':23.7584929,
'lnt':121.4858102,
'descps':[DESCP0_LIST, DESCP1_LIST, DESCP2_LIST, DESCP3_LIST]
},
'file1.jpg':{
...
}
...
}
"""
data_points = []
testcase_data = None
testcase_video = None
db_dict = {}
dbname = "database"
dirname = "XinYiRd-East"
data_file_name = "data.json"
NEAR_DISTANCE_THRESHOLD = 25
API_KEY = "AIzaSyBS-HaMAHhazScAOwdTOaclJEGBNptWFss"
N_SLICE = 4
def fetch_data(nodes, street_name, direction):
#print "Fetching data for " + street_name
data = []
#dirname = "%s_%s" % (street_name, direction)
global dbname, dirname, db_dict
for n in nodes:
print str(nodes.index(n) + 1) + "/" + str(len(nodes)) + "(" + str(n.lat) + ", " + str(n.lng) + ")"
data.append([n.lat, n.lng])
for i in range(8):
url = imageurl(n.lat, n.lng, i * 45)
img_file_name = get_img_file_name(n.lat, n.lng, i)
if not db_dict.has_key(img_file_name):
db_dict[img_file_name] = {
'lat':n.lat,
'lnt':n.lng
}
save_image(url, dbname, dirname, "sv_%f_%f_%d.jpg" % (n.lat, n.lng, i))
f = open(os.path.join(dbname, dirname, data_file_name), 'w')
json.dump(db_dict, f, indent=2)
f.close()
def load_db(street_name=u"信義路", direction=u"東"):
#dirname = street_name + "_" + direction
global dirname
dirpath = os.path.join(dbname, dirname)
query = ParsePy.ParseQuery("Node")
query = query.limit(10000).eq("streetName", street_name).eq("direction", direction)
nodes = query.fetch()
print "There're %d nodes on the server." % (len(nodes))
global files
if not os.path.exists(dirpath):
fetch_data(nodes, street_name, direction)
else:
files = [f for f in os.listdir(dirpath) if f[-3:] == "jpg"]
data_file_path = os.path.join(dbname, dirname, data_file_name)
if os.path.isfile(data_file_path):
f = open(data_file_path, 'r')
global db_dict
db_dict = json.load(f)
if db_dict is None:
db_dict = {}
if not ((len(files) / 8 == len(nodes)) and (len(db_dict) / 8 == len(nodes))):
fetch_data(nodes, street_name, direction)
else:
print "Your database is up-to-date."
else:
print "data.json file does not exists."
fetch_data(nodes, street_name, direction)
global data_points
data_points = [geopy.Point(n.lat, n.lng) for n in nodes]
def compute_db_descts():
print "Start computing descriptors for images in database."
has_updated = False
global db_dict, dbname, dirname, data_file_name
print "There are currently", len(db_dict), "image data in db_dict."
start = time.time()
n_files = len(db_dict)
for i in range(n_files):
f = db_dict.keys()[i]
if not db_dict[f].has_key('descps'):
print "[%04d/%04d] Computing descriptors for %s" % (i + 1, n_files, f)
has_updated = True
file_path = os.path.join(dbname, dirname, f)
descps = [d.tolist() for d in get_descriptor(f_path=file_path, n_slice=N_SLICE)[1]]
db_dict[f]['descps'] = descps
end = time.time()
print "Computing finished. Elapsed time: %.5f sec." % ((end - start) / 1000)
if has_updated:
f = open(os.path.join(dbname, dirname, data_file_name), 'w')
json.dump(db_dict, f, indent=4)
f.close()
def get_nearby_points(lat, lnt, threshold=NEAR_DISTANCE_THRESHOLD):
p = geopy.Point(lat, lnt)
near_points = [dp for dp in data_points if distance.distance(p, dp).m < threshold]
#for np in near_points:
# for i in range(8):
# print "sv_%f_%f_%d.jpg" % (np.latitude, np.longitude, i)
print "Found", len(near_points), "points"
return near_points
def get_clip_data(video_name="IMG_2124.mov", id="HK6Kyn3LMr"):
#default id is for IMG_2124.mov
global testcase_data
testcase_data = ParsePy.ParseQuery("Clip").get(id)
video_path = os.path.join("testcase", video_name)
if os.path.isfile(video_path):
global testcase_video
testcase_video = cv2.VideoCapture(video_path)
if not testcase_video.isOpened():
testcase_video = None
print "ERROR: video file isn't opened"
else:
print "Plase place " + video_name + " under directory testcase/"
def compare_frame(frame, lat, lnt, n_slice=4):
near_points = get_nearby_points(lat, lnt, NEAR_DISTANCE_THRESHOLD)
file_set = []
global dirname
for np in near_points:
for i in range(8):
file_set.append("sv_%f_%f_%d.jpg" % (np.latitude, np.longitude, i))
frame_descp = get_descriptor(img=frame, n_slice=N_SLICE)[1]
db_descps = []
for f in file_set:
descp = None
print f
if db_dict[f].has_key('descps'):
print "Descriptors are already computed!"
descp = db_dict[f]['descps']
else:
file_path = os.path.join(dbname, dirname, f)
descp = get_descriptor(f_path=file_path, n_slice=N_SLICE)[1]
db_dict[f]['descps'] = descp
db_descps.append(descp)
min_dists = [get_min_dist(d, frame_descp) for d in db_descps]
indices, sorted_min_dists = zip(*sorted(enumerate(min_dists), key=itemgetter(1)))
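# 'indices' orders the candidate Street View images from smallest to largest
# descriptor distance, so indices[0] points at the best match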
print "********************"
print "Ranking:"
for i in indices:
print file_set[i], min_dists[i]
return file_set, indices, min_dists[indices[0]]
def match():
min_dists = []
changes = []
cmp_lat = -1.0
cmp_lnt = -1.0
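# VideoCapture property 7 is CV_CAP_PROP_FRAME_COUNT, i.e. the total number of frames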
n_frame = testcase_video.get(7)
output_dir_name = "output"
if not os.path.exists(output_dir_name):
os.mkdir(output_dir_name)
for i in range(len(testcase_data.dataPoints)):
#for i in range(1):
#i = len(testcase_data.dataPoints) - 7
lat = testcase_data.dataPoints[i]['location']['latitude']
lnt = testcase_data.dataPoints[i]['location']['longitude']
print "[" + str(i * 0.2) + "] (" + str(lat) + ", " + str(lnt) + ")"
if lat != cmp_lat and lnt != cmp_lnt:
#print "!"
changes.append(i)
cmp_lat = lat
cmp_lnt = lnt
print changes
#for i in changes:
for i in range(1):
time = i * 0.2
frame = n_frame * i * 0.2 / testcase_data.length
print "[" + str(i * 0.2) + "] (" + str(lat) + ", " + str(lnt) + ") ===> " + str(frame)
img_frame = get_specific_frame(testcase_video, frame)
cv2.imwrite('%d.jpg' % (i + 1), img_frame)
files, indices, min_dist = compare_frame(get_specific_frame(testcase_video, frame), lat, lnt)
for j in range(len(files)):
src_path = os.path.join(dbname, dirname, files[indices[j]])
des_path = os.path.join(output_dir_name, "%d_%d.jpg" % (i + 1, j + 1))
shutil.copy2(src_path, des_path)
print "[%.1f] %.5f" % (time, min_dist)
min_dists.append(min_dist)
for i in range(len(changes)):
print "[%.1f] %.5f" % (changes[i] * 0.2, min_dists[i])
print "Saving db_dict before exit."
plt.plot(min_dists)
plt.show()
def main():
init_parse()
load_db()
#compute_db_descts()
get_clip_data()
n_frame = floor(testcase_video.get(7))
print "Total " + str(n_frame) + " frames"
match()
if __name__ == '__main__':
try:
main()
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
print e
#print "Saving db_dict before exit."
#f = open(os.path.join(dbname, dirname, data_file_name), 'w')
#json.dump(db_dict, f, indent=2)
#f.close()
| mit |
mkness/TheCannon | code/deprecated/makeplot_R4.py | 1 | 4257 | #!/usr/bin/python
import numpy
from numpy import savetxt
import matplotlib
from matplotlib import pyplot
import scipy
from scipy import interpolate
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
s = matplotlib.font_manager.FontProperties()
s.set_family('serif')
s.set_size(14)
from matplotlib import rc
rc('text', usetex=False)
rc('font', family='serif')
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib
from matplotlib import pyplot
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
s = matplotlib.font_manager.FontProperties()
s.set_family('serif')
rcParams["xtick.labelsize"] = 14
rcParams["ytick.labelsize"] = 14
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
s = matplotlib.font_manager.FontProperties()
majorLocator = MultipleLocator(5)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(5)
yminorLocator = MultipleLocator(10)
yminorLocator2 = MultipleLocator(25)
xminorLocator = MultipleLocator(5)
yminorLocator = MultipleLocator(5)
ymajorLocator = MultipleLocator(50)
xmajorLocator = MultipleLocator(10)
rcParams['figure.figsize'] = 15.0, 10.0
fig, temp = pyplot.subplots(5,1, sharex=True, sharey=False)
ax1 = temp[0]
ax2 = temp[1]
ax3 = temp[2]
ax4 = temp[3]
ax5 = temp[4]
#plot(dataall[:, 0, 0], 1. * coeffs[:, 0]) # mean spectra
#plot(dataall[:, 0, 0], 1. * coeffs[:, 1]) # teff
#plot(dataall[:, 0, 0], 1. * coeffs[:, 2]) # log g
#plot(dataall[:, 0, 0], 1. * coeffs[:, 3]) # feh
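# NOTE: dataall, coeffs and invcovs are assumed to already exist in the interactive
# namespace (produced by the preceding Cannon fitting step); this script only plots them.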
covs_mean = 1/invcovs[:,:,0]*10**-17
covs_t = 1/invcovs[:,:,1]*10**-17
covs_g = 1/invcovs[:,:,2]*10**-17
covs_feh = 1/invcovs[:,:,3]*10**-17
#ax1.plot(dataall[:, 0, 0], 1. * coeffs[:, 0],color = 'k' ,linewidth = 2) # median
ax1.plot(dataall[:, 0, 0], 1. * coeffs[:, 1], color = 'green' ,linewidth = 2) # teff
ax2.plot(dataall[:, 0, 0], 1. * coeffs[:, 2], color = 'blue' ,linewidth = 2) # g
ax3.plot(dataall[:, 0, 0], 1. * coeffs[:, 3],color = 'red' ,linewidth = 2) # feh
ax4.plot(dataall[:, 0, 0], 1. * coeffs[:, 0],color = 'k' ,linewidth = 2) # median
ax5.plot(dataall[:, 0, 0], 1. * coeffs[:, 3], color = 'red',linewidth = 2) # feh
ax5.plot(dataall[:, 0, 0], 1. * coeffs[:, 2], color = 'blue',linewidth = 2) # g
ax5.plot(dataall[:, 0, 0], 1000. * coeffs[:, 1], color = 'green',linewidth = 2) # teff
l1a = 15390
l2a = 15394
l1b = 15693
l2b = 15700
l1c = 15956
l2c = 15960
l1d = 16202
l2d = 16210
l1e = 16116
l2e = 16122
l1f = 1666.5
l2f = 16172.5
val0 = 16208.6
ax1.vlines(val0, -1,2, linestyle = 'dashed', linewidth = 2)
ax2.vlines(val0, -1,2, linestyle = 'dashed', linewidth = 2)
ax3.vlines(val0, -1,2, linestyle = 'dashed', linewidth = 2)
ax4.vlines(val0, -1,2, linestyle = 'dashed', linewidth = 2)
ax5.vlines(val0, -1,2, linestyle = 'dashed', linewidth = 2)
ax2.set_xlim(l1d - 20, l2d + 20 )
ax1.set_ylim(-0.00046 ,0.00046)
ax2.set_ylim(-0.5 ,0.5)
ax3.set_ylim(-0.5 ,0.5)
ax4.set_ylim(0.6 ,1.2)
ax5.set_ylim(-0.5 ,0.5)
ax1.set_xlim(l1d - 20, l2d + 20 )
ax2.set_xlim(l1d - 20, l2d + 20 )
ax3.set_xlim(l1d - 20, l2d + 20 )
ax4.set_xlim(l1d - 20, l2d + 20 )
ax5.set_xlim(l1d - 20, l2d + 20 )
ax1.text(l1b-19, 0.00004, "Teff coeff" , fontsize = 12)
ax2.text(l1b-19, 0.3, "log g coeff" , fontsize = 12)
ax3.text(l1b-19, 0.3, "[Fe/H] coeff" , fontsize = 12)
ax4.text(l1b-19, 1.1, "mean spectra" , fontsize = 12)
ax5.text(l1b-19, 0.3, "[Fe/H] coeff, log g coeff, Teff coeff*1000" , fontsize = 12)
ax1.set_title("REGION 4 USED FOR [Fe/H] INDEX")
axlist = [ax1,ax2,ax3,ax4,ax5]
for each in axlist:
each.plot([l1d-30,l2d+30], [0,0],'k--')
ax4.plot([l1b-30, l2d+30],[1,1], 'k--')
ax1.axvspan(l1d, l2d, facecolor='c', alpha=0.1)
ax2.axvspan(l1d, l2d, facecolor='c', alpha=0.1)
ax3.axvspan(l1d, l2d, facecolor='c', alpha=0.1)
ax4.axvspan(l1d, l2d, facecolor='c', alpha=0.1)
ax5.axvspan(l1d, l2d, facecolor='c', alpha=0.1)
ax5.set_xlabel("Wavelength $\AA$", fontsize = 20)
ax1.set_ylabel("coeff a1", fontsize = 20)
ax2.set_ylabel("coeff a2", fontsize = 20)
ax3.set_ylabel("coeff a3", fontsize = 20)
ax4.set_ylabel("coeff a0", fontsize = 20)
ax4.set_ylabel("coeff a0", fontsize = 20)
ax5.set_ylabel("coeff a1,a2,a3", fontsize = 20)
fig.subplots_adjust(hspace=0)
fig.subplots_adjust(wspace=0)
| mit |
mayblue9/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation be done on a copy. Let the default value
to True unless you don't care about side effects
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
henridwyer/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be casted as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess wether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for each parameter set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
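# One possible completion of the TASKs above (an illustrative sketch, not the
# canonical solution; the hyper-parameter values below are only examples):
pipeline = Pipeline([
    ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
    ('clf', LinearSVC(C=1000)),
])
parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
for params, mean_score, scores in grid_search.grid_scores_:
    print("%0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() * 2, params))
y_predicted = grid_search.predict(docs_test)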
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
chrisburr/scikit-learn | sklearn/utils/__init__.py | 22 | 12134 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric)
from .deprecation import deprecated
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
from ..exceptions import ConvergenceWarning as ConvergenceWarning_
from ..exceptions import DataConversionWarning as DataConversionWarning_
class ConvergenceWarning(ConvergenceWarning_):
pass
ConvergenceWarning = deprecated("ConvergenceWarning has been moved "
"into the sklearn.exceptions module. "
"It will not be available here from "
"version 0.19")(ConvergenceWarning)
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask : array
Mask to be used on X.
Returns
-------
mask : array
The validated mask; converted to integer indices when X is a sparse matrix.
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning_)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1"
% n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
| bsd-3-clause |
rvraghav93/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 48 | 2080 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
# #############################################################################
# Generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=n_samples)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
# #############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
# #############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
gojira/tensorflow | tensorflow/python/estimator/canned/dnn_linear_combined_test.py | 11 | 33691 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn_linear_combined.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator.canned import dnn_linear_combined
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import linear_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer as optimizer_lib
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class DNNOnlyModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, self._dnn_only_model_fn)
def _dnn_only_model_fn(self,
features,
labels,
mode,
head,
hidden_units,
feature_columns,
optimizer='Adagrad',
activation_fn=nn.relu,
dropout=None,
input_layer_partitioner=None,
config=None):
return dnn_linear_combined._dnn_linear_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
linear_feature_columns=[],
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_activation_fn=activation_fn,
dnn_dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
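# Routing the shared BaseDNNModelFnTest cases through the combined model_fn with an
# empty linear feature-column list exercises the DNN-only code path.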
# A function to mimic linear-regressor init, so the same tests can be reused.
def _linear_regressor_fn(feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Ftrl',
config=None,
partitioner=None):
return dnn_linear_combined.DNNLinearCombinedRegressor(
model_dir=model_dir,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
label_dimension=label_dimension,
weight_column=weight_column,
input_layer_partitioner=partitioner,
config=config)
class LinearOnlyRegressorPartitionerTest(
linear_testing_utils.BaseLinearRegressorPartitionerTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorPartitionerTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorEvaluationTest(
linear_testing_utils.BaseLinearRegressorEvaluationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorEvaluationTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorPredictTest(
linear_testing_utils.BaseLinearRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorPredictTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorIntegrationTest(
linear_testing_utils.BaseLinearRegressorIntegrationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorIntegrationTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorTrainingTest(
linear_testing_utils.BaseLinearRegressorTrainingTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorTrainingTest.__init__(
self, _linear_regressor_fn)
def _linear_classifier_fn(feature_columns,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Ftrl',
config=None,
partitioner=None):
return dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
n_classes=n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
input_layer_partitioner=partitioner,
config=config)
class LinearOnlyClassifierTrainingTest(
linear_testing_utils.BaseLinearClassifierTrainingTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierTrainingTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierClassesEvaluationTest(
linear_testing_utils.BaseLinearClassifierEvaluationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierEvaluationTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierPredictTest(
linear_testing_utils.BaseLinearClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierPredictTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierIntegrationTest(
linear_testing_utils.BaseLinearClassifierIntegrationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierIntegrationTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class DNNLinearCombinedRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
linear_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
dnn_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
feature_columns = linear_feature_columns + dnn_feature_columns
est = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=linear_feature_columns,
dnn_hidden_units=(2, 2),
dnn_feature_columns=dnn_feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
# A function to mimic dnn-classifier init, so the same tests can be reused.
def _dnn_classifier_fn(hidden_units,
feature_columns,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Adagrad',
config=None,
input_layer_partitioner=None):
return dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=model_dir,
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
n_classes=n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
input_layer_partitioner=input_layer_partitioner,
config=config)
class DNNOnlyClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNOnlyClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNOnlyClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
# A function to mimic dnn-regressor init, so the same tests can be reused.
def _dnn_regressor_fn(hidden_units,
feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Adagrad',
config=None,
input_layer_partitioner=None):
return dnn_linear_combined.DNNLinearCombinedRegressor(
model_dir=model_dir,
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
label_dimension=label_dimension,
weight_column=weight_column,
input_layer_partitioner=input_layer_partitioner,
config=config)
class DNNOnlyRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNOnlyRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNOnlyRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
class DNNLinearCombinedClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
linear_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
dnn_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
feature_columns = linear_feature_columns + dnn_feature_columns
est = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_feature_columns,
dnn_hidden_units=(2, 2),
dnn_feature_columns=dnn_feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = self._as_label(np.reshape(data[:batch_size], (batch_size, 1)))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 2
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes-1., batch_size * input_dimension,
dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
class DNNLinearCombinedTests(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, real_optimizer, var_name_prefix):
"""Verifies global_step is None and var_names start with given prefix."""
def _minimize(loss, global_step=None, var_list=None):
self.assertIsNone(global_step)
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
var_names = [var.name for var in trainable_vars]
self.assertTrue(
all([name.startswith(var_name_prefix) for name in var_names]))
      # var is used to check that this op is called during training.
with ops.name_scope(''):
var = variables_lib.Variable(0., name=(var_name_prefix + '_called'))
with ops.control_dependencies([var.assign(100.)]):
return real_optimizer.minimize(loss, global_step, var_list)
optimizer_mock = test.mock.NonCallableMagicMock(
spec=optimizer_lib.Optimizer, wraps=real_optimizer)
optimizer_mock.minimize = test.mock.MagicMock(wraps=_minimize)
return optimizer_mock
def test_train_op_calls_both_dnn_and_linear(self):
opt = gradient_descent.GradientDescentOptimizer(1.)
x_column = feature_column.numeric_column('x')
input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[0.], [1.]])},
y=np.array([[0.], [1.]]),
batch_size=1,
shuffle=False)
est = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[x_column],
# verifies linear_optimizer is used only for linear part.
linear_optimizer=self._mock_optimizer(opt, 'linear'),
dnn_hidden_units=(2, 2),
dnn_feature_columns=[x_column],
        # verifies dnn_optimizer is used only for dnn part.
dnn_optimizer=self._mock_optimizer(opt, 'dnn'),
model_dir=self._model_dir)
est.train(input_fn, steps=1)
# verifies train_op fires linear minimize op
self.assertEqual(100.,
checkpoint_utils.load_variable(
self._model_dir, 'linear_called'))
# verifies train_op fires dnn minimize op
self.assertEqual(100.,
checkpoint_utils.load_variable(
self._model_dir, 'dnn_called'))
def test_dnn_and_linear_logits_are_added(self):
with ops.Graph().as_default():
variables_lib.Variable([[1.0]], name='linear/linear_model/x/weights')
variables_lib.Variable([2.0], name='linear/linear_model/bias_weights')
variables_lib.Variable([[3.0]], name='dnn/hiddenlayer_0/kernel')
variables_lib.Variable([4.0], name='dnn/hiddenlayer_0/bias')
variables_lib.Variable([[5.0]], name='dnn/logits/kernel')
variables_lib.Variable([6.0], name='dnn/logits/bias')
variables_lib.Variable(1, name='global_step', dtype=dtypes.int64)
linear_testing_utils.save_variables_to_ckpt(self._model_dir)
x_column = feature_column.numeric_column('x')
est = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[x_column],
dnn_hidden_units=[1],
dnn_feature_columns=[x_column],
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)
# linear logits = 10*1 + 2 = 12
# dnn logits = (10*3 + 4)*5 + 6 = 176
# logits = dnn + linear = 176 + 12 = 188
self.assertAllClose(
{
prediction_keys.PredictionKeys.PREDICTIONS: [188.],
},
next(est.predict(input_fn=input_fn)))
class DNNLinearCombinedWarmStartingTest(test.TestCase):
def setUp(self):
# Create a directory to save our old checkpoint and vocabularies to.
self._ckpt_and_vocab_dir = tempfile.mkdtemp()
# Make a dummy input_fn.
def _input_fn():
features = {
'age': [[23.], [31.]],
'city': [['Palo Alto'], ['Mountain View']],
}
return features, [0, 1]
self._input_fn = _input_fn
def tearDown(self):
# Clean up checkpoint / vocab dir.
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._ckpt_and_vocab_dir)
def test_classifier_basic_warm_starting(self):
"""Tests correctness of DNNLinearCombinedClassifier default warm-start."""
age = feature_column.numeric_column('age')
city = feature_column.embedding_column(
feature_column.categorical_column_with_vocabulary_list(
'city', vocabulary_list=['Mountain View', 'Palo Alto']),
dimension=5)
# Create a DNNLinearCombinedClassifier and train to save a checkpoint.
dnn_lc_classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age],
dnn_feature_columns=[city],
dnn_hidden_units=[256, 128],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
linear_optimizer='SGD',
dnn_optimizer='SGD')
dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second DNNLinearCombinedClassifier, warm-started from the first.
# Use a learning_rate = 0.0 optimizer to check values (use SGD so we don't
# have accumulator values that change).
warm_started_dnn_lc_classifier = (
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age],
dnn_feature_columns=[city],
dnn_hidden_units=[256, 128],
n_classes=4,
linear_optimizer=gradient_descent.GradientDescentOptimizer(
learning_rate=0.0),
dnn_optimizer=gradient_descent.GradientDescentOptimizer(
learning_rate=0.0),
warm_start_from=dnn_lc_classifier.model_dir))
warm_started_dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1)
for variable_name in warm_started_dnn_lc_classifier.get_variable_names():
self.assertAllClose(
dnn_lc_classifier.get_variable_value(variable_name),
warm_started_dnn_lc_classifier.get_variable_value(variable_name))
def test_regressor_basic_warm_starting(self):
"""Tests correctness of DNNLinearCombinedRegressor default warm-start."""
age = feature_column.numeric_column('age')
city = feature_column.embedding_column(
feature_column.categorical_column_with_vocabulary_list(
'city', vocabulary_list=['Mountain View', 'Palo Alto']),
dimension=5)
# Create a DNNLinearCombinedRegressor and train to save a checkpoint.
dnn_lc_regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[age],
dnn_feature_columns=[city],
dnn_hidden_units=[256, 128],
model_dir=self._ckpt_and_vocab_dir,
linear_optimizer='SGD',
dnn_optimizer='SGD')
dnn_lc_regressor.train(input_fn=self._input_fn, max_steps=1)
# Create a second DNNLinearCombinedRegressor, warm-started from the first.
# Use a learning_rate = 0.0 optimizer to check values (use SGD so we don't
# have accumulator values that change).
warm_started_dnn_lc_regressor = (
dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[age],
dnn_feature_columns=[city],
dnn_hidden_units=[256, 128],
linear_optimizer=gradient_descent.GradientDescentOptimizer(
learning_rate=0.0),
dnn_optimizer=gradient_descent.GradientDescentOptimizer(
learning_rate=0.0),
warm_start_from=dnn_lc_regressor.model_dir))
warm_started_dnn_lc_regressor.train(input_fn=self._input_fn, max_steps=1)
for variable_name in warm_started_dnn_lc_regressor.get_variable_names():
self.assertAllClose(
dnn_lc_regressor.get_variable_value(variable_name),
warm_started_dnn_lc_regressor.get_variable_value(variable_name))
def test_warm_starting_selective_variables(self):
"""Tests selecting variables to warm-start."""
age = feature_column.numeric_column('age')
city = feature_column.embedding_column(
feature_column.categorical_column_with_vocabulary_list(
'city', vocabulary_list=['Mountain View', 'Palo Alto']),
dimension=5)
# Create a DNNLinearCombinedClassifier and train to save a checkpoint.
dnn_lc_classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age],
dnn_feature_columns=[city],
dnn_hidden_units=[256, 128],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
linear_optimizer='SGD',
dnn_optimizer='SGD')
dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second DNNLinearCombinedClassifier, warm-started from the first.
# Use a learning_rate = 0.0 optimizer to check values (use SGD so we don't
# have accumulator values that change).
warm_started_dnn_lc_classifier = (
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age],
dnn_feature_columns=[city],
dnn_hidden_units=[256, 128],
n_classes=4,
linear_optimizer=gradient_descent.GradientDescentOptimizer(
learning_rate=0.0),
dnn_optimizer=gradient_descent.GradientDescentOptimizer(
learning_rate=0.0),
# The provided regular expression will only warm-start the deep
# portion of the model.
warm_start_from=estimator.WarmStartSettings(
ckpt_to_initialize_from=dnn_lc_classifier.model_dir,
vars_to_warm_start='.*(dnn).*')))
warm_started_dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1)
for variable_name in warm_started_dnn_lc_classifier.get_variable_names():
if 'dnn' in variable_name:
self.assertAllClose(
dnn_lc_classifier.get_variable_value(variable_name),
warm_started_dnn_lc_classifier.get_variable_value(variable_name))
elif 'linear' in variable_name:
linear_values = warm_started_dnn_lc_classifier.get_variable_value(
variable_name)
# Since they're not warm-started, the linear weights will be
# zero-initialized.
self.assertAllClose(np.zeros_like(linear_values), linear_values)
if __name__ == '__main__':
test.main()
| apache-2.0 |
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/pandas/io/tests/test_ga.py | 5 | 7200 | import os
from datetime import datetime
import nose
import pandas as pd
from pandas import DataFrame
from pandas.util.testing import network, assert_frame_equal, with_connectivity_check
from numpy.testing.decorators import slow
import pandas.util.testing as tm
try:
import httplib2
import pandas.io.ga as ga
from pandas.io.ga import GAnalytics, read_ga
from pandas.io.auth import AuthenticationConfigError, reset_default_token_store
from pandas.io import auth
except ImportError:
raise nose.SkipTest("need httplib2 and auth libs")
class TestGoogle(tm.TestCase):
_multiprocess_can_split_ = True
def test_remove_token_store(self):
auth.DEFAULT_TOKEN_FILE = 'test.dat'
with open(auth.DEFAULT_TOKEN_FILE, 'w') as fh:
fh.write('test')
reset_default_token_store()
self.assertFalse(os.path.exists(auth.DEFAULT_TOKEN_FILE))
@slow
@network
def test_getdata(self):
try:
end_date = datetime.now()
start_date = end_date - pd.offsets.Day() * 5
end_date = end_date.strftime('%Y-%m-%d')
start_date = start_date.strftime('%Y-%m-%d')
reader = GAnalytics()
df = reader.get_data(
metrics=['avgTimeOnSite', 'visitors', 'newVisits',
'pageviewsPerVisit'],
start_date=start_date,
end_date=end_date,
dimensions=['date', 'hour'],
parse_dates={'ts': ['date', 'hour']})
assert isinstance(df, DataFrame)
assert isinstance(df.index, pd.DatetimeIndex)
assert len(df) > 1
assert 'date' not in df
assert 'hour' not in df
assert df.index.name == 'ts'
assert 'avgTimeOnSite' in df
assert 'visitors' in df
assert 'newVisits' in df
assert 'pageviewsPerVisit' in df
df2 = read_ga(
metrics=['avgTimeOnSite', 'visitors', 'newVisits',
'pageviewsPerVisit'],
start_date=start_date,
end_date=end_date,
dimensions=['date', 'hour'],
parse_dates={'ts': ['date', 'hour']})
assert_frame_equal(df, df2)
except AuthenticationConfigError:
raise nose.SkipTest("authentication error")
@slow
@with_connectivity_check("http://www.google.com")
def test_iterator(self):
try:
reader = GAnalytics()
it = reader.get_data(
metrics='visitors',
start_date='2005-1-1',
dimensions='date',
max_results=10, chunksize=5)
df1 = next(it)
df2 = next(it)
for df in [df1, df2]:
assert isinstance(df, DataFrame)
assert isinstance(df.index, pd.DatetimeIndex)
assert len(df) == 5
assert 'date' not in df
assert df.index.name == 'date'
assert 'visitors' in df
assert (df2.index > df1.index).all()
except AuthenticationConfigError:
raise nose.SkipTest("authentication error")
def test_v2_advanced_segment_format(self):
advanced_segment_id = 1234567
query = ga.format_query('google_profile_id', ['visits'], '2013-09-01', segment=advanced_segment_id)
assert query['segment'] == 'gaid::' + str(advanced_segment_id), "An integer value should be formatted as an advanced segment."
def test_v2_dynamic_segment_format(self):
dynamic_segment_id = 'medium==referral'
query = ga.format_query('google_profile_id', ['visits'], '2013-09-01', segment=dynamic_segment_id)
assert query['segment'] == 'dynamic::ga:' + str(dynamic_segment_id), "A string value with more than just letters and numbers should be formatted as a dynamic segment."
def test_v3_advanced_segment_common_format(self):
advanced_segment_id = 'aZwqR234'
query = ga.format_query('google_profile_id', ['visits'], '2013-09-01', segment=advanced_segment_id)
assert query['segment'] == 'gaid::' + str(advanced_segment_id), "A string value with just letters and numbers should be formatted as an advanced segment."
def test_v3_advanced_segment_weird_format(self):
advanced_segment_id = '_aZwqR234-s1'
query = ga.format_query('google_profile_id', ['visits'], '2013-09-01', segment=advanced_segment_id)
assert query['segment'] == 'gaid::' + str(advanced_segment_id), "A string value with just letters, numbers, and hyphens should be formatted as an advanced segment."
def test_v3_advanced_segment_with_underscore_format(self):
advanced_segment_id = 'aZwqR234_s1'
query = ga.format_query('google_profile_id', ['visits'], '2013-09-01', segment=advanced_segment_id)
assert query['segment'] == 'gaid::' + str(advanced_segment_id), "A string value with just letters, numbers, and underscores should be formatted as an advanced segment."
@slow
@with_connectivity_check("http://www.google.com")
def test_segment(self):
try:
end_date = datetime.now()
start_date = end_date - pd.offsets.Day() * 5
end_date = end_date.strftime('%Y-%m-%d')
start_date = start_date.strftime('%Y-%m-%d')
reader = GAnalytics()
df = reader.get_data(
metrics=['avgTimeOnSite', 'visitors', 'newVisits',
'pageviewsPerVisit'],
start_date=start_date,
end_date=end_date,
segment=-2,
dimensions=['date', 'hour'],
parse_dates={'ts': ['date', 'hour']})
assert isinstance(df, DataFrame)
assert isinstance(df.index, pd.DatetimeIndex)
assert len(df) > 1
assert 'date' not in df
assert 'hour' not in df
assert df.index.name == 'ts'
assert 'avgTimeOnSite' in df
assert 'visitors' in df
assert 'newVisits' in df
assert 'pageviewsPerVisit' in df
#dynamic
df = read_ga(
metrics=['avgTimeOnSite', 'visitors', 'newVisits',
'pageviewsPerVisit'],
start_date=start_date,
end_date=end_date,
segment="source=~twitter",
dimensions=['date', 'hour'],
parse_dates={'ts': ['date', 'hour']})
assert isinstance(df, DataFrame)
assert isinstance(df.index, pd.DatetimeIndex)
assert len(df) > 1
assert 'date' not in df
assert 'hour' not in df
assert df.index.name == 'ts'
assert 'avgTimeOnSite' in df
assert 'visitors' in df
assert 'newVisits' in df
assert 'pageviewsPerVisit' in df
except AuthenticationConfigError:
raise nose.SkipTest("authentication error")
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
trnewman/VT-USRP-daughterboard-drivers_python | gr-utils/src/python/gr_plot_const.py | 1 | 9861 | #!/usr/bin/env python
#
# Copyright 2007,2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
from matplotlib.font_manager import fontManager, FontProperties
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
matplotlib.interactive(True)
matplotlib.use('TkAgg')
class draw_constellation:
def __init__(self, filename, options):
self.hfile = open(filename, "r")
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.datatype = scipy.complex64
self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
# Setup PLOT
self.fig = figure(1, figsize=(16, 9), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file = figtext(0.10, 0.95, ("File: %s" % filename), weight="heavy", size=16)
self.text_file_pos = figtext(0.10, 0.90, "File Position: ", weight="heavy", size=16)
self.text_block = figtext(0.40, 0.90, ("Block Size: %d" % self.block_length),
weight="heavy", size=16)
self.text_sr = figtext(0.60, 0.90, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=16)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = self.sp_iq.get_xlim()
self.manager = get_current_fig_manager()
connect('draw_event', self.zoom)
connect('key_press_event', self.click)
connect('button_press_event', self.mouse_button_callback)
show()
def get_data(self):
self.text_file_pos.set_text("File Position: %d" % (self.hfile.tell()//self.sizeof_data))
iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
#print "Read in %d items" % len(iq)
if(len(iq) == 0):
print "End of File"
else:
self.reals = [r.real for r in iq]
self.imags = [i.imag for i in iq]
self.time = [i*(1/self.sample_rate) for i in range(len(self.reals))]
def make_plots(self):
# if specified on the command-line, set file pointer
self.hfile.seek(self.sizeof_data*self.start, 1)
self.get_data()
# Subplot for real and imaginary parts of signal
self.sp_iq = self.fig.add_subplot(2,1,1, position=[0.075, 0.2, 0.4, 0.6])
self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
self.plot_iq = self.sp_iq.plot(self.time, self.reals, 'bo-', self.time, self.imags, 'ro-')
# Subplot for constellation plot
self.sp_const = self.fig.add_subplot(2,2,1, position=[0.575, 0.2, 0.4, 0.6])
self.sp_const.set_title(("Constellation"), fontsize=self.title_font_size, fontweight="bold")
self.sp_const.set_xlabel("Inphase", fontsize=self.label_font_size, fontweight="bold")
self.sp_const.set_ylabel("Qaudrature", fontsize=self.label_font_size, fontweight="bold")
self.plot_const = self.sp_const.plot(self.reals, self.imags, 'bo')
# Add plots to mark current location of point between time and constellation plots
self.indx = 0
self.plot_iq += self.sp_iq.plot([self.time[self.indx],], [self.reals[self.indx],], 'mo', ms=8)
self.plot_iq += self.sp_iq.plot([self.time[self.indx],], [self.imags[self.indx],], 'mo', ms=8)
self.plot_const += self.sp_const.plot([self.reals[self.indx],], [self.imags[self.indx],], 'mo', ms=12)
# Adjust axis
self.sp_iq.axis([min(self.time), max(self.time),
1.5*min([min(self.reals), min(self.imags)]),
1.5*max([max(self.reals), max(self.imags)])])
self.sp_const.axis([-2, 2, -2, 2])
draw()
def update_plots(self):
self.plot_iq[0].set_data([self.time, self.reals])
self.plot_iq[1].set_data([self.time, self.imags])
self.sp_iq.axis([min(self.time), max(self.time),
1.5*min([min(self.reals), min(self.imags)]),
1.5*max([max(self.reals), max(self.imags)])])
self.plot_const[0].set_data([self.reals, self.imags])
self.sp_const.axis([-2, 2, -2, 2])
draw()
def zoom(self, event):
newxlim = self.sp_iq.get_xlim()
if(newxlim != self.xlim):
self.xlim = newxlim
r = self.reals[int(ceil(self.xlim[0])) : int(ceil(self.xlim[1]))]
i = self.imags[int(ceil(self.xlim[0])) : int(ceil(self.xlim[1]))]
self.plot_const[0].set_data(r, i)
self.sp_const.axis([-2, 2, -2, 2])
self.manager.canvas.draw()
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
trace_forward_valid_keys = [">",]
trace_backward_valid_keys = ["<",]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
elif(find(event.key, trace_forward_valid_keys)):
self.indx = min(self.indx+1, len(self.time)-1)
self.set_trace(self.indx)
elif(find(event.key, trace_backward_valid_keys)):
self.indx = max(0, self.indx-1)
self.set_trace(self.indx)
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
self.get_data()
self.update_plots()
def step_backward(self):
# Step back in file position
if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
else:
self.hfile.seek(-self.hfile.tell(),1)
self.get_data()
self.update_plots()
def mouse_button_callback(self, event):
x, y = event.xdata, event.ydata
if x is not None and y is not None:
if(event.inaxes == self.sp_iq):
self.indx = searchsorted(self.time, [x])
self.set_trace(self.indx)
def set_trace(self, indx):
self.plot_iq[2].set_data(self.time[indx], self.reals[indx])
self.plot_iq[3].set_data(self.time[indx], self.imags[indx])
self.plot_const[1].set_data(self.reals[indx], self.imags[indx])
draw()
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
def main():
usage="%prog: [options] input_filename"
description = "Takes a GNU Radio complex binary file and displays the I&Q data versus time and the constellation plot (I vs. Q). You can set the block size to specify how many points to read in at a time and the start position in the file. By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples."
parser = OptionParser(conflict_handler="resolve", usage=usage, description=description)
parser.add_option("-B", "--block", type="int", default=1000,
help="Specify the block size [default=%default]")
parser.add_option("-s", "--start", type="int", default=0,
help="Specify where to start in the file [default=%default]")
parser.add_option("-R", "--sample-rate", type="float", default=1.0,
help="Set the sampler rate of the data [default=%default]")
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
filename = args[0]
dc = draw_constellation(filename, options)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
motmot/strokelitude | plot_raw_timeseries.py | 1 | 4190 | import pkg_resources
import pylab
import numpy as np
import sys
import tables
import motmot.fview_ext_trig.easy_decode as easy_decode
import matplotlib.ticker as mticker
from optparse import OptionParser
import pytz, datetime, time
pacific = pytz.timezone('US/Pacific')
import scipy.io
def doit(fname,options):
h5 = tables.openFile(fname,mode='r')
stroke_data=h5.root.stroke_data[:]
stroke_times = stroke_data['trigger_timestamp']
time_data=h5.root.time_data[:]
gain,offset,resids = easy_decode.get_gain_offset_resids(
input=time_data['framestamp'],
output=time_data['timestamp'])
top = h5.root.time_data.attrs.top
wordstream = h5.root.ain_wordstream[:]
wordstream = wordstream['word'] # extract into normal numpy array
r=easy_decode.easy_decode(wordstream,gain,offset,top)
if r is not None:
chans = r.dtype.fields.keys()
chans.sort()
chans.remove('timestamps')
if 0:
Vcc = h5.root.ain_wordstream.attrs.Vcc
print 'Vcc read from file at',Vcc
else:
Vcc=3.3
print 'Vcc',Vcc
ADCmax = (2**10)-1
analog_gain = Vcc/ADCmax
else:
chans = []
names = h5.root.ain_wordstream.attrs.channel_names
if r is not None:
dt = r['timestamps'][1]-r['timestamps'][0]
samps_per_sec = 1.0/dt
        n_adc_samples = len(r['timestamps'])  # number of decoded ADC samples
        adc_duration = n_adc_samples*dt
print '%d samples at %.1f samples/sec = %.1f seconds'%(n_adc_samples,
samps_per_sec,
adc_duration)
t0 = r['timestamps'][0]
stroke_times_zero_offset = stroke_times-t0
        if len(stroke_times_zero_offset):
            stroke_data_duration = stroke_times_zero_offset[-1]
            total_duration = max(stroke_data_duration,adc_duration)
        else:
            total_duration = adc_duration
    else:
        t0 = 0
        # no ADC samples decoded: derive the plotted time span from the stroke timestamps alone
        total_duration = stroke_times[-1] if len(stroke_times) else 0.0
N_subplots = len(chans)+5
ax=None
for i in range(N_subplots):
ax = pylab.subplot(N_subplots,1,i+1,sharex=ax)
if i < len(chans):
try:
label = names[int(chans[i])]
except Exception, err:
print 'ERROR: ingnoring exception %s'%(err,)
label = 'channel %s'%chans[i]
            ax.plot(r['timestamps']-t0,r[chans[i]]*analog_gain,
label=label)
ax.set_ylabel('V')
ax.legend()
elif i == len(chans):
if np.all(np.isnan(stroke_data['right'])):
continue
            ax.plot(stroke_times-t0,stroke_data['right'],label='R')
            ax.set_ylabel('R (degrees)')
ax.legend()
elif i == len(chans)+1:
if np.all(np.isnan(stroke_data['left'])):
continue
            ax.plot(stroke_times-t0,stroke_data['left'],label='L')
            ax.set_ylabel('L (degrees)')
ax.legend()
elif i == len(chans)+2:
if np.all(np.isnan(stroke_data['left_antenna'])):
continue
ax.plot(stroke_times-t0,stroke_data['left_antenna'],label='Lant')
ax.set_ylabel('L antenna (degrees)')
ax.legend()
elif i == len(chans)+3:
if np.all(np.isnan(stroke_data['right_antenna'])):
continue
ax.plot(stroke_times-t0,stroke_data['right_antenna'],label='Rant')
ax.set_ylabel('R antenna (degrees)')
ax.legend()
elif i == len(chans)+4:
if np.all(np.isnan(stroke_data['head'])):
continue
ax.plot(stroke_times-t0,stroke_data['head'],label='H')
ax.set_ylabel('head (degrees)')
ax.legend()
ax.xaxis.set_major_formatter(mticker.FormatStrFormatter("%s"))
ax.yaxis.set_major_formatter(mticker.FormatStrFormatter("%s"))
ax.set_xlabel('Time (sec)')
    t_plot_start = 0.0  # start the x-axis at the beginning of the recording
    ax.set_xlim((t_plot_start,t_plot_start+total_duration))
if options.timestamps:
pylab.gcf().autofmt_xdate()
pylab.show()
def main():
usage = '%prog [options] FILE'
parser = OptionParser(usage)
parser.add_option("--timestamps", action='store_true',
default=False)
(options, args) = parser.parse_args()
fname = args[0]
doit(fname,options)
if __name__=='__main__':
main()
| bsd-3-clause |
mwv/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
wlamond/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
smenon8/AnimalWildlifeEstimator | script/DeriveFinalResultSet.py | 1 | 23433 | # coding: utf-8
# python-3
# Author: Sreejith Menon ([email protected])
# Creation date: 6/14/16
# Description: Contains multiple methods to calculate final result statistics.
# The methods in this script file highly utilizes objects created by JobsMapResultsFilesToContainerObjs.py.
# Three global variables are declared in the scope of this project, the currently point to CSV files that have information from second phase of the mechanical turk deployment.
# gidAidMapFl, aidFeatureMapFl, imgJobMap
# These parameters are used by multiple methods within this script.
import importlib, pandas as pd, statistics as s, re, json, csv, matplotlib.pyplot as plt
import GetPropertiesAPI as GP
from collections import OrderedDict
import IBEIS_mongo_helper as IB_h
import JobsMapResultsFilesToContainerObjs as ImageMap
importlib.reload(ImageMap)
importlib.reload(IB_h)
# Global variables for the scope of this script
gidAidMapFl = "../data/experiment2_gid_aid_map.json"
aidFeatureMapFl = "../data/experiment2_aid_features.json"
imgJobMap = "../data/imageGID_job_map_expt2_corrected.csv"
def PRINT(jsonLike):
print(json.dumps(jsonLike, indent=4))
# This method is a simplification of adding up the share/no share counts of a particular image.
# For example, for a particular image which appeared in 5 different albums, the share rates were 9,5,9,10,10.
# This method will return gid:sum([9,5,9,10,10]).
def genTotCnts(ovrCnts):
dSum = {}
for key in ovrCnts:
dSum[key] = sum(ovrCnts[key])
return dSum
# This dictionary answers the question, what percentage of this feature/image were shared.
# The share proportion is calculated as total_shares/(total_shares+total_not_shares).
def getShrProp(ovrAggCnts) :
totCnt = genTotCnts(ovrAggCnts)
shareKeys = list(filter(lambda x : 'share' in x,totCnt.keys()))
totKeys = list(filter(lambda x : 'total' in x,totCnt.keys()))
shareKeys = sorted(shareKeys,key=lambda x: (x[:len(x)-1]))
totKeys = sorted(totKeys,key=lambda x: (x[:len(x)-1]))
lenKey = len(shareKeys[0])-1
propDict = {}
for i in range(len(shareKeys)):
propDict[shareKeys[i][:lenKey]] = totCnt[shareKeys[i]] * 100 / totCnt[totKeys[i]]
return propDict
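# Worked example (hypothetical counts): with
#   ovrAggCnts = {('zebra', 'share'): [9, 5], ('zebra', 'not_share'): [1, 5], ('zebra', 'total'): [10, 10]}
# genTotCnts() sums to 14 shares out of 20 displays, so getShrProp() returns {('zebra',): 70.0}.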
# independent of the results
# This method defines the counting logic for a particular image for a given feature.
# For instance, for the feature ‘SPECIES’, there might be images that contains both a zebra and a giraffe.
# In that case, the share counts have to be added to both zebra and giraffe.
def getCountingLogic(*args, **kwargs):
if len(kwargs.keys()):
client, feature, source, withNumInds = args[0], args[1], args[2], args[3]
featuresPerImg = IB_h.extractImageFeaturesFromMap(client, feature, source=source)
else:
        gidAidMapFl, aidFeatureMapFl, feature = args[0], args[1], args[2]
        withNumInds = args[3] if len(args) > 3 else True  # default mirrors getCountingLogic_old
        mode = args[4] if len(args) > 4 else "GZC"
featuresPerImg = ImageMap.extractImageFeaturesFromMap(gidAidMapFl, aidFeatureMapFl, feature, mode=mode)
countLogic = {}
for gid in featuresPerImg.keys():
numInds = len(featuresPerImg[gid]) # number of individuals in a particular image
countFor = list(set(featuresPerImg[gid]))
if withNumInds:
countLogic[gid] = [numInds, countFor]
else:
countLogic[gid] = countFor
return countLogic
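# Illustrative sketch (hypothetical GID and species names): for an image 10 that carries a zebra
# and a giraffe annotation, getCountingLogic(gidAidMapFl, aidFeatureMapFl, "SPECIES", True) would
# yield {10: [2, ['zebra_plains', 'giraffe_masai']]} (list order not guaranteed), so a share of
# image 10 is later counted once towards each species.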
def getCountingLogic_old(gidAidMapFl,aidFeatureMapFl,feature,withNumInds=True, mode="GZC"):
featuresPerImg = ImageMap.extractImageFeaturesFromMap(gidAidMapFl,aidFeatureMapFl,feature, mode=mode)
countLogic = {}
for gid in featuresPerImg.keys():
numInds = len(featuresPerImg[gid]) # number of individuals in a particular image
countFor = list(set(featuresPerImg[gid]))
if withNumInds:
countLogic[gid] = [numInds,countFor]
else:
countLogic[gid] = countFor
return countLogic
# This method is used to generate the number of shares and not_shares per feature in a particular album.
# For instance, if the required features list contains ‘SPECIES’, then it tells the share and not_share count per album for each available/identifiable species.
def genAlbmFtrs(gidAidMapFl,aidFeatureMapFl,imgJobMap,reqdFtrList):
albmFtrDict = {}
albumGidDict = ImageMap.genAlbumGIDDictFromMap(imgJobMap)
for cntFtr in reqdFtrList:
cntLogic = getCountingLogic(gidAidMapFl,aidFeatureMapFl,cntFtr,False)
for album in albumGidDict.keys():
ftrDict = albmFtrDict.get(album,{})
for gid in albumGidDict[album]:
ftrList = cntLogic.get(gid,"UNIDENTIFIED")
if ftrList != "UNIDENTIFIED":
for ftr in ftrList:
ftrDict[ftr] = ftrDict.get(ftr,0) + 1
else:
ftrDict['UNIDENTIFIED'] = ftrDict.get('UNIDENTIFIED',0) + 1
albmFtrDict[album] = ftrDict
return albmFtrDict
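# Shape of the result (hypothetical album and counts): {'album_1': {'zebra_plains': 12, 'giraffe_masai': 3,
# 'UNIDENTIFIED': 1}, ...} - one counter dict per album, keyed by every feature value seen in that album.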
# This method derives the share proportions of images across different albums as opposed to generating an overall proportion of images.
# For example, the output of this method will contain how many times a particular image was shared in a particular album.
# This is particularly insightful for images that appear across multiple albums.
# It could essentially tell if a certain image was shared in the same way or differently and if the share rate of an image is orthogonal to the context of album.
def getShrPropImgsAcrossAlbms(imgJobMap,resSetStrt,resSetEnd,flNm):
imgAlbumDict = ImageMap.genImgAlbumDictFromMap(imgJobMap)
master = ImageMap.createResultDict(resSetStrt,resSetEnd)
imgShareNotShareList,noResponse = ImageMap.imgShareCountsPerAlbum(imgAlbumDict,master)
albumGidDict = ImageMap.genImgAlbumDictFromMap(imgJobMap)
# filters the GID's that appears in multiple albums
imgsInMultAlbms = list(filter(lambda x : len(albumGidDict[x]) > 1,albumGidDict.keys()))
gidAlbmDict = ImageMap.genImgAlbumDictFromMap(imgJobMap)
imgAlbmPropDict = {} # image album proportion dict
for tup in imgShareNotShareList:
if tup[0] in imgsInMultAlbms:
imgAlbmPropDict[(tup[0],tup[1])] = tup[4]
return imgAlbmPropDict,getConsistencyDict(imgsInMultAlbms,gidAlbmDict,imgAlbmPropDict,flNm)
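# The first returned object maps (GID, album) pairs to the share proportion observed in that album,
# e.g. (hypothetical values) {(4637, '1'): 80.0, (4637, '7'): 60.0}; the second is the consistency
# dict built by getConsistencyDict() and also written to flNm.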
# This method returns a lambda function used to filter down to valid rows, i.e. rows without the UNIDENTIFIED entries.
# A slight adjustment is needed depending on which feature is being filtered.
def getFltrCondn(ftr):
if ftr=='NID':
return lambda x : x[0] != 'UNIDENTIFIED' and int(x[0]) > 0
else: # for all other features
return lambda x : x[0] != 'UNIDENTIFIED'
# This is a helper method for getConsistencyDict(). It returns three objects.
# i. A list with only valid features i.e. without the UNIDENTIFIED entries.
# ii. A dictionary object that contains the albums in which a particular feature is seen. Ex. GIRAFFE is seen in albums 1, 3,4,5 etc.
# iii. A dictionary object that contains the share proportion of feature per album. Ex. giraffes in album 1 were shared 70% of times etc. Format of the key: (feature, album)
def genObjsForConsistency(gidAidMapFl,aidFeatureMapFl,ftr,imgJobMap,resSetStrt=1,resSetEnd=100):
d = shrCntsByFtrPrAlbm(gidAidMapFl,aidFeatureMapFl,ftr,imgJobMap,resSetStrt,resSetEnd)
fltrCondn = getFltrCondn(ftr)
d_filtered = {}
d_filtered_keys = list(filter(fltrCondn, d.keys()))
for key in d_filtered_keys:
d_filtered[key] = d[key]
ftrAlbmPropDict = getShrProp(d_filtered)
filteredKeyArr = [x[0] for x in ftrAlbmPropDict.keys()]
ftrAlbmDict = {}
for ftr,albm in ftrAlbmPropDict.keys():
ftrAlbmDict[ftr] = ftrAlbmDict.get(ftr,[]) + [albm]
return filteredKeyArr,ftrAlbmDict,ftrAlbmPropDict
# This method should be thought of as the main counting logic for calculating share-rate statistics for any feature extracted from IBEIS.
# The output dictionary maps every valid feature to its share rates across the different albums.
# Each entry is a key - dictionary pair. Ex. {giraffe : {album_1 : 70, album_2 : 89}, ...}
def getConsistencyDict(filteredKeyArr,ftrAlbmDict,ftrAlbmShrPropDict,flNm='/tmp/getConsistencyDict.output'):
consistency = {}
for ftr in filteredKeyArr:
tempDict = {}
for albm in ftrAlbmDict[ftr]:
tempDict[albm] = ftrAlbmShrPropDict.get((ftr,albm),None)
consistency[ftr] = tempDict
consistency_mult = {}
for key in consistency:
if len(consistency[key]) > 1:
consistency_mult[key] = consistency[key]
fl = open(flNm,"w")
json.dump(consistency_mult,fl,indent=4)
fl.close()
return consistency_mult
# consistency object is returned by getShrPropImgsAcrossAlbms()
# This method generates the mean, variance and standard deviation of the share proportion of a particular feature element across different albums.
def genVarStddevShrPropAcrsAlbms(consistency):
gidShrVarStdDevDict = {}
for gid in consistency:
albmShrRateD = consistency[gid]
if None not in albmShrRateD.values():
var = s.variance(albmShrRateD.values())
mean = s.mean(albmShrRateD.values())
std = s.stdev(albmShrRateD.values())
gidShrVarStdDevDict[gid] = {'mean' : mean,
'variance' : var,
'standard_deviation' : std}
return gidShrVarStdDevDict
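# Usage sketch (assumes the experiment-2 result files 1..100 are in place):
# _, consistency = getShrPropImgsAcrossAlbms(imgJobMap, 1, 100, '/tmp/consistency.json')
# spread = genVarStddevShrPropAcrsAlbms(consistency)
# # spread: {gid: {'mean': .., 'variance': .., 'standard_deviation': ..}} for images seen in multiple albums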
# This method generates the overall share statistic for every element of a particular feature.
# The logic involves simple counting for each element of the feature based on the counting logic.
# This explicitly handles images that carry multiple feature values; the counts are added for each of those values.
# This method is ideal for getting share rate of a particular feature across the entire experiment, not limited by album.
# In other words, the result dict has share proportions calculated across albums.
def ovrallShrCntsByFtr(gidAidMapFl,aidFeatureMapFl,feature,imgJobMap,resSetStrt,resSetEnd):
countLogic = getCountingLogic(gidAidMapFl,aidFeatureMapFl,feature)
imgAlbumDict = ImageMap.genImgAlbumDictFromMap(imgJobMap)
master = ImageMap.createResultDict(resSetStrt,resSetEnd)
imgShareNotShareList,noResponse = ImageMap.imgShareCountsPerAlbum(imgAlbumDict,master)
answerSet = {}
for tup in imgShareNotShareList:
if tup[0] not in countLogic.keys(): # where the image has no associated annotation, tup[0] = GID
answerSet[('UNIDENTIFIED' , 'share')] = answerSet.get(('UNIDENTIFIED' , 'share'),[]) + [tup[2]]
answerSet[('UNIDENTIFIED' , 'not_share')] = answerSet.get(('UNIDENTIFIED' , 'not_share'),[]) + [tup[3]]
answerSet[('UNIDENTIFIED', 'total')] = answerSet.get(('UNIDENTIFIED' , 'total'),[]) + [tup[2] + tup[3]]
else:
logic = countLogic[tup[0]]
for countForEle in logic[1]:
varNameShare = (countForEle , "share")
varNameNotShare = (countForEle , "not_share")
varNameTot = (countForEle , "total")
answerSet[varNameShare] = answerSet.get(varNameShare,[]) + [tup[2]]
answerSet[varNameNotShare] = answerSet.get(varNameNotShare,[]) + [tup[3]]
answerSet[varNameTot] = answerSet.get(varNameTot,[]) + [tup[2] + tup[3]]
return answerSet
# This method generates the share statistic for every element of a particular feature for each album in which there are some instances with the said feature.
# The logic involves simple counting for each element of the feature based on the counting logic.
# This explicitly handles the images where there are multiple features and the counts are made in each of the feature.
# This method is ideal for getting share rate of a particular feature across the entire experiment by album.
# In other words, this should used to calculate and compare the share rates of a particular feature and how it changes across different albums.
# As a sidenote, if the same feature is being shared differently across albums, then it means there is some contextual information from the album that is dominating this effect.
def shrCntsByFtrPrAlbm(gidAidMapFl,aidFeatureMapFl,feature,imgJobMap,resSetStrt=1,resSetEnd=100):
countLogic = getCountingLogic(gidAidMapFl,aidFeatureMapFl,feature)
imgAlbumDict = ImageMap.genImgAlbumDictFromMap(imgJobMap)
master = ImageMap.createResultDict(resSetStrt,resSetEnd)
imgShareNotShareList,noResponse = ImageMap.imgShareCountsPerAlbum(imgAlbumDict,master)
answerSet = {}
for tup in imgShareNotShareList:
if tup[0] not in countLogic.keys(): # where the image has no associated annotation, tup[0] = GID
answerSet[('UNIDENTIFIED' , 'share', tup[1])] = answerSet.get(('UNIDENTIFIED' , 'share', tup[1]),[]) + [tup[2]]
answerSet[('UNIDENTIFIED' , 'not_share', tup[1])] = answerSet.get(('UNIDENTIFIED' , 'not_share', tup[1]),[]) + [tup[3]]
answerSet[('UNIDENTIFIED', 'total', tup[1])] = answerSet.get(('UNIDENTIFIED' , 'total', tup[1]),[]) + [tup[2] + tup[3]]
else:
logic = countLogic[tup[0]]
for countForEle in logic[1]:
varNameShare = (countForEle , tup[1], "share")
varNameNotShare = (countForEle , tup[1], "not_share")
varNameTot = (countForEle , tup[1], "total")
answerSet[varNameShare] = answerSet.get(varNameShare,[]) + [tup[2]]
answerSet[varNameNotShare] = answerSet.get(varNameNotShare,[]) + [tup[3]]
answerSet[varNameTot] = answerSet.get(varNameTot,[]) + [tup[2] + tup[3]]
return answerSet
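# Note on key layout: ovrallShrCntsByFtr() keys identified features as (feature, 'share'/'not_share'/'total'),
# whereas this per-album variant keys them as (feature, album, 'share'/...); getShrProp() then collapses the
# latter into (feature, album) share proportions, which genObjsForConsistency() consumes downstream.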
# This method is used to generate cross statistics for two features.
# The logic involves simple counting for each element of each feature based on the counting logic.
# Since two features are dealt with here, the instances are divided into even and uneven cases.
# Even features are when the number of instances of feature 1 and feature 2 is identical.
# Uneven features, on the other hand, are when the number of instances of feature 1 and feature 2 is not identical.
# Uneven features are handled differently for 1-many, many-1 and many-many combinations.
def ovrallShrCntsByTwoFtrs(gidAidMapFl,aidFeatureMapFl,ftr1,ftr2,imgJobMap,resSetStrt,resSetEnd):
    countLogic1 = getCountingLogic(gidAidMapFl,aidFeatureMapFl,ftr1)
countLogic2 = getCountingLogic(gidAidMapFl,aidFeatureMapFl,ftr2)
imgAlbumDict = ImageMap.genImgAlbumDictFromMap(imgJobMap)
master = ImageMap.createResultDict(resSetStrt,resSetEnd)
imgShareNotShareList,noResponse = ImageMap.imgShareCountsPerAlbum(imgAlbumDict,master)
answerSet = {}
unEvnFtrsTups =[]
for tup in imgShareNotShareList:
if tup[0] not in countLogic1.keys(): # where the image has no associated annotation, tup[0] = GID
answerSet[('UNIDENTIFIED' , None,'share')] = answerSet.get(('UNIDENTIFIED' ,None, 'share'),[]) + [tup[2]]
answerSet[('UNIDENTIFIED' , None, 'not_share')] = answerSet.get(('UNIDENTIFIED' , None, 'not_share'),[]) + [tup[3]]
answerSet[('UNIDENTIFIED' , None, 'total')] = answerSet.get(('UNIDENTIFIED' , None, 'total'),[]) + [tup[2]+tup[3]]
else:
logic1 = countLogic1[tup[0]]
logic2 = countLogic2[tup[0]]
for i in range(len(logic1[1])):
if len(logic1[1]) == len(logic2[1]): # there are two individuals with matching features
varNameShare = (logic1[1][i] , logic2[1][i], "share")
varNameNotShare = (logic1[1][i] , logic2[1][i], "not_share")
varNameTot = (logic1[1][i] , logic2[1][i], "total")
# there are more logic1 features than logic2 features
elif len(logic1[1]) == 1 or len(logic2[1]) == 1: # one of the logic has just 1 feature
if len(logic1[1]) == 1:
varNameShare = (logic1[1][0] , logic2[1][i], "share")
varNameNotShare = (logic1[1][0] , logic2[1][i], "not_share")
varNameTot = (logic1[1][0] , logic2[1][i], "total")
else:
varNameShare = (logic1[1][i] , logic2[1][0], "share")
varNameNotShare = (logic1[1][i] , logic2[1][0], "not_share")
varNameTot = (logic1[1][i] , logic2[1][0], "total")
else: # uneven features in logic1 and logic2
unEvnFtrsTups.append(tup)
answerSet[varNameShare] = answerSet.get(varNameShare,[]) + [tup[2]]
answerSet[varNameNotShare] = answerSet.get(varNameNotShare,[]) + [tup[3]]
answerSet[varNameTot] = answerSet.get(varNameTot,[]) + [tup[2] + tup[3]]
# handling un-even features - issues after GetPropertiesAPI update - issue being tracked - issue name: Issue with consolidated feature file #1
unEvnFtrsTups = list(set(unEvnFtrsTups))
for tup in unEvnFtrsTups:
aidList = GP.getAnnotID(tup[0])
for aid in aidList:
feature1 = GP.getImageFeature(aid,GP.ftrNms[ftr1])
feature2 = GP.getImageFeature(aid,GP.ftrNms[ftr2])
feature1 = list(map(str,feature1))
feature2 = list(map(str,feature2))
if ftr1 == 'AGE':
feature1 = GP.getAgeFeatureReadableFmt(feature1)
if ftr2 == 'AGE':
feature2 = GP.getAgeFeatureReadableFmt(feature2)
varNameShare = (feature1[0],feature2[0],"share")
varNameNotShare = (feature1[0],feature2[0],"not_share")
varNameTot = (feature1[0],feature2[0],"total")
answerSet[varNameShare] = answerSet.get(varNameShare,[]) + [tup[2]]
answerSet[varNameNotShare] = answerSet.get(varNameNotShare,[]) + [tup[3]]
answerSet[varNameTot] = answerSet.get(varNameTot,[]) + [tup[2] + tup[3]]
return answerSet
# This method generates the rank list of number of individuals in an image by share proportion.
def genNumIndsRankList():
# no. of individuals per image
countLogic = getCountingLogic(gidAidMapFl,aidFeatureMapFl,"SPECIES")
imgAlbumDict = ImageMap.genImgAlbumDictFromMap(imgJobMap)
master = ImageMap.createResultDict(1,100)
imgShareNotShareList,noResponse = ImageMap.imgShareCountsPerAlbum(imgAlbumDict,master)
totOfIndsPerImg = {}
for key in countLogic:
totOfIndsPerImg[countLogic[key][0]] = totOfIndsPerImg.get(countLogic[key][0],0) + 1
# Rank list by number of images
noOfIndsPerImgSharesRnkLst = {}
noOfIndsPerImgNotSharesRnkLst = {}
for tup in imgShareNotShareList:
if tup[0] in countLogic.keys():
noOfIndsPerImgSharesRnkLst[countLogic[tup[0]][0]] = noOfIndsPerImgSharesRnkLst.get(countLogic[tup[0]][0],0) + tup[2]
noOfIndsPerImgNotSharesRnkLst[countLogic[tup[0]][0]] = noOfIndsPerImgNotSharesRnkLst.get(countLogic[tup[0]][0],0) + tup[3]
return noOfIndsPerImgSharesRnkLst,noOfIndsPerImgNotSharesRnkLst
# Comments: Number of shares/not shares for each and every position are enumerated inside a list.
# Use of OrderedDict() from the Python collections framework ensures that the records are picked in the exact same order they appear in the albums.
# This returned dictionary can then be embedded inside a data-frame and can be visualized.
def getPosShrProptn(imgJobMap,resStart,resEnd):
pos = {} # 1:(shr,noShr,prop)
for i in range(resStart,resEnd):
results = ImageMap.createResultDict(i,i)
imgAlbumDict = ImageMap.genImgAlbumDictFromMap(imgJobMap)
shrCnt,junk = ImageMap.imgShareCountsPerAlbum(imgAlbumDict,results)
for i in range(1,len(shrCnt)+1):
pos[i] = pos.get(i,[]) + [(shrCnt[i-1][2],shrCnt[i-1][3])]
summaryPosCnt = OrderedDict()
for position in pos:
shrs = [x[0] for x in pos[position]]
notShrs = [x[1] for x in pos[position]]
total = [x[0]+x[1] for x in pos[position]]
summaryPosCnt[position] = {'share' : sum(shrs),
'not_share' : sum(notShrs),
'total': sum(total)
}
for pos in summaryPosCnt:
dct = summaryPosCnt[pos]
dct['proportion'] = dct['share'] * 100 / dct['total']
return summaryPosCnt
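# Usage sketch (assumes the experiment-2 result files 1..100 are in place):
# posDf = pd.DataFrame(getPosShrProptn(imgJobMap, 1, 100)).transpose()
# posDf['proportion'].plot(kind='bar')  # share rate versus the position of an image within its album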
def __main__():
d = ovrallShrCntsByTwoFtrs(gidAidMapFl,aidFeatureMapFl,"SPECIES","AGE",imgJobMap,1,100)
#d = shrCntsByFtrPrAlbm(gidAidMapFl,aidFeatureMapFl,"SPECIES",imgJobMap,1,50)
#d = ovrallShrCntsByFtr(gidAidMapFl,aidFeatureMapFl,"SPECIES",imgJobMap,1,50)
dc = getShrProp(d)
pd.DataFrame(dc,index=["share proportion"]).transpose()
'''
resultsPerJobDf > Gives you shares/not shares per image per album (Python Object of .results file converted to DF)
resultsPerJobDf['GID','Album','Shared','Not Shared','Proportion']
'''
imgAlbumDict = ImageMap.genImgAlbumDictFromMap("../data/imageGID_job_map_expt2_corrected.csv")
master = ImageMap.createResultDict(1,100)
imgShareNotShareList,noResponse = ImageMap.imgShareCountsPerAlbum(imgAlbumDict,master)
resultsPerJobDf = pd.DataFrame(imgShareNotShareList,columns = ['GID','Album','Shared','Not Shared','Proportion'])
'''
Code for reading from json files into data frames
aidGidDf['AID','GID']
aidFeaturesDf['AID',[FEATURES]]
'''
aidGidDict = ImageMap.genAidGidTupListFromMap('../data/experiment2_gid_aid_map.json')
aidGidDf= pd.DataFrame(aidGidDict,columns = ['AID','GID'])
aidFeaturesDf = pd.DataFrame(ImageMap.genAidFeatureDictList('../data/experiment2_aid_features.json'))
aidFeaturesDf['AID'] = aidFeaturesDf['AID'].astype('int32')
'''
rankListImgsDf > Gives you the results of number of times each image was shared overall
rankListImgsDf['GID','Shared','Not Shared','Proportion']
'''
rankListImgsDf = resultsPerJobDf.groupby(['GID'])['Shared','Not Shared'].sum()
rankListImgsDf['Total'] = rankListImgsDf['Shared'] + rankListImgsDf['Not Shared']
rankListImgsDf['Proportion'] = rankListImgsDf['Shared'] * 100 / rankListImgsDf['Total']
rankListImgsDf = rankListImgsDf.sort_values(by = ['Proportion'],ascending = False)
#rankListImgsDf.to_csv('../FinalResults/rankListImages_expt2.csv')
'''
resultsAIDGIDDf > Merged data frame that add's AID info to the results data
resultsAIDGIDDf['AID' + [resultsPerJobDf]]
gidAidResultsFeaturesDf > A master data frame that has results data merged along with all the image features
gidAidResultsFeaturesDf['GID','AID',[FEATURES],[resultsPerJobDf]]
'''
resultsAIDGIDDf = pd.merge(aidGidDf,resultsPerJobDf,left_on='GID',right_on = 'GID',how="right")
gidAidResultsFeaturesDf = pd.merge(resultsAIDGIDDf,aidFeaturesDf,left_on = 'AID',right_on = 'AID') # most important data frame with all the info
gidAidResultsFeaturesDf.to_csv("../FinalResults/resultsFeaturesComb_expt2.csv",index=False)
if __name__ == "__main__":
__main__()
| bsd-3-clause |
nest/nest-simulator | pynest/examples/hh_phaseplane.py | 8 | 5059 | # -*- coding: utf-8 -*-
#
# hh_phaseplane.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Numerical phase-plane analysis of the Hodgkin-Huxley neuron
-----------------------------------------------------------
hh_phaseplane makes a numerical phase-plane analysis of the Hodgkin-Huxley
neuron (``hh_psc_alpha``). Dynamics is investigated in the V-n space (see remark
below). A constant DC can be specified and its influence on the nullclines
can be studied.
Remark
~~~~~~
To make the two-dimensional analysis possible, the (four-dimensional)
Hodgkin-Huxley formalism needs to be artificially reduced to two dimensions,
in this case by 'clamping' the two other variables, `m` and `h`, to
constant values (`m_eq` and `h_eq`).
"""
import nest
import numpy as np
from matplotlib import pyplot as plt
amplitude = 100. # Set externally applied current amplitude in pA
dt = 0.1 # simulation step length [ms]
v_min = -100. # Min membrane potential
v_max = 42. # Max membrane potential
n_min = 0.1  # Min value of gating variable n
n_max = 0.81  # Max value of gating variable n
delta_v = 2.  # Membrane potential step length
delta_n = 0.01  # Gating variable step length
V_vec = np.arange(v_min, v_max, delta_v)
n_vec = np.arange(n_min, n_max, delta_n)
num_v_steps = len(V_vec)
num_n_steps = len(n_vec)
nest.ResetKernel()
nest.set_verbosity('M_ERROR')
nest.SetKernelStatus({'resolution': dt})
neuron = nest.Create('hh_psc_alpha')
# Numerically obtain equilibrium state
nest.Simulate(1000)
m_eq = neuron.Act_m
h_eq = neuron.Inact_h
neuron.I_e = amplitude # Apply external current
# Scan state space
print('Scanning phase space')
V_matrix = np.zeros([num_n_steps, num_v_steps])
n_matrix = np.zeros([num_n_steps, num_v_steps])
# pp_data will contain the phase-plane data as a vector field
pp_data = np.zeros([num_n_steps * num_v_steps, 4])
count = 0
for i, V in enumerate(V_vec):
for j, n in enumerate(n_vec):
# Set V_m and n
neuron.set(V_m=V, Act_n=n, Act_m=m_eq, Inact_h=h_eq)
# Find state
V_m = neuron.V_m
Act_n = neuron.Act_n
# Simulate a short while
nest.Simulate(dt)
# Find difference between new state and old state
V_m_new = neuron.V_m - V
Act_n_new = neuron.Act_n - n
# Store in vector for later analysis
V_matrix[j, i] = abs(V_m_new)
n_matrix[j, i] = abs(Act_n_new)
pp_data[count] = np.array([V_m, Act_n, V_m_new, Act_n_new])
if count % 10 == 0:
# Write updated state next to old state
print('')
print('Vm: \t', V_m)
print('new Vm:\t', V_m_new)
print('Act_n:', Act_n)
print('new Act_n:', Act_n_new)
count += 1
# Set state for AP generation
neuron.set(V_m=-34., Act_n=0.2, Act_m=m_eq, Inact_h=h_eq)
print('')
print('AP-trajectory')
# ap will contain the trace of a single action potential as one possible
# numerical solution in the vector field
ap = np.zeros([1000, 2])
for i in range(1000):
# Find state
V_m = neuron.V_m
Act_n = neuron.Act_n
if i % 10 == 0:
# Write new state next to old state
print('Vm: \t', V_m)
print('Act_n:', Act_n)
ap[i] = np.array([V_m, Act_n])
# Simulate again
neuron.set(Act_m=m_eq, Inact_h=h_eq)
nest.Simulate(dt)
# Make analysis
print('')
print('Plot analysis')
nullcline_V = []
nullcline_n = []
print('Searching nullclines')
for i in range(0, len(V_vec)):
    index = np.nanargmin(V_matrix[:, i])
if index != 0 and index != len(n_vec):
nullcline_V.append([V_vec[i], n_vec[index]])
    index = np.nanargmin(n_matrix[:, i])
if index != 0 and index != len(n_vec):
nullcline_n.append([V_vec[i], n_vec[index]])
print('Plotting vector field')
factor = 0.1
for i in range(0, np.shape(pp_data)[0], 3):
plt.plot([pp_data[i][0], pp_data[i][0] + factor * pp_data[i][2]],
[pp_data[i][1], pp_data[i][1] + factor * pp_data[i][3]],
color=[0.6, 0.6, 0.6])
nullcline_V = np.asarray(nullcline_V)
nullcline_n = np.asarray(nullcline_n)
plt.plot(nullcline_V[:, 0], nullcline_V[:, 1], linewidth=2.0)
plt.plot(nullcline_n[:, 0], nullcline_n[:, 1], linewidth=2.0)
plt.xlim([V_vec[0], V_vec[-1]])
plt.ylim([n_vec[0], n_vec[-1]])
plt.plot(ap[:, 0], ap[:, 1], color='black', linewidth=1.0)
plt.xlabel('Membrane potential V [mV]')
plt.ylabel('Gating variable n')
plt.title('Phase space of the Hodgkin-Huxley Neuron')
plt.show()
| gpl-2.0 |
zhenv5/scikit-learn | sklearn/ensemble/weight_boosting.py | 71 | 40664 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
        Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
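    Examples
    --------
    A minimal usage sketch (the synthetic dataset and parameter values below
    are illustrative assumptions, not prescribed by the estimator):
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.ensemble import AdaBoostClassifier
    >>> X, y = make_classification(n_samples=100, random_state=0)
    >>> clf = AdaBoostClassifier(n_estimators=50, random_state=0)
    >>> clf = clf.fit(X, y)
    >>> clf.predict(X[:2]) # doctest: +SKIP
    >>> clf.score(X, y) # doctest: +SKIP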
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
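        # Worked example (numbers assumed for illustration only): with
        # K = 3 classes, a sample whose true class has index c = 1 is coded
        # as [-1/2, 1, -1/2].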
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
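    Examples
    --------
    A minimal usage sketch (the synthetic dataset and parameter values below
    are illustrative assumptions, not prescribed by the estimator):
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.ensemble import AdaBoostRegressor
    >>> X, y = make_regression(n_features=4, random_state=0)
    >>> regr = AdaBoostRegressor(n_estimators=100, random_state=0)
    >>> regr = regr.fit(X, y)
    >>> regr.predict(X[:2]) # doctest: +SKIP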
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
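        # Worked example (weights assumed for illustration only): if the
        # estimator weights, taken in sorted-prediction order, are
        # [0.2, 0.3, 0.5], the cumulative weights are [0.2, 0.5, 1.0]; the
        # first position reaching half the total weight (0.5) is index 1,
        # so that estimator's prediction is the weighted median.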
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
| bsd-3-clause |
j33433/MPowerTCX | source/physics/physics.py | 1 | 3722 | #!/usr/bin/env python
#
# MPowerTCX: Share Schwinn A.C. indoor cycle data with Strava, GoldenCheetah and other apps
# Copyright (C) 2017 James Roth
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file contains a model to estimate bike speed based on power
#
import math
class SimpleBike(object):
def __init__(self, mass):
self.drag_coefficient = 0.88
self.frontal_area = 0.32
self.rho = 1.2
self.eta = 0.97
self.rolling_coefficient = 5.0e-3
# kg
self.mass = mass
self.grade = 0.0
self.g = 9.81
self.time_delta = 1
self.velocity = 0.0
self.distance = 0.0
def set_time_delta(self, delta):
self.time_delta = delta
def drag(self, velocity):
return 0.5 * self.drag_coefficient * self.frontal_area * self.rho * velocity * velocity
def rolling(self, grade, velocity):
if velocity > 0.01:
return self.g * math.cos(math.atan(grade)) * self.mass * self.rolling_coefficient
else:
return 0.0
def gravity(self, grade):
return self.g * math.sin(math.atan(grade)) * self.mass
def next_sample(self, power):
drag = self.drag(self.velocity)
rolling = self.rolling(self.grade, self.velocity)
gravity = self.gravity(self.grade)
total_force = drag + rolling + gravity
power_needed = total_force * (self.velocity / self.eta)
net_power = power - power_needed
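        # One way to read the update below (a sketch of the energy balance):
        # eta * net_power * dt is treated as the change in kinetic energy,
        # 0.5*m*v_new**2 = 0.5*m*v**2 + eta*net_power*dt, which gives
        # v_new**2 = v**2 + 2*eta*net_power*dt/m.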
r = self.velocity * self.velocity + 2 * net_power * self.time_delta * self.eta / self.mass
if r > 0.0:
self.velocity = math.sqrt(r)
else:
self.velocity = 0.0
        # print("p %.2f, v %.2f, drag %.2f, rolling %.6f, gravity %.2f, total %.2f, r %.2f" % (power, self.velocity, drag, rolling, gravity, total_force, r))
self.distance += self.velocity * self.time_delta
# m/s to mph
v_mph = self.velocity * 2.23694
return power, v_mph, self.distance
def main():
    # Demo driver: plot velocity, power and distance for a constant-power
    # effort. The mass and power values below are illustrative assumptions.
    import matplotlib.pyplot as plt
    velocity_a = [0]
    time_a = [0]
    power_a = [0]
    distance_a = [0]
    bike = SimpleBike(mass=85.0)  # rider + bike mass in kg (assumed)
    # loop over time:
    for x in range(0, 150):
        (power, velocity, distance) = bike.next_sample(200.0)  # constant 200 W (assumed)
velocity_a.append(velocity)
power_a.append(power)
distance_a.append(distance)
time_a.append(bike.time_delta * x)
fig, velocity_axis = plt.subplots()
velocity_axis.margins(0.05)
velocity_axis.plot(time_a, velocity_a, color='b')
velocity_axis.set_xlabel('time (s)')
velocity_axis.set_ylabel('velocity (mph)', color='b')
power_axis = velocity_axis.twinx()
power_axis.margins(0.05)
power_axis.plot(time_a, power_a, 'r')
power_axis.set_ylabel('power (w)', color='r')
distance_axis = velocity_axis.twinx()
distance_axis.margins(0.05)
distance_axis.plot(time_a, distance_a, 'g')
distance_axis.set_ylabel('distance (m)', color='g')
plt.show()
# main()
| gpl-3.0 |
xzh86/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
FRESNA/PyPSA | examples/ac-dc-meshed/ac-dc-lpf.py | 1 | 1741 |
#Compute Linear Power Flow for each snapshot for AC-DC network in
#folder ac-dc-data/
# make the code as Python 3 compatible as possible
from __future__ import print_function, division
from __future__ import absolute_import
import pypsa, os
import pandas as pd
import numpy as np
from itertools import chain
network = pypsa.Network()
folder_name = "ac-dc-data"
network.import_from_csv_folder(folder_name)
network.lpf(network.snapshots)
print("\nSub-Networks:")
for sn in network.sub_networks.obj:
print(sn,network.sub_networks.at[sn.name,"carrier"],len(sn.buses()),len(sn.branches()))
print("\nControllable branches:")
print(network.links)
now = network.snapshots[5]
print("\nCheck power balance at each bus:")
for bus in network.buses.index:
print("\n"*3+bus)
generators = sum(network.generators_t.p.loc[now,network.generators.bus==bus])
loads = sum(network.loads_t.p.loc[now,network.loads.bus==bus])
print("Generators:",generators)
print("Loads:",loads)
print("Total:",generators-loads)
p0 = 0.
p1 = 0.
for c in network.iterate_components(network.branch_components):
bs = (c.df.bus0 == bus)
if bs.any():
print(c,"\n",c.pnl.p0.loc[now,bs])
p0 += c.pnl.p0.loc[now,bs].sum()
bs = (c.df.bus1 == bus)
if bs.any():
print(c,"\n",c.pnl.p1.loc[now,bs])
p1 += c.pnl.p1.loc[now,bs].sum()
print("Branches",p0+p1)
np.testing.assert_allclose(p0+p1,generators-loads)
print("")
print(sum(network.generators_t.p.loc[now]))
print(sum(network.loads_t.p.loc[now]))
results_folder_name = os.path.join(folder_name,"results-lpf")
if True:
network.export_to_csv_folder(results_folder_name)
| gpl-3.0 |
fyffyt/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
rjurney/Agile_Data_Code_2 | ch07/train_sklearn_model.py | 1 | 5304 | import sys, os, re
sys.path.append("lib")
import utils
import numpy as np
import sklearn
import iso8601
import datetime
print("Imports loaded...")
# Load and check the size of our training data. May take a minute.
print("Original JSON file size: {:,} Bytes".format(os.path.getsize("data/simple_flight_delay_features.jsonl")))
training_data = utils.read_json_lines_file('data/simple_flight_delay_features.jsonl')
print("Training items: {:,}".format(len(training_data))) # 5,714,008
print("Data loaded...")
# Inspect a record before we alter them
print("Size of training data in RAM: {:,} Bytes".format(sys.getsizeof(training_data))) # 50MB
print(training_data[0])
# Optionally sample down our training data at first...
sampled_training_data = training_data  # np.random.choice(training_data, 1000000)
print("Sampled items: {:,}".format(len(sampled_training_data)))
print("Data sampled...")
# Separate our results from the rest of the data, vectorize and size up
results = [record['ArrDelay'] for record in sampled_training_data]
results_vector = np.array(results)
sys.getsizeof(results_vector) # 45,712,160 Bytes
print("Results vectorized...")
# Remove the two delay fields and the flight date from our training data
for item in sampled_training_data:
item.pop('ArrDelay', None)
item.pop('FlightDate', None)
print("ArrDelay and FlightDate removed from training data...")
# Must convert datetime strings to unix times
for item in sampled_training_data:
if isinstance(item['CRSArrTime'], str):
dt = iso8601.parse_date(item['CRSArrTime'])
unix_time = int(dt.timestamp())
item['CRSArrTime'] = unix_time
if isinstance(item['CRSDepTime'], str):
dt = iso8601.parse_date(item['CRSDepTime'])
unix_time = int(dt.timestamp())
item['CRSDepTime'] = unix_time
print("Datetimes converted to unix times...")
# Use DictVectorizer to convert feature dicts to vectors
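# For example (an illustrative record, not taken from the data): a dict like
# {'Carrier': 'AA', 'Distance': 1000.0} becomes a row with a one-hot column
# 'Carrier=AA' set to 1.0 and a numeric column 'Distance' set to 1000.0.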
from sklearn.feature_extraction import DictVectorizer
print("Original dimensions: [{:,}]".format(len(training_data)))
vectorizer = DictVectorizer()
training_vectors = vectorizer.fit_transform(training_data)
print("Size of DictVectorized vectors: {:,} Bytes".format(training_vectors.data.nbytes))
print("Training data vectorized...")
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
training_vectors,
results_vector,
test_size=0.1,
random_state=43
)
print(X_train.shape, X_test.shape)
print(y_train.shape, y_test.shape)
print("Test train split performed...")
# Train a regressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, cross_val_predict
from sklearn.metrics import median_absolute_error, r2_score
print("Regressor library and metrics imported...")
regressor = LinearRegression()
print("Regressor instantiated...")
from sklearn.ensemble import GradientBoostingRegressor
regressor = GradientBoostingRegressor()
print("Swapped gradient boosting trees for linear regression!")
# Lets go back for now...
regressor = LinearRegression()
print("Swapped back to linear regression!")
regressor.fit(X_train, y_train)
print("Regressor fitted...")
predicted = regressor.predict(X_test)
print("Predictions made for X_test...")
# Definitions from http://scikit-learn.org/stable/modules/model_evaluation.html
from sklearn.metrics import median_absolute_error, r2_score
# Median absolute error is the median of all absolute differences between the target and the prediction.
# Less is better, more indicates a high error between target and prediction.
medae = median_absolute_error(y_test, predicted)
print("Median absolute error: {:.3g}".format(medae))
# R2 score is the coefficient of determination. Ranges from 1-0, 1.0 is best, 0.0 is worst.
# Measures how well future samples are likely to be predicted.
r2 = r2_score(y_test, predicted)
print("r2 score: {:.3g}".format(r2))
# Plot outputs, compare actual vs predicted values
# import matplotlib.pyplot as plt
#
# plt.scatter(
# y_test,
# predicted,
# color='blue',
# linewidth=1
# )
#
# plt.xticks(())
# plt.yticks(())
#
# plt.show()
#
# Persist model using pickle
#
print("Testing model persistance...")
import pickle
project_home = os.environ["PROJECT_HOME"]
# Dump the model itself
regressor_path = "{}/models/sklearn_regressor.pkl".format(project_home)
regressor_bytes = pickle.dumps(regressor)
model_f = open(regressor_path, 'wb')
model_f.write(regressor_bytes)
# Dump the DictVectorizer that vectorizes the features
vectorizer_path = "{}/models/sklearn_vectorizer.pkl".format(project_home)
vectorizer_bytes = pickle.dumps(vectorizer)
vectorizer_f = open(vectorizer_path, 'wb')
vectorizer_f.write(vectorizer_bytes)
# Load the model itself
model_f = open(regressor_path, 'rb')
model_bytes = model_f.read()
regressor = pickle.loads(model_bytes)
# Load the DictVectorizer
vectorizer_f = open(vectorizer_path, 'rb')
vectorizer_bytes = vectorizer_f.read()
vectorizer = pickle.loads(vectorizer_bytes)
#
# Persist model using sklearn.externals.joblib
#
from sklearn.externals import joblib
# Dump the model and vectorizer
joblib.dump(regressor, regressor_path)
joblib.dump(vectorizer, vectorizer_path)
# Load the model and vectorizer
regressor = joblib.load(regressor_path)
vectorizer = joblib.load(vectorizer_path)
| mit |
yipenggao/moose | modules/porous_flow/doc/tests/radialinjection.py | 5 | 4190 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
#
# The two phase radial injection problem has a similarity solution (r^2/t)
#
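# If the solution depends on r and t only through zeta = r^2/t, a profile
# sampled at fixed time (varying r) and a history sampled at fixed radius
# (varying t) collapse onto a single curve when plotted against zeta, which
# is what the figures below check.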
# Read MOOSE simulation data for constant time (tdata) and constant
# radial distance (rdata)
tdata = np.genfromtxt('../../tests/dirackernels/theis3_line_0016.csv', delimiter = ',', names = True, dtype = float)
rdata = np.genfromtxt('../../tests/dirackernels/theis3.csv', delimiter = ',', names = True, dtype = float)
# Distance where data is sampled as a function of time
r = 4
# Time where data is sampled along the spatial dimension
t = 2e4
fig, axes = plt.subplots(1, 2, figsize = (15, 4))
# Water pressure vs similarity solution
axes[0].plot(tdata['x']**2 / t, tdata['ppwater'] * 1e-6, label = 'Fixed $t$')
axes[0].plot(r**2 / rdata['time'], rdata['ppwater'] * 1e-6, 'o', label = 'Fixed $r$')
axes[0].set_xscale('log')
axes[0].set_xlim([5e-4, 5e1])
axes[0].set_xlabel('$\zeta = r^2/t$')
axes[0].set_ylabel('Liquid pressure (MPa)')
axes[0].legend()
# Gas saturation vs similarity solution
axes[1].plot(tdata['x']**2 / t, tdata['sgas'], label = 'Fixed $t$')
axes[1].plot(r**2 / rdata['time'], rdata['sgas'], 'o', label = 'Fixed $r$')
axes[1].set_xscale('log')
axes[1].set_xlim([5e-4, 5e1])
axes[1].set_ylim([-0.1, 1.1])
axes[1].set_xlabel('$\zeta = r^2/t$')
axes[1].set_ylabel('Gas saturation (-)')
axes[1].legend()
plt.tight_layout()
plt.savefig("theis_similarity_fig.pdf")
#
# The similarity solution (r^2/t) is applicable even when dissolution is included
#
# Read MOOSE simulation data for constant time (tdata) and constant
# radial distance (rdata) using the water-ncg fluid state
tdata = np.genfromtxt('../../tests/fluidstate/theis_csvout_line_0028.csv', delimiter = ',', names = True, dtype = float)
rdata = np.genfromtxt('../../tests/fluidstate/theis_csvout.csv', delimiter = ',', names = True, dtype = float)
# Distance where data is sampled as a function of time
r = 4
# Time where data is sampled along the spatial dimension
t = 1e5
fig, axes = plt.subplots(1, 2, figsize = (15, 4))
# Gas pressure vs similarity solution
axes[0].plot(tdata['x']**2 / t, tdata['pgas'] * 1e-6, label = 'Fixed $t$')
axes[0].plot(r**2 / rdata['time'], rdata['pgas'] * 1e-6, 'o', label = 'Fixed $r$')
axes[0].set_xscale('log')
axes[0].set_xlim([1e-4, 5e1])
axes[0].set_xlabel('$\zeta = r^2/t$')
axes[0].set_ylabel('Gas pressure (MPa)')
axes[0].legend()
# Total mass fraction vs similarity solution
axes[1].plot(tdata['x']**2 / t, tdata['zi'], label = 'Fixed $t$')
axes[1].plot(r**2 / rdata['time'], rdata['zi'], 'o', label = 'Fixed $r$')
axes[1].set_xscale('log')
axes[1].set_xlim([1e-4, 5e1])
axes[1].set_ylim([-0.1, 1.1])
axes[1].set_xlabel('$\zeta = r^2/t$')
axes[1].set_ylabel('Total mass fraction (-)')
axes[1].legend()
plt.tight_layout()
plt.savefig("theis_similarity_waterncg_fig.pdf")
#
# Read MOOSE simulation data for constant time (tdata) and constant
# radial distance (rdata) using the brine-co2 fluid state
tdata = np.genfromtxt('../../tests/fluidstate/theis_brineco2_csvout_line_0028.csv', delimiter = ',', names = True, dtype = float)
rdata = np.genfromtxt('../../tests/fluidstate/theis_brineco2_csvout.csv', delimiter = ',', names = True, dtype = float)
# Distance where data is sampled as a function of time
r = 4
# Time where data is sampled along the spatial dimension
t = 1e5
fig, axes = plt.subplots(1, 2, figsize = (15, 4))
# Gas pressure vs similarity solution
axes[0].plot(tdata['x']**2 / t, tdata['pgas'] * 1e-6, label = 'Fixed $t$')
axes[0].plot(r**2 / rdata['time'], rdata['pgas'] * 1e-6, 'o', label = 'Fixed $r$')
axes[0].set_xscale('log')
axes[0].set_xlim([1e-4, 5e1])
axes[0].set_xlabel(r'$\zeta = r^2/t$')
axes[0].set_ylabel('Gas pressure (MPa)')
axes[0].legend()
# Total mass fraction vs similarity solution
axes[1].plot(tdata['x']**2 / t, tdata['zi'], label = 'Fixed $t$')
axes[1].plot(r**2 / rdata['time'], rdata['zi'], 'o', label = 'Fixed $r$')
axes[1].set_xscale('log')
axes[1].set_xlim([1e-4, 5e1])
axes[1].set_ylim([-0.1, 1.1])
axes[1].set_xlabel(r'$\zeta = r^2/t$')
axes[1].set_ylabel('Total mass fraction (-)')
axes[1].legend()
plt.tight_layout()
plt.savefig("theis_similarity_brineco2_fig.pdf")
| lgpl-2.1 |
jlegendary/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtk.py | 69 | 43991 | from __future__ import division
import os, sys
def fn_name(): return sys._getframe(1).f_code.co_name
try:
import gobject
import gtk; gdk = gtk.gdk
import pango
except ImportError:
raise ImportError("Gtk* backend requires pygtk to be installed.")
pygtk_version_required = (2,2,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
import matplotlib
from matplotlib import verbose
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib.backends.backend_gdk import RendererGDK, FigureCanvasGDK
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.colors import colorConverter
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import lines
from matplotlib import cbook
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
cursord = {
cursors.MOVE : gdk.Cursor(gdk.FLEUR),
cursors.HAND : gdk.Cursor(gdk.HAND2),
cursors.POINTER : gdk.Cursor(gdk.LEFT_PTR),
cursors.SELECT_REGION : gdk.Cursor(gdk.TCROSS),
}
# ref gtk+/gtk/gtkwidget.h
def GTK_WIDGET_DRAWABLE(w):
flags = w.flags()
return flags & gtk.VISIBLE != 0 and flags & gtk.MAPPED != 0
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def show(mainloop=True):
"""
Show all the figures and enter the gtk main loop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.window.show()
if mainloop and gtk.main_level() == 0 and \
len(Gcf.get_all_fig_managers())>0:
gtk.main()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTK(thisFig)
manager = FigureManagerGTK(canvas, num)
# equals:
#manager = FigureManagerGTK(FigureCanvasGTK(Figure(*args, **kwargs)), num)
return manager
class FigureCanvasGTK (gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (gdk.BUTTON_PRESS_MASK |
gdk.BUTTON_RELEASE_MASK |
gdk.EXPOSURE_MASK |
gdk.KEY_PRESS_MASK |
gdk.KEY_RELEASE_MASK |
gdk.ENTER_NOTIFY_MASK |
gdk.LEAVE_NOTIFY_MASK |
gdk.POINTER_MOTION_MASK |
gdk.POINTER_MOTION_HINT_MASK)
def __init__(self, figure):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
FigureCanvasBase.__init__(self, figure)
gtk.DrawingArea.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._pixmap_width = -1
self._pixmap_height = -1
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('expose_event', self.expose_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(False)
self.set_flags(gtk.CAN_FOCUS)
self._renderer_init()
self._idle_event_id = gobject.idle_add(self.idle_event)
def destroy(self):
#gtk.DrawingArea.destroy(self)
gobject.source_remove(self._idle_event_id)
if self._idle_draw_id != 0:
gobject.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
if event.direction==gdk.SCROLL_UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_press_event(self, x, y, event.button)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
key = self._get_key(event)
if _debug: print "hit", key
FigureCanvasBase.key_press_event(self, key)
return False # finish event propagation?
def key_release_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
key = self._get_key(event)
if _debug: print "release", key
FigureCanvasBase.key_release_event(self, key)
return False # finish event propagation?
def motion_notify_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.state
# flipy so y=0 is bottom of canvas
y = self.allocation.height - y
FigureCanvasBase.motion_notify_event(self, x, y)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
FigureCanvasBase.enter_notify_event(self, event)
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval <256:
key = chr(event.keyval)
else:
key = None
ctrl = event.state & gdk.CONTROL_MASK
shift = event.state & gdk.SHIFT_MASK
return key
def configure_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if widget.window is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches (w/dpi, h/dpi)
self._need_redraw = True
return False # finish event propagation?
def draw(self):
# Note: FigureCanvasBase.draw() is inconveniently named as it clashes
# with the deprecated gtk.Widget.draw()
self._need_redraw = True
if GTK_WIDGET_DRAWABLE(self):
self.queue_draw()
# do a synchronous draw (it's less efficient than an async draw,
# but is required if/when animation is used)
self.window.process_updates (False)
def draw_idle(self):
def idle_draw(*args):
self.draw()
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
def _renderer_init(self):
"""Override by GTK backends to select a different renderer
Renderer should provide the methods:
set_pixmap ()
set_width_height ()
that are used by
_render_figure() / _pixmap_prepare()
"""
self._renderer = RendererGDK (self, self.figure.dpi)
def _pixmap_prepare(self, width, height):
"""
Make sure _._pixmap is at least width, height,
create new pixmap if necessary
"""
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
create_pixmap = False
if width > self._pixmap_width:
# increase the pixmap in 10%+ (rather than 1 pixel) steps
self._pixmap_width = max (int (self._pixmap_width * 1.1),
width)
create_pixmap = True
if height > self._pixmap_height:
self._pixmap_height = max (int (self._pixmap_height * 1.1),
height)
create_pixmap = True
if create_pixmap:
self._pixmap = gdk.Pixmap (self.window, self._pixmap_width,
self._pixmap_height)
self._renderer.set_pixmap (self._pixmap)
def _render_figure(self, pixmap, width, height):
"""used by GTK and GTKcairo. GTKAgg overrides
"""
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def expose_event(self, widget, event):
"""Expose_event for all GTK backends. Should not be overridden.
"""
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if GTK_WIDGET_DRAWABLE(self):
if self._need_redraw:
x, y, w, h = self.allocation
self._pixmap_prepare (w, h)
self._render_figure(self._pixmap, w, h)
self._need_redraw = False
x, y, w, h = event.area
self.window.draw_drawable (self.style.fg_gc[self.state],
self._pixmap, x, y, x, y, w, h)
return False # finish event propagation?
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
filetypes['png'] = 'Portable Network Graphics'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format):
if self.flags() & gtk.REALIZED == 0:
# for self.window(for pixmap) and has a side effect of altering
# figure width,height (via configure-event?)
gtk.DrawingArea.realize(self)
width, height = self.get_width_height()
pixmap = gdk.Pixmap (self.window, width, height)
self._renderer.set_pixmap (pixmap)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, 0, 8, width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
if is_string_like(filename):
try:
pixbuf.save(filename, format)
except gobject.GError, exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
elif is_writable_file_like(filename):
if hasattr(pixbuf, 'save_to_callback'):
def save_callback(buf, data=None):
data.write(buf)
try:
pixbuf.save_to_callback(save_callback, format, user_data=filename)
except gobject.GError, exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
else:
raise ValueError("Saving to a Python file-like object is only supported by PyGTK >= 2.8")
else:
raise ValueError("filename must be a path or a file-like object")
def get_default_filetype(self):
return 'png'
def flush_events(self):
gtk.gdk.threads_enter()
while gtk.events_pending():
gtk.main_iteration(True)
gtk.gdk.flush()
gtk.gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerGTK(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The gtk.Toolbar (gtk only)
vbox : The gtk.VBox containing the canvas and toolbar (gtk only)
window : The gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print 'FigureManagerGTK.%s' % fn_name()
FigureManagerBase.__init__(self, canvas, num)
self.window = gtk.Window()
self.window.set_title("Figure %d" % num)
self.vbox = gtk.VBox()
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
self.vbox.pack_start(self.canvas, True, True)
self.toolbar = self._get_toolbar(canvas)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
if self.toolbar is not None:
self.toolbar.show()
self.vbox.pack_end(self.toolbar, False, False)
tb_w, tb_h = self.toolbar.size_request()
h += tb_h
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print 'FigureManagerGTK.%s' % fn_name()
self.vbox.destroy()
self.window.destroy()
self.canvas.destroy()
self.toolbar.destroy()
self.__dict__.clear()
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
gtk.main_level() >= 1:
gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle (self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK (canvas, self.window)
else:
toolbar = None
return toolbar
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK(NavigationToolbar2, gtk.Toolbar):
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image_file, callback(str)
toolitems = (
('Home', 'Reset original view', 'home.png', 'home'),
('Back', 'Back to previous view','back.png', 'back'),
('Forward', 'Forward to next view','forward.png', 'forward'),
('Pan', 'Pan axes with left mouse, zoom with right', 'move.png','pan'),
('Zoom', 'Zoom to rectangle','zoom_to_rect.png', 'zoom'),
(None, None, None, None),
('Subplots', 'Configure subplots','subplots.png', 'configure_subplots'),
('Save', 'Save the figure','filesave.png', 'save_figure'),
)
def __init__(self, canvas, window):
self.win = window
gtk.Toolbar.__init__(self)
NavigationToolbar2.__init__(self, canvas)
self._idle_draw_id = 0
def set_message(self, s):
if self._idle_draw_id == 0:
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.window.set_cursor(cursord[cursor])
def release(self, event):
try: del self._imageBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
drawable = self.canvas.window
if drawable is None:
return
gc = drawable.new_gc()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val)for val in min(x0,x1), min(y0, y1), w, h]
try: lastrect, imageBack = self._imageBack
except AttributeError:
#snap image back
if event.inaxes is None:
return
ax = event.inaxes
l,b,w,h = [int(val) for val in ax.bbox.bounds]
b = int(height)-(b+h)
axrect = l,b,w,h
self._imageBack = axrect, drawable.get_image(*axrect)
drawable.draw_rectangle(gc, False, *rect)
self._idle_draw_id = 0
else:
def idle_draw(*args):
drawable.draw_image(gc, imageBack, 0, 0, *lastrect)
drawable.draw_rectangle(gc, False, *rect)
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
def _init_toolbar(self):
self.set_style(gtk.TOOLBAR_ICONS)
if gtk.pygtk_version >= (2,4,0):
self._init_toolbar2_4()
else:
self._init_toolbar2_2()
def _init_toolbar2_2(self):
basedir = os.path.join(matplotlib.rcParams['datapath'],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.append_space()
continue
fname = os.path.join(basedir, image_file)
image = gtk.Image()
image.set_from_file(fname)
w = self.append_item(text,
tooltip_text,
'Private',
image,
getattr(self, callback)
)
self.append_space()
self.message = gtk.Label()
self.append_widget(self.message, None, None)
self.message.show()
def _init_toolbar2_4(self):
basedir = os.path.join(matplotlib.rcParams['datapath'],'images')
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file)
image = gtk.Image()
image.set_from_file(fname)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
toolitem = gtk.SeparatorToolItem()
self.insert(toolitem, -1)
# set_draw() not making separator invisible,
# bug #143692 fixed Jun 06 2004, will be in GTK+ 2.6
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = gtk.ToolItem()
self.insert(toolitem, -1)
self.message = gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
if gtk.pygtk_version >= (2,4,0):
return FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
return FileSelection(title='Save the figure',
parent=self.win,)
def save_figure(self, button):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception, e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = gtk.Window()
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = gtk.VBox()
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True)
window.show()
def _get_canvas(self, fig):
return FigureCanvasGTK(fig)
class NavigationToolbar(gtk.Toolbar):
"""
Public attributes
canvas - the FigureCanvas (gtk.DrawingArea)
win - the gtk.Window
"""
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image, callback(str), callback_arg, scroll(bool)
toolitems = (
('Left', 'Pan left with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_BACK, 'panx', -1, True),
('Right', 'Pan right with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_FORWARD, 'panx', 1, True),
('Zoom In X',
'Zoom In X (shrink the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomx', 1, True),
('Zoom Out X',
'Zoom Out X (expand the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomx', -1, True),
(None, None, None, None, None, None,),
('Up', 'Pan up with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_UP, 'pany', 1, True),
('Down', 'Pan down with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_DOWN, 'pany', -1, True),
('Zoom In Y',
'Zoom in Y (shrink the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomy', 1, True),
('Zoom Out Y',
'Zoom Out Y (expand the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomy', -1, True),
(None, None, None, None, None, None,),
('Save', 'Save the figure',
gtk.STOCK_SAVE, 'save_figure', None, False),
)
def __init__(self, canvas, window):
"""
figManager is the FigureManagerGTK instance that contains the
toolbar, with attributes figure, window and drawingArea
"""
gtk.Toolbar.__init__(self)
self.canvas = canvas
# Note: gtk.Toolbar already has a 'window' attribute
self.win = window
self.set_style(gtk.TOOLBAR_ICONS)
if gtk.pygtk_version >= (2,4,0):
self._create_toolitems_2_4()
self.update = self._update_2_4
self.fileselect = FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
self._create_toolitems_2_2()
self.update = self._update_2_2
self.fileselect = FileSelection(title='Save the figure',
parent=self.win)
self.show_all()
self.update()
def _create_toolitems_2_4(self):
# use the GTK+ 2.4 GtkToolbar API
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_num, callback, callback_arg, scroll \
in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
image = gtk.Image()
image.set_from_stock(image_num, iconSize)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
if callback_arg:
tbutton.connect('clicked', getattr(self, callback),
callback_arg)
else:
tbutton.connect('clicked', getattr(self, callback))
if scroll:
tbutton.connect('scroll_event', getattr(self, callback))
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
# Axes toolitem, is empty at start, update() adds a menu if >=2 axes
self.axes_toolitem = gtk.ToolItem()
self.insert(self.axes_toolitem, 0)
self.axes_toolitem.set_tooltip (
self.tooltips,
tip_text='Select axes that controls affect',
tip_private = 'Private')
align = gtk.Alignment (xalign=0.5, yalign=0.5, xscale=0.0, yscale=0.0)
self.axes_toolitem.add(align)
self.menubutton = gtk.Button ("Axes")
align.add (self.menubutton)
def position_menu (menu):
"""Function for positioning a popup menu.
Place menu below the menu button, but ensure it does not go off
the bottom of the screen.
The default is to popup menu at current mouse position
"""
x0, y0 = self.window.get_origin()
x1, y1, m = self.window.get_pointer()
x2, y2 = self.menubutton.get_pointer()
sc_h = self.get_screen().get_height() # requires GTK+ 2.2 +
w, h = menu.size_request()
x = x0 + x1 - x2
y = y0 + y1 - y2 + self.menubutton.allocation.height
y = min(y, sc_h - h)
return x, y, True
def button_clicked (button, data=None):
self.axismenu.popup (None, None, position_menu, 0,
gtk.get_current_event_time())
self.menubutton.connect ("clicked", button_clicked)
def _update_2_4(self):
# for GTK+ 2.4+
# called by __init__() and FigureManagerGTK
self._axes = self.canvas.figure.axes
if len(self._axes) >= 2:
self.axismenu = self._make_axis_menu()
self.menubutton.show_all()
else:
self.menubutton.hide()
self.set_active(range(len(self._axes)))
def _create_toolitems_2_2(self):
# use the GTK+ 2.2 (and lower) GtkToolbar API
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
for text, tooltip_text, image_num, callback, callback_arg, scroll \
in self.toolitems:
if text is None:
self.append_space()
continue
image = gtk.Image()
image.set_from_stock(image_num, iconSize)
item = self.append_item(text, tooltip_text, 'Private', image,
getattr(self, callback), callback_arg)
if scroll:
item.connect("scroll_event", getattr(self, callback))
self.omenu = gtk.OptionMenu()
self.omenu.set_border_width(3)
self.insert_widget(
self.omenu,
'Select axes that controls affect',
'Private', 0)
def _update_2_2(self):
# for GTK+ 2.2 and lower
# called by __init__() and FigureManagerGTK
self._axes = self.canvas.figure.axes
if len(self._axes) >= 2:
# set up the axis menu
self.omenu.set_menu( self._make_axis_menu() )
self.omenu.show_all()
else:
self.omenu.hide()
self.set_active(range(len(self._axes)))
def _make_axis_menu(self):
# called by self._update*()
def toggled(item, data=None):
if item == self.itemAll:
for item in items: item.set_active(True)
elif item == self.itemInvert:
for item in items:
item.set_active(not item.get_active())
ind = [i for i,item in enumerate(items) if item.get_active()]
self.set_active(ind)
menu = gtk.Menu()
self.itemAll = gtk.MenuItem("All")
menu.append(self.itemAll)
self.itemAll.connect("activate", toggled)
self.itemInvert = gtk.MenuItem("Invert")
menu.append(self.itemInvert)
self.itemInvert.connect("activate", toggled)
items = []
for i in range(len(self._axes)):
item = gtk.CheckMenuItem("Axis %d" % (i+1))
menu.append(item)
item.connect("toggled", toggled)
item.set_active(True)
items.append(item)
menu.show_all()
return menu
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, button, direction):
'panx in direction'
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
return True
def pany(self, button, direction):
'pany in direction'
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
return True
def zoomx(self, button, direction):
'zoomx in direction'
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
return True
def zoomy(self, button, direction):
'zoomy in direction'
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
return True
def get_filechooser(self):
if gtk.pygtk_version >= (2,4,0):
return FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
return FileSelection(title='Save the figure',
parent=self.win)
def save_figure(self, button):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception, e:
error_msg_gtk(str(e), parent=self)
if gtk.pygtk_version >= (2,4,0):
class FileChooserDialog(gtk.FileChooserDialog):
"""GTK+ 2.4 file selector which remembers the last file/directory
selected and presents the user with a menu of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK),
path = None,
filetypes = [],
default_filetype = None
):
super (FileChooserDialog, self).__init__ (title, parent, action,
buttons)
self.set_default_response (gtk.RESPONSE_OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = gtk.HBox (spacing=10)
hbox.pack_start (gtk.Label ("File Format:"), expand=False)
liststore = gtk.ListStore(gobject.TYPE_STRING)
cbox = gtk.ComboBox(liststore)
cell = gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start (cbox)
self.filetypes = filetypes
self.sorted_filetypes = filetypes.items()
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
cbox.append_text ("%s (*.%s)" % (name, ext))
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(gtk.RESPONSE_OK):
break
filename = self.get_filename()
break
self.hide()
return filename, self.ext
else:
class FileSelection(gtk.FileSelection):
"""GTK+ 2.2 and lower file selector which remembers the last
file/directory selected
"""
def __init__(self, path=None, title='Select a file', parent=None):
super(FileSelection, self).__init__(title)
if path: self.path = path
else: self.path = os.getcwd() + os.sep
if parent: self.set_transient_for(parent)
def get_filename_from_user(self, path=None, title=None):
if path: self.path = path
if title: self.set_title(title)
self.set_filename(self.path)
filename = None
if self.run() == int(gtk.RESPONSE_OK):
self.path = filename = self.get_filename()
self.hide()
ext = None
if filename is not None:
ext = os.path.splitext(filename)[1]
if ext.startswith('.'):
ext = ext[1:]
return filename, ext
class DialogLineprops:
"""
A GUI dialog for controlling lineprops
"""
signals = (
'on_combobox_lineprops_changed',
'on_combobox_linestyle_changed',
'on_combobox_marker_changed',
'on_colorbutton_linestyle_color_set',
'on_colorbutton_markerface_color_set',
'on_dialog_lineprops_okbutton_clicked',
'on_dialog_lineprops_cancelbutton_clicked',
)
linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])
markers = [m for m in lines.Line2D.markers if cbook.is_string_like(m)]
markerd = dict([(s,i) for i,s in enumerate(markers)])
def __init__(self, lines):
import gtk.glade
datadir = matplotlib.get_data_path()
gladefile = os.path.join(datadir, 'lineprops.glade')
if not os.path.exists(gladefile):
raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)
self._inited = False
self._updateson = True # suppress updates when setting widgets manually
self.wtree = gtk.glade.XML(gladefile, 'dialog_lineprops')
self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))
self.dlg = self.wtree.get_widget('dialog_lineprops')
self.lines = lines
cbox = self.wtree.get_widget('combobox_lineprops')
cbox.set_active(0)
self.cbox_lineprops = cbox
cbox = self.wtree.get_widget('combobox_linestyles')
for ls in self.linestyles:
cbox.append_text(ls)
cbox.set_active(0)
self.cbox_linestyles = cbox
cbox = self.wtree.get_widget('combobox_markers')
for m in self.markers:
cbox.append_text(m)
cbox.set_active(0)
self.cbox_markers = cbox
self._lastcnt = 0
self._inited = True
def show(self):
'populate the combo box'
self._updateson = False
# flush the old
cbox = self.cbox_lineprops
for i in range(self._lastcnt-1,-1,-1):
cbox.remove_text(i)
# add the new
for line in self.lines:
cbox.append_text(line.get_label())
cbox.set_active(0)
self._updateson = True
self._lastcnt = len(self.lines)
self.dlg.show()
def get_active_line(self):
'get the active line'
ind = self.cbox_lineprops.get_active()
line = self.lines[ind]
return line
def get_active_linestyle(self):
'get the active linestyle'
ind = self.cbox_linestyles.get_active()
ls = self.linestyles[ind]
return ls
def get_active_marker(self):
'get the active marker'
ind = self.cbox_markers.get_active()
m = self.markers[ind]
return m
def _update(self):
'update the active line props from the widgets'
if not self._inited or not self._updateson: return
line = self.get_active_line()
ls = self.get_active_linestyle()
marker = self.get_active_marker()
line.set_linestyle(ls)
line.set_marker(marker)
button = self.wtree.get_widget('colorbutton_linestyle')
color = button.get_color()
r, g, b = [val/65535. for val in color.red, color.green, color.blue]
line.set_color((r,g,b))
button = self.wtree.get_widget('colorbutton_markerface')
color = button.get_color()
r, g, b = [val/65535. for val in color.red, color.green, color.blue]
line.set_markerfacecolor((r,g,b))
line.figure.canvas.draw()
def on_combobox_lineprops_changed(self, item):
'update the widgets from the active line'
if not self._inited: return
self._updateson = False
line = self.get_active_line()
ls = line.get_linestyle()
if ls is None: ls = 'None'
self.cbox_linestyles.set_active(self.linestyled[ls])
marker = line.get_marker()
if marker is None: marker = 'None'
self.cbox_markers.set_active(self.markerd[marker])
r,g,b = colorConverter.to_rgb(line.get_color())
color = gtk.gdk.Color(*[int(val*65535) for val in r,g,b])
button = self.wtree.get_widget('colorbutton_linestyle')
button.set_color(color)
r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())
color = gtk.gdk.Color(*[int(val*65535) for val in r,g,b])
button = self.wtree.get_widget('colorbutton_markerface')
button.set_color(color)
self._updateson = True
def on_combobox_linestyle_changed(self, item):
self._update()
def on_combobox_marker_changed(self, item):
self._update()
def on_colorbutton_linestyle_color_set(self, button):
self._update()
def on_colorbutton_markerface_color_set(self, button):
'called colorbutton marker clicked'
self._update()
def on_dialog_lineprops_okbutton_clicked(self, button):
self._update()
self.dlg.hide()
def on_dialog_lineprops_cancelbutton_clicked(self, button):
self.dlg.hide()
# set icon used when windows are minimized
# Unfortunately, the SVG renderer (rsvg) leaks memory under earlier
# versions of pygtk, so we have to use a PNG file instead.
try:
if gtk.pygtk_version < (2, 8, 0):
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
gtk.window_set_default_icon_from_file (
os.path.join (matplotlib.rcParams['datapath'], 'images', icon_filename))
except:
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel gtk.Window
parent = parent.get_toplevel()
if parent.flags() & gtk.TOPLEVEL == 0:
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = gtk.MessageDialog(
parent = parent,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = msg)
dialog.run()
dialog.destroy()
FigureManager = FigureManagerGTK
| gpl-3.0 |
fred3m/toyz | setup.py | 1 | 4724 | import os
import sys
import glob
from setuptools import setup
from setuptools import find_packages
import subprocess
import warnings
def update_git_devstr(version, path=None):
"""
Updates the git revision string if and only if the path is being imported
directly from a git working copy. This ensures that the revision number in
the version string is accurate.
"""
try:
# Quick way to determine if we're in git or not - returns '' if not
devstr = get_git_devstr(sha=True, show_warning=False, path=path)
except OSError:
return version
if not devstr:
# Probably not in git so just pass silently
return version
if 'dev' in version: # update to the current git revision
version_base = version.split('.dev', 1)[0]
devstr = get_git_devstr(sha=False, show_warning=False, path=path)
return version_base + '.dev' + devstr
else:
#otherwise it's already the true/release version
return version
def get_git_devstr(sha=False, show_warning=True, path=None):
"""
Determines the number of revisions in this repository.
Parameters
----------
sha : bool
If True, the full SHA1 hash will be returned. Otherwise, the total
count of commits in the repository will be used as a "revision
number".
show_warning : bool
If True, issue a warning if git returns an error code, otherwise errors
pass silently.
path : str or None
If a string, specifies the directory to look in to find the git
repository. If `None`, the current working directory is used.
If given a filename it uses the directory containing that file.
Returns
-------
devversion : str
Either a string with the revision number (if `sha` is False), the
SHA1 hash of the current commit (if `sha` is True), or an empty string
if git version info could not be identified.
"""
if path is None:
path = os.getcwd()
if not os.path.isdir(path):
path = os.path.abspath(os.path.dirname(path))
if not os.path.exists(os.path.join(path, '.git')):
return ''
if sha:
cmd = ['rev-parse'] # Faster for getting just the hash of HEAD
else:
cmd = ['rev-list', '--count']
try:
p = subprocess.Popen(['git'] + cmd + ['HEAD'], cwd=path,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
except OSError as e:
if show_warning:
warnings.warn('Error running git: ' + str(e))
return ''
if p.returncode == 128:
if show_warning:
warnings.warn('No git repository present at {0!r}! Using default '
'dev version.'.format(path))
return ''
elif p.returncode != 0:
if show_warning:
warnings.warn('Git failed while determining revision '
'count: ' + stderr)
return ''
if sha:
return stdout.decode('utf-8')[:40]
else:
return stdout.decode('utf-8').strip()
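# Illustrative note (behavior summarized from the docstring above): inside a
# git checkout, get_git_devstr() returns the commit count as a string
# (e.g. '146'), get_git_devstr(sha=True) returns the 40-character SHA1 of
# HEAD, and outside a repository both return ''.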
_last_generated_version = '0.0.dev146'
version = update_git_devstr(_last_generated_version)
githash = get_git_devstr(sha=True, show_warning=False)
major = 0
minor = 0
bugfix = 0
release = False
debug = False
# Package info
PACKAGE_NAME = "toyz"
DESCRIPTION = "Data reduction and analysis software"
LONG_DESC = "Interface to run python plots and scripts from a web browser"
AUTHOR = "Fred Moolekamp"
AUTHOR_EMAIL = "[email protected]"
LICENSE = "BSD 3-clause"
URL = "http://fred3m.github.io/toyz/"
# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '1.1.0dev'
#VERSION = '1.1.0'
if 'dev' in VERSION:
VERSION += get_git_devstr(False)
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
if os.path.basename(fname) != 'README.rst']
packages = find_packages()
setup(name=PACKAGE_NAME,
version=VERSION,
description=DESCRIPTION,
packages=packages,
scripts=scripts,
extras_require={
'all': [
'scipy>=0.15',
'matplotlib',
'pandas>=0.14',
'astropy>=0.4',
'sqlalchemy',
'pillow'
]
},
install_requires=[
'tornado>=4.0.2,<4.2',
'passlib',
'numpy>=1.5.1',
'six',
'importlib'
],
#provides=[PACKAGE_NAME],
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
long_description=LONG_DESC,
zip_safe=False,
use_2to3=True,
include_package_data=True
) | bsd-3-clause |
lukeiwanski/tensorflow-opencl | tensorflow/examples/learn/text_classification_cnn.py | 53 | 4430 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
def cnn_model(features, target):
"""2 layer ConvNet to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
target = tf.one_hot(target, 15, 1, 0)
word_vectors = tf.contrib.layers.embed_sequence(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
word_vectors, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1],
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(
pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = learn.Estimator(model_fn=cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
sergiy-evision/math-algorithms | data science/Assessment 3/assessment 3.py | 1 | 1156 | import numpy as np
import pandas
from sklearn.cross_validation import KFold
from sklearn.cross_validation import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import scale
def get_k_score(x, cv, y):
res = []
for k in range(1, 51):
neigh = KNeighborsClassifier(n_neighbors=k)
arr = cross_val_score(neigh, x, y, cv=cv)
a = round(arr.sum() / len(arr), 2)
res.append(a)
print res
return np.array(res)
if __name__ == '__main__':
df = pandas.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data",
header=0,
index_col=None,
names=['CLS', 'W', 'E', 'R', 'T', 'Y', 'U', 'I', 'O', 'P', 'A', 'S', 'D', 'F'])
Y = df['CLS'].as_matrix()
X = df[['W', 'E', 'R', 'T', 'Y', 'U', 'I', 'O', 'P', 'A', 'S', 'D', 'F']].as_matrix()
kf = KFold(len(Y), n_folds=5, shuffle=True, random_state=42)
ans = get_k_score(X, kf, Y)
print ans.max(), ans.argmax()
ans_scaled = get_k_score(scale(X), kf, Y)
    print ans_scaled.max(), ans_scaled.argmax()
| mit |
ortylp/scipy | scipy/signal/waveforms.py | 64 | 14818 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly']
def sawtooth(t, width=1):
"""
Return a periodic sawtooth or triangle waveform.
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
Time.
width : array_like, optional
Width of the rising ramp as a proportion of the total cycle.
Default is 1, producing a rising ramp, while 0 produces a falling
ramp. `width` = 0.5 produces a triangle wave.
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500)
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
"""
t, w = asarray(t), asarray(width)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# take t modulo 2*pi
tmod = mod(t, 2 * pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
tsub = extract(mask2, tmod)
wsub = extract(mask2, w)
place(y, mask2, tsub / (pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = extract(mask3, tmod)
wsub = extract(mask3, w)
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period ``2*pi``, has value +1 from 0 to
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
the interval [0,1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
The input time array.
duty : array_like, optional
Duty cycle. Default is 0.5 (50% duty cycle).
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the square waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500, endpoint=False)
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
>>> plt.ylim(-2, 2)
A pulse-width modulated sine wave:
>>> plt.figure()
>>> sig = np.sin(2 * np.pi * t)
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, sig)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, pwm)
>>> plt.ylim(-1.5, 1.5)
"""
t, w = asarray(t), asarray(duty)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# on the interval 0 to duty*2*pi function is 1
tmod = mod(t, 2 * pi)
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
place(y, mask2, 1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
place(y, mask3, -1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
retenv=False):
"""
Return a Gaussian modulated sinusoid:
``exp(-a t^2) exp(1j*2*pi*fc*t).``
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (e.g. Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
Returns
-------
yI : ndarray
Real part of signal. Always returned.
yQ : ndarray
Imaginary part of signal. Only returned if `retquad` is True.
yenv : ndarray
Envelope of signal. Only returned if `retenv` is True.
See Also
--------
scipy.signal.morlet
Examples
--------
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
sampled at 100 Hz for 2 seconds:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
>>> plt.plot(t, i, t, q, t, e, '--')
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 / 4 = -log(ref)
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref) / a)
yenv = exp(-a * t * t)
yI = yenv * cos(2 * pi * fc * t)
yQ = yenv * sin(2 * pi * fc * t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : array_like
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f0 and f1 must be nonzero.
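Examples
--------
As a minimal illustration (plotting via matplotlib assumed available),
generate a linear chirp sweeping from 6 Hz down to 1 Hz over 10 seconds,
sampled at 500 Hz:
>>> from scipy.signal import chirp
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 10, 5000, endpoint=False)
>>> w = chirp(t, f0=6, f1=1, t1=10, method='linear')
>>> plt.plot(t, w)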
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
"""
Frequency-swept cosine generator, with a time-dependent frequency.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1-D array_like or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees. Default: 0.
Returns
-------
sweep_poly : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
See Also
--------
chirp
Notes
-----
.. versionadded:: 0.8.0
If `poly` is a list or ndarray of length `n`, then the elements of
`poly` are the coefficients of the polynomial, and the instantaneous
frequency is:
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
frequency is:
``f(t) = poly(t)``
Finally, the output `s` is:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
``f(t)`` as defined above.
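Examples
--------
A minimal sketch: sweep the frequency along the cubic polynomial
``f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2`` for 10 seconds, sampled
at 500 Hz:
>>> from scipy.signal import sweep_poly
>>> p = np.poly1d([0.025, -0.36, 1.25, 2.0])
>>> t = np.linspace(0, 10, 5000, endpoint=False)
>>> w = sweep_poly(t, p)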
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
| bsd-3-clause |
jakobj/nest-simulator | pynest/examples/glif_cond_neuron.py | 14 | 9655 | # -*- coding: utf-8 -*-
#
# glif_cond_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Conductance-based generalized leaky integrate and fire (GLIF) neuron example
----------------------------------------------------------------------------
Simple example of how to use the ``glif_cond`` neuron model for
five different levels of GLIF neurons.
Four stimulation paradigms are illustrated for the GLIF model
with externally applied current and spikes impinging.
Voltage traces, injected current traces, threshold traces, synaptic
conductance traces and spikes are shown.
KEYWORDS: glif_cond
"""
##############################################################################
# First, we import all necessary modules to simulate, analyze and plot this
# example.
import nest
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
##############################################################################
# We initialize the NEST kernel and set the simulation resolution.
nest.ResetKernel()
resolution = 0.05
nest.SetKernelStatus({"resolution": resolution})
###############################################################################
# We create the five levels of GLIF model to be tested, i.e.,
# ``lif``, ``lif_r``, ``lif_asc``, ``lif_r_asc``, ``lif_r_asc_a``.
# For each level of GLIF model, we create a ``glif_cond`` node by setting the
# relevant model mechanism parameters; all other neuron parameters keep their
# defaults. The five ``glif_cond`` node handles are combined into a list.
# Note that the default number of synaptic ports for spike inputs is two:
# one excitatory receptor with a time constant of 0.2 ms and a reversal
# potential of 0.0 mV, and one inhibitory receptor with a time constant of
# 2.0 ms and a reversal potential of -85.0 mV. Users can configure as many
# synaptic ports as needed for ``glif_cond`` by setting the array parameters
# ``tau_syn`` and ``E_rev`` of the model, as sketched after the node creation
# below.
n_lif = nest.Create("glif_cond",
params={"spike_dependent_threshold": False,
"after_spike_currents": False,
"adapting_threshold": False})
n_lif_r = nest.Create("glif_cond",
params={"spike_dependent_threshold": True,
"after_spike_currents": False,
"adapting_threshold": False})
n_lif_asc = nest.Create("glif_cond",
params={"spike_dependent_threshold": False,
"after_spike_currents": True,
"adapting_threshold": False})
n_lif_r_asc = nest.Create("glif_cond",
params={"spike_dependent_threshold": True,
"after_spike_currents": True,
"adapting_threshold": False})
n_lif_r_asc_a = nest.Create("glif_cond",
params={"spike_dependent_threshold": True,
"after_spike_currents": True,
"adapting_threshold": True})
neurons = n_lif + n_lif_r + n_lif_asc + n_lif_r_asc + n_lif_r_asc_a
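###############################################################################
# Illustrative sketch (an addition to this example, not used in the simulation
# below): as noted above, extra synaptic ports can be requested through the
# array parameters ``tau_syn`` and ``E_rev``.  The node created here has three
# ports and is left unconnected; the port values are assumptions chosen only
# to show the parameter format.
n_three_port = nest.Create("glif_cond",
                           params={"tau_syn": [0.2, 2.0, 5.0],
                                   "E_rev": [0.0, -85.0, -85.0]})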
###############################################################################
# For the stimulation input to the glif_cond neurons, we create one excitatory
# spike generator and one inhibitory spike generator, each of which emits
# three spikes; we also create one step current generator, a Poisson
# generator, and a parrot neuron (to be paired with the Poisson generator).
# The three different injections are spread over three different time periods,
# i.e., 0 ms ~ 200 ms, 200 ms ~ 500 ms, and 600 ms ~ 900 ms.
# Configuration of the current generator includes the definition of the start
# and stop times and the amplitude of the injected current. Configuration of
# the Poisson generator includes the definition of the start and stop times and
# the rate of the injected spike train.
espikes = nest.Create("spike_generator",
params={"spike_times": [10., 100., 150.],
"spike_weights": [20.]*3})
ispikes = nest.Create("spike_generator",
params={"spike_times": [15., 99., 150.],
"spike_weights": [-20.]*3})
cg = nest.Create("step_current_generator",
params={"amplitude_values": [400., ],
"amplitude_times": [200., ],
"start": 200., "stop": 500.})
pg = nest.Create("poisson_generator",
params={"rate": 15000., "start": 600., "stop": 900.})
pn = nest.Create("parrot_neuron")
###############################################################################
# The generators are then connected to the neurons. Specification of
# the ``receptor_type`` uniquely defines the target receptor.
# We connect the current generator to receptor 0, the excitatory spike
# generator and the Poisson generator (via the parrot neuron) to receptor 1,
# and the inhibitory spike generator to receptor 2 of the GLIF neurons.
# Note that the Poisson generator is connected to the parrot neuron, which
# relays the spikes to the glif_cond neurons.
nest.Connect(cg, neurons, syn_spec={"delay": resolution})
nest.Connect(espikes, neurons,
syn_spec={"delay": resolution, "receptor_type": 1})
nest.Connect(ispikes, neurons,
syn_spec={"delay": resolution, "receptor_type": 2})
nest.Connect(pg, pn, syn_spec={"delay": resolution})
nest.Connect(pn, neurons, syn_spec={"delay": resolution, "receptor_type": 1})
###############################################################################
# A ``multimeter`` is created and connected to the neurons. The parameters
# specified for the multimeter include the list of quantities that should be
# recorded and the time interval at which quantities are measured.
mm = nest.Create("multimeter",
params={"interval": resolution,
"record_from": ["V_m", "I", "g_1", "g_2",
"threshold",
"threshold_spike",
"threshold_voltage",
"ASCurrents_sum"]})
nest.Connect(mm, neurons)
###############################################################################
# A ``spike_recorder`` is created and connected to the neurons to record the
# spikes generated by the glif_cond neurons.
sr = nest.Create("spike_recorder")
nest.Connect(neurons, sr)
###############################################################################
# Run the simulation for 1000 ms and retrieve recorded data from
# the multimeter and spike recorder.
nest.Simulate(1000.)
data = mm.events
senders = data["senders"]
spike_data = sr.events
spike_senders = spike_data["senders"]
spikes = spike_data["times"]
###############################################################################
# We plot the time traces of the membrane potential (in blue), the overall
# threshold (in green), and the spikes (as red dots) in one panel;
# the spike component of the threshold (in yellow) and the voltage component
# of the threshold (in black) in another panel; the injected currents (in
# strong blue) and the sum of after-spike currents (in cyan) in the third
# panel; and the synaptic conductances of the two receptors (in blue and
# orange) in response to the spike inputs in the fourth panel. These four
# panels are plotted in a separate figure for each level of GLIF model.
glif_models = ["lif", "lif_r", "lif_asc", "lif_r_asc", "lif_r_asc_a"]
for i in range(len(glif_models)):
glif_model = glif_models[i]
node_id = neurons[i].global_id
plt.figure(glif_model)
gs = gridspec.GridSpec(4, 1, height_ratios=[2, 1, 1, 1])
t = data["times"][senders == 1]
ax1 = plt.subplot(gs[0])
plt.plot(t, data["V_m"][senders == node_id], "b")
plt.plot(t, data["threshold"][senders == node_id], "g--")
plt.plot(spikes[spike_senders == node_id],
[max(data["threshold"][senders == node_id]) * 0.95] *
len(spikes[spike_senders == node_id]), "r.")
plt.legend(["V_m", "threshold", "spike"])
plt.ylabel("V (mV)")
plt.title("Simulation of glif_cond neuron of " + glif_model)
ax2 = plt.subplot(gs[1])
plt.plot(t, data["threshold_spike"][senders == node_id], "y")
plt.plot(t, data["threshold_voltage"][senders == node_id], "k--")
plt.legend(["threshold_spike", "threshold_voltage"])
plt.ylabel("V (mV)")
ax3 = plt.subplot(gs[2])
plt.plot(t, data["I"][senders == node_id], "--")
plt.plot(t, data["ASCurrents_sum"][senders == node_id], "c-.")
plt.legend(["I_e", "ASCurrents_sum", "I_syn"])
plt.ylabel("I (pA)")
plt.xlabel("t (ms)")
ax4 = plt.subplot(gs[3])
plt.plot(t, data["g_1"][senders == node_id], "-")
plt.plot(t, data["g_2"][senders == node_id], "--")
plt.legend(["G_1", "G_2"])
plt.ylabel("G (nS)")
plt.xlabel("t (ms)")
plt.show()
| gpl-2.0 |
michaelbramwell/sms-tools | lectures/06-Harmonic-model/plots-code/f0Twm-piano.py | 19 | 1261 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackman
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import stft as STFT
import sineModel as SM
import harmonicModel as HM
(fs, x) = UF.wavread('../../../sounds/piano.wav')
w = np.blackman(1501)
N = 2048
t = -90
minf0 = 100
maxf0 = 300
f0et = 1
maxnpeaksTwm = 4
H = 128
x1 = x[int(1.5*fs):int(1.8*fs)]
plt.figure(1, figsize=(9, 7))
mX, pX = STFT.stftAnal(x, fs, w, N, H)
f0 = HM.f0Detection(x, fs, w, N, H, t, minf0, maxf0, f0et)
f0 = UF.cleaningTrack(f0, 5)
yf0 = UF.sinewaveSynth(f0, .8, H, fs)
f0[f0==0] = np.nan
maxplotfreq = 800.0
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(N*maxplotfreq/fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:int(N*maxplotfreq/fs)+1]))
plt.autoscale(tight=True)
plt.plot(frmTime, f0, linewidth=2, color='k')
plt.autoscale(tight=True)
plt.title('mX + f0 (piano.wav), TWM')
plt.tight_layout()
plt.savefig('f0Twm-piano.png')
UF.wavwrite(yf0, fs, 'f0Twm-piano.wav')
plt.show()
| agpl-3.0 |
alexsavio/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
idealabasu/code_pynamics | python/pynamics_examples/bouncy2.py | 1 | 3683 | # -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes
Email: danaukes<at>gmail.com
Please see LICENSE for full license.
"""
import pynamics
from pynamics.frame import Frame
from pynamics.variable_types import Differentiable,Constant,Variable
from pynamics.system import System
from pynamics.body import Body
from pynamics.dyadic import Dyadic
from pynamics.output import Output
from pynamics.output import PointsOutput
from pynamics.particle import Particle
import pynamics.integration
import sympy
import numpy
import matplotlib.pyplot as plt
plt.ion()
from math import pi
system = System()
pynamics.set_system(__name__,system)
error = 1e-3
error_tol = 1e-3
alpha = 1e6
beta = 1e5
#preload1 = Constant('preload1',0*pi/180,system)
a = Constant(0,'a',system)
l1 = Constant(1,'l1',system)
m1 = Constant(1e1,'m1',system)
m2 = Constant(1e0,'m2',system)
k = Constant(1e4,'k',system)
l0 = Constant(1,'l0',system)
b = Constant(5e0,'b',system)
g = Constant(9.81,'g',system)
Ixx_A = Constant(1,'Ixx_A',system)
Iyy_A = Constant(1,'Iyy_A',system)
Izz_A = Constant(1,'Izz_A',system)
tinitial = 0
tfinal = 10
tstep = 1/30
t = numpy.r_[tinitial:tfinal:tstep]
x1,x1_d,x1_dd = Differentiable('x1',system)
y1,y1_d,y1_dd = Differentiable('y1',system)
q1,q1_d,q1_dd = Differentiable('q1',system)
y2,y2_d,y2_dd = Differentiable('x2',system)
initialvalues = {}
initialvalues[q1]=0
initialvalues[q1_d]=.01
initialvalues[x1]=0
initialvalues[x1_d]=0
initialvalues[y1]=2
initialvalues[y1_d]=0
initialvalues[y2]=1
initialvalues[y2_d]=0
statevariables = system.get_state_variables()
ini = [initialvalues[item] for item in statevariables]
N = Frame('N')
A = Frame('A')
system.set_newtonian(N)
A.rotate_fixed_axis_directed(N,[0,0,1],q1,system)
pOrigin = 0*N.x
pm1 = x1*N.x +y1*N.y
pm2 = pm1 +a*A.x - y2*A.y
IA = Dyadic.build(A,Ixx_A,Iyy_A,Izz_A)
BodyA = Body('BodyA',A,pm1,m1,IA,system)
Particle2 = Particle(pm2,m2,'Particle2',system)
vpm1 = pm1.time_derivative(N,system)
vpm2 = pm2.time_derivative(N,system)
l_ = pm1-pm2
l = (l_.dot(l_))**.5
l_d =system.derivative(l)
stretch = l - l0
ul_ = l_*(l**-1)
vl = l_.time_derivative(N,system)
system.add_spring_force1(k,stretch*ul_,vl)
#system.addforce(-k*stretch*ul_,vpm1)
#system.addforce(k*stretch*ul_,vpm2)
system.addforce(-b*l_d*ul_,vpm1)
system.addforce(b*l_d*ul_,vpm2)
#system.addforce(k*l*ul_,vpm2)
#system.addforce(-b*vl,vl)
#system.addforce(-b*vl,vl)
#system.addforce(-b*vl,vl)
system.addforcegravity(-g*N.y)
#system.addforcegravity(-g*N.y)
#system.addforcegravity(-g*N.y)
eq1 = []
eq1.append(pm1.dot(N.y)-0)
eq1.append(pm2.dot(N.y)-0)
eq1_d=[system.derivative(item) for item in eq1]
eq1_dd=[system.derivative(system.derivative(item)) for item in eq1]
a = []
a.append(0-pm1.dot(N.y))
a.append(0-pm2.dot(N.y))
b = [(item+abs(item)) for item in a]
x1 = BodyA.pCM.dot(N.y)
x2 = Particle2.pCM.dot(N.y)
f,ma = system.getdynamics()
#func = system.state_space_post_invert(f,ma,eq)
func = system.state_space_post_invert2(f,ma,eq1_dd,eq1_d,eq1,eq_active = b)
states=pynamics.integration.integrate_odeint(func,ini,t,rtol = error, atol = error, args=({'alpha':alpha,'beta':beta, 'constants':system.constant_values},),full_output = 1,mxstep = int(1e5))
states = states[0]
KE = system.get_KE()
PE = system.getPEGravity(pOrigin) - system.getPESprings()
output = Output([x1,x2,l, KE-PE],system)
y = output.calc(states)
plt.figure(0)
plt.plot(t,y[:,0])
plt.plot(t,y[:,1])
plt.axis('equal')
plt.figure(1)
plt.plot(t,y[:,2])
plt.axis('equal')
plt.figure(2)
plt.plot(t,y[:,3])
#plt.axis('equal')
points = [BodyA.pCM,Particle2.pCM]
points = PointsOutput(points)
points.calc(states)
points.animate(fps = 30, movie_name='bouncy2.mp4',lw=2)
| mit |
waldol1/BYU-AWESOME | scripts/cbad_simple_evaluation/add_extracted_points1.py | 1 | 3329 | import sys
import cv2
import numpy as np
import os
import matplotlib.pyplot as plt
import json
from copy import deepcopy
import time
def pred_to_pts(color_img):
global_threshold = 127
slice_size = 25
# small_threshold = 0
small_threshold = 250
img = cv2.cvtColor( color_img, cv2.COLOR_RGB2GRAY )
ret, th = cv2.threshold(img,global_threshold,255,cv2.THRESH_BINARY)
connectivity = 4
s = time.time()
output= cv2.connectedComponentsWithStats(th, connectivity, cv2.CV_32S)
baselines = []
#skip background
for label_id in xrange(1, output[0]):
min_x = output[2][label_id][0]
min_y = output[2][label_id][1]
max_x = output[2][label_id][2] + min_x
max_y = output[2][label_id][3] + min_y
cnt = output[2][label_id][4]
if cnt < small_threshold:
continue
baseline = output[1][min_y:max_y, min_x:max_x]
pts = []
x_all, y_all = np.where(baseline == label_id)
first_idx = y_all.argmin()
first = (y_all[first_idx]+min_x, x_all[first_idx]+min_y)
pts.append(first)
for i in xrange(0, baseline.shape[1], slice_size):
next_i = i+slice_size
baseline_slice = baseline[:, i:next_i]
x, y = np.where(baseline_slice == label_id)
x_avg = x.mean()
y_avg = y.mean()
pts.append((int(y_avg+i+min_x), int(x_avg+min_y)))
last_idx = y_all.argmax()
last = (y_all[last_idx]+min_x, x_all[last_idx]+min_y)
pts.append(last)
if len(pts) <= 1:
continue
baselines.append(pts)
# img_copy = color_img.copy()
# for b in baselines:
# pts = np.array(b, np.int32)
# pts = pts.reshape((-1,1,2))
# cv2.polylines(img_copy,[pts],False,(0,255,255), thickness=1)
#
# plt.imshow(img_copy)
# plt.show()
return baselines
def write_baseline_pts(baselines, filename):
with open(filename, 'w') as f:
for baseline in baselines:
baseline_txt = []
for pt in baseline:
pt_txt = "{},{}".format(*pt)
baseline_txt.append(pt_txt)
f.write(";".join(baseline_txt)+"\n")
if __name__ == "__main__":
input_paths_path = sys.argv[1]
output_paths_path = sys.argv[2]
output_txt_folder_path = sys.argv[3]
with open(input_paths_path) as f:
input_paths = json.load(f)
output_paths = []
start = time.time()
cnt = 0
for i, input_path in enumerate(input_paths):
if i%10 == 0:
print i, cnt, (time.time() - start) / (i+1)
# image_path = input_path['gt_pixel_img_path']
image_path = input_path['pred_pixel_path']
save_path = os.path.basename(image_path)
save_path = os.path.splitext(save_path)[0]
save_path = "{}-{}.txt".format(save_path, i)
save_path = os.path.join(output_txt_folder_path, save_path)
img = cv2.imread(image_path)
baselines = pred_to_pts(img)
cnt += len(baselines)
write_baseline_pts(baselines, save_path)
output_path = deepcopy(input_path)
output_path['pred_baseline_path'] = save_path
output_paths.append(output_path)
with open(output_paths_path, 'w') as f:
json.dump(output_paths, f)
| gpl-3.0 |