code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---
def momcond(self, params): #drop distfn as argument
#, mom2, quantile=None, shape=None
    '''moment conditions for estimating distribution parameters by matching
    quantiles; defines as many moment conditions as there are quantiles.
Returns
-------
difference : ndarray
difference between theoretical and empirical quantiles
Notes
-----
This can be used for method of moments or for generalized method of
moments.
'''
    #this check looks redundant/unused now
if len(params) == 2:
loc, scale = params
elif len(params) == 3:
shape, loc, scale = params
else:
#raise NotImplementedError
pass #see whether this might work, seems to work for beta with 2 shape args
#mom2diff = np.array(distfn.stats(*params)) - mom2
#if not quantile is None:
pq, xq = self.pquant, self.xquant
#ppfdiff = distfn.ppf(pq, alpha)
cdfdiff = self.distfn.cdf(xq, *params) - pq
#return np.concatenate([mom2diff, cdfdiff[:1]])
return np.atleast_2d(cdfdiff) | moment conditions for estimating distribution parameters by matching
quantiles; defines as many moment conditions as there are quantiles.
Returns
-------
difference : ndarray
difference between theoretical and empirical quantiles
Notes
-----
This can be used for method of moments or for generalized method of
moments. | momcond | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause |
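
A standalone sketch of the same quantile-matching conditions outside the class (simulated gamma data; the helper name `momcond_standalone` is hypothetical, and `pq`/`xq` stand in for `self.pquant`/`self.xquant`):

```python
import numpy as np
from scipy import stats

np.random.seed(0)
x = stats.gamma.rvs(2.5, loc=0, scale=1.2, size=500)

pq = np.array([0.1, 0.5, 0.9])   # probabilities of the matched quantiles
xq = np.quantile(x, pq)          # empirical quantiles

def momcond_standalone(params):
    shape, loc, scale = params
    # one moment condition per quantile; near zero at the true parameters
    return np.atleast_2d(stats.gamma.cdf(xq, shape, loc, scale) - pq)

print(momcond_standalone([2.5, 0.0, 1.2]))
```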
def fitonce(self, start=None, weights=None, has_optimal_weights=False):
'''fit without estimating an optimal weighting matrix and return results
    This is a convenience function that calls fitgmm and cov_params with
    a given weight matrix or the identity weight matrix.
    This is useful if the optimal weight matrix is known (or is analytically
    given) or if an optimal weight matrix cannot be calculated.
(Developer Notes: this function could go into GMM, but is needed in this
class, at least at the moment.)
Parameters
----------
Returns
-------
results : GMMResult instance
result instance with params and _cov_params attached
See Also
--------
fitgmm
cov_params
'''
if weights is None:
weights = np.eye(self.nmoms)
params = self.fitgmm(start=start)
# TODO: rewrite this old hack, should use fitgmm or fit maxiter=0
self.results.params = params #required before call to self.cov_params
self.results.wargs = {} #required before call to self.cov_params
self.results.options_other = {'weights_method':'cov'}
# TODO: which weights_method? There should not be any needed ?
_cov_params = self.results.cov_params(weights=weights,
has_optimal_weights=has_optimal_weights)
self.results.weights = weights
self.results.jval = self.gmmobjective(params, weights)
self.results.options_other.update({'has_optimal_weights':has_optimal_weights})
return self.results | fit without estimating an optimal weighting matrix and return results
This is a convenience function that calls fitgmm and cov_params with
a given weight matrix or the identity weight matrix.
This is useful if the optimal weight matrix is known (or is analytically
given) or if an optimal weight matrix cannot be calculated.
(Developer Notes: this function could go into GMM, but is needed in this
class, at least at the moment.)
Parameters
----------
Returns
-------
results : GMMResult instance
result instance with params and _cov_params attached
See Also
--------
fitgmm
cov_params | fitonce | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause |
def atleast_2dcol(x):
''' convert array_like to 2d from 1d or 0d
not tested because not used
'''
x = np.asarray(x)
if (x.ndim == 1):
x = x[:, None]
elif (x.ndim == 0):
x = np.atleast_2d(x)
    elif (x.ndim > 2):  # 2d input passes through unchanged
raise ValueError('too many dimensions')
return x | convert array_like to 2d from 1d or 0d
not tested because not used | atleast_2dcol | python | statsmodels/statsmodels | statsmodels/sandbox/regression/predstd.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/predstd.py | BSD-3-Clause |
def wls_prediction_std(res, exog=None, weights=None, alpha=0.05):
'''calculate standard deviation and confidence interval for prediction
    applies to WLS and OLS, but not to general GLS,
    that is, to independently but not identically distributed observations
Parameters
----------
res : regression result instance
results of WLS or OLS regression required attributes see notes
exog : array_like (optional)
exogenous variables for points to predict
weights : scalar or array_like (optional)
weights as defined for WLS (inverse of variance of observation)
alpha : float (default: alpha = 0.05)
confidence level for two-sided hypothesis
Returns
-------
predstd : array_like, 1d
standard error of prediction
same length as rows of exog
interval_l, interval_u : array_like
        lower and upper confidence bounds
Notes
-----
The result instance needs to have at least the following
res.model.predict() : predicted values or
res.fittedvalues : values used in estimation
res.cov_params() : covariance matrix of parameter estimates
If exog is 1d, then it is interpreted as one observation,
i.e. a row vector.
testing status: not compared with other packages
References
----------
Greene p.111 for OLS, extended to WLS by analogy
'''
# work around current bug:
# fit does not attach results to model, predict broken
#res.model.results
covb = res.cov_params()
if exog is None:
exog = res.model.exog
predicted = res.fittedvalues
if weights is None:
weights = res.model.weights
else:
exog = np.atleast_2d(exog)
if covb.shape[1] != exog.shape[1]:
raise ValueError('wrong shape of exog')
predicted = res.model.predict(res.params, exog)
if weights is None:
weights = 1.
else:
weights = np.asarray(weights)
if weights.size > 1 and len(weights) != exog.shape[0]:
raise ValueError('weights and exog do not have matching shape')
# full covariance:
#predvar = res3.mse_resid + np.diag(np.dot(X2,np.dot(covb,X2.T)))
    # prediction variance only
predvar = res.mse_resid/weights + (exog * np.dot(covb, exog.T).T).sum(1)
predstd = np.sqrt(predvar)
tppf = stats.t.isf(alpha/2., res.df_resid)
interval_u = predicted + tppf * predstd
interval_l = predicted - tppf * predstd
return predstd, interval_l, interval_u | calculate standard deviation and confidence interval for prediction
applies to WLS and OLS, but not to general GLS,
that is, to independently but not identically distributed observations
Parameters
----------
res : regression result instance
results of WLS or OLS regression required attributes see notes
exog : array_like (optional)
exogenous variables for points to predict
weights : scalar or array_like (optional)
weights as defined for WLS (inverse of variance of observation)
alpha : float (default: alpha = 0.05)
confidence level for two-sided hypothesis
Returns
-------
predstd : array_like, 1d
standard error of prediction
same length as rows of exog
interval_l, interval_u : array_like
lower and upper confidence bounds
Notes
-----
The result instance needs to have at least the following
res.model.predict() : predicted values or
res.fittedvalues : values used in estimation
res.cov_params() : covariance matrix of parameter estimates
If exog is 1d, then it is interpreted as one observation,
i.e. a row vector.
testing status: not compared with other packages
References
----------
Greene p.111 for OLS, extended to WLS by analogy | wls_prediction_std | python | statsmodels/statsmodels | statsmodels/sandbox/regression/predstd.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/predstd.py | BSD-3-Clause |
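
A usage sketch for `wls_prediction_std` on a simulated OLS fit (the data are made up; the statsmodels calls are the standard public API):

```python
import numpy as np
import statsmodels.api as sm
from statsmodels.sandbox.regression.predstd import wls_prediction_std

np.random.seed(0)
x = np.linspace(0, 10, 50)
X = sm.add_constant(x)
y = 1.0 + 0.5 * x + np.random.normal(scale=0.8, size=50)

res = sm.OLS(y, X).fit()
predstd, iv_l, iv_u = wls_prediction_std(res, alpha=0.05)
print(predstd[:3])         # standard error of prediction
print(iv_l[:3], iv_u[:3])  # 95% prediction interval bounds
```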
def pdf_moments_st(cnt):
"""Return the Gaussian expanded pdf function given the list of central
moments (first one is mean).
version of scipy.stats, any changes ?
the scipy.stats version has a bug and returns normal distribution
"""
N = len(cnt)
if N < 2:
raise ValueError("At least two moments must be given to "
"approximate the pdf.")
totp = poly1d(1)
sig = sqrt(cnt[1])
mu = cnt[0]
if N > 2:
Dvals = _hermnorm(N + 1)
for k in range(3, N + 1):
# Find Ck
Ck = 0.0
            for n in range((k - 3) // 2):  # floor division, needed on Python 3
m = k - 2 * n
if m % 2: # m is odd
momdiff = cnt[m - 1]
else:
                    # assumes `from scipy import special`; scipy.factorial2
                    # was removed from scipy's top-level namespace
                    momdiff = cnt[m - 1] - sig * sig * special.factorial2(m - 1)
Ck += Dvals[k][m] / sig ** m * momdiff
# Add to totp
            #raise SystemError  # debugging stop, disabled so the loop runs
            #print(Dvals)
            #print(Ck)
totp = totp + Ck * Dvals[k]
def thisfunc(x):
xn = (x - mu) / sig
return totp(xn) * exp(-xn * xn / 2.0) / sqrt(2 * np.pi) / sig
return thisfunc, totp | Return the Gaussian expanded pdf function given the list of central
moments (first one is mean).
version of scipy.stats, any changes ?
the scipy.stats version has a bug and returns normal distribution | pdf_moments_st | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/extras.py | BSD-3-Clause |
def pdf_mvsk(mvsk):
"""Return the Gaussian expanded pdf function given the list of 1st, 2nd
moment and skew and Fisher (excess) kurtosis.
Parameters
----------
mvsk : list of mu, mc2, skew, kurt
distribution is matched to these four moments
Returns
-------
pdffunc : function
function that evaluates the pdf(x), where x is the non-standardized
random variable.
Notes
-----
Changed so it works only if four arguments are given. Uses explicit
formula, not loop.
This implements a Gram-Charlier expansion of the normal distribution
where the first 2 moments coincide with those of the normal distribution
but skew and kurtosis can deviate from it.
In the Gram-Charlier distribution it is possible that the density
becomes negative. This is the case when the deviation from the
normal distribution is too large.
References
----------
https://en.wikipedia.org/wiki/Edgeworth_series
Johnson N.L., S. Kotz, N. Balakrishnan: Continuous Univariate
Distributions, Volume 1, 2nd ed., p.30
"""
N = len(mvsk)
if N < 4:
raise ValueError("Four moments must be given to "
"approximate the pdf.")
mu, mc2, skew, kurt = mvsk
totp = poly1d(1)
sig = sqrt(mc2)
if N > 2:
Dvals = _hermnorm(N + 1)
C3 = skew / 6.0
C4 = kurt / 24.0
# Note: Hermite polynomial for order 3 in _hermnorm is negative
# instead of positive
totp = totp - C3 * Dvals[3] + C4 * Dvals[4]
def pdffunc(x):
xn = (x - mu) / sig
return totp(xn) * np.exp(-xn * xn / 2.0) / np.sqrt(2 * np.pi) / sig
return pdffunc | Return the Gaussian expanded pdf function given the list of 1st, 2nd
moment and skew and Fisher (excess) kurtosis.
Parameters
----------
mvsk : list of mu, mc2, skew, kurt
distribution is matched to these four moments
Returns
-------
pdffunc : function
function that evaluates the pdf(x), where x is the non-standardized
random variable.
Notes
-----
Changed so it works only if four arguments are given. Uses explicit
formula, not loop.
This implements a Gram-Charlier expansion of the normal distribution
where the first 2 moments coincide with those of the normal distribution
but skew and kurtosis can deviate from it.
In the Gram-Charlier distribution it is possible that the density
becomes negative. This is the case when the deviation from the
normal distribution is too large.
References
----------
https://en.wikipedia.org/wiki/Edgeworth_series
Johnson N.L., S. Kotz, N. Balakrishnan: Continuous Univariate
Distributions, Volume 1, 2nd ed., p.30 | pdf_mvsk | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/extras.py | BSD-3-Clause |
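
A usage sketch for `pdf_mvsk`; the moment values are arbitrary illustration choices:

```python
import numpy as np
from scipy import stats
from statsmodels.sandbox.distributions.extras import pdf_mvsk

# density matched to mean 0, variance 1, skew 0.5, excess kurtosis 1.0
pdffunc = pdf_mvsk([0.0, 1.0, 0.5, 1.0])
xx = np.linspace(-3, 3, 7)
print(pdffunc(xx))           # Gram-Charlier expanded density
print(stats.norm.pdf(xx))    # normal reference (skew 0, excess kurtosis 0)
```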
def pdf_moments(cnt):
"""Return the Gaussian expanded pdf function given the list of central
moments (first one is mean).
Changed so it works only if four arguments are given. Uses explicit
formula, not loop.
Notes
-----
This implements a Gram-Charlier expansion of the normal distribution
where the first 2 moments coincide with those of the normal distribution
but skew and kurtosis can deviate from it.
In the Gram-Charlier distribution it is possible that the density
becomes negative. This is the case when the deviation from the
normal distribution is too large.
References
----------
https://en.wikipedia.org/wiki/Edgeworth_series
Johnson N.L., S. Kotz, N. Balakrishnan: Continuous Univariate
Distributions, Volume 1, 2nd ed., p.30
"""
N = len(cnt)
    if N < 4:
        raise ValueError("Four moments must be given to "
                         "approximate the pdf.")
mc, mc2, mc3, mc4 = cnt
skew = mc3 / mc2 ** 1.5
kurt = mc4 / mc2 ** 2.0 - 3.0 # Fisher kurtosis, excess kurtosis
totp = poly1d(1)
sig = sqrt(cnt[1])
mu = cnt[0]
if N > 2:
Dvals = _hermnorm(N + 1)
## for k in range(3,N+1):
## # Find Ck
## Ck = 0.0
## for n in range((k-3)/2):
## m = k-2*n
## if m % 2: # m is odd
## momdiff = cnt[m-1]
## else:
## momdiff = cnt[m-1] - sig*sig*scipy.factorial2(m-1)
## Ck += Dvals[k][m] / sig**m * momdiff
## # Add to totp
## raise
## print Dvals
## print Ck
## totp = totp + Ck*Dvals[k]
C3 = skew / 6.0
C4 = kurt / 24.0
totp = totp - C3 * Dvals[3] + C4 * Dvals[4]
def thisfunc(x):
xn = (x - mu) / sig
return totp(xn) * np.exp(-xn * xn / 2.0) / np.sqrt(2 * np.pi) / sig
return thisfunc | Return the Gaussian expanded pdf function given the list of central
moments (first one is mean).
Changed so it works only if four arguments are given. Uses explicit
formula, not loop.
Notes
-----
This implements a Gram-Charlier expansion of the normal distribution
where the first 2 moments coincide with those of the normal distribution
but skew and kurtosis can deviate from it.
In the Gram-Charlier distribution it is possible that the density
becomes negative. This is the case when the deviation from the
normal distribution is too large.
References
----------
https://en.wikipedia.org/wiki/Edgeworth_series
Johnson N.L., S. Kotz, N. Balakrishnan: Continuous Univariate
Distributions, Volume 1, 2nd ed., p.30 | pdf_moments | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/extras.py | BSD-3-Clause |
def mvstdnormcdf(lower, upper, corrcoef, **kwds):
"""standardized multivariate normal cumulative distribution function
This is a wrapper for scipy.stats._mvn.mvndst which calculates
a rectangular integral over a standardized multivariate normal
distribution.
This function assumes standardized scale, that is the variance in each dimension
is one, but correlation can be arbitrary, covariance = correlation matrix
Parameters
----------
lower, upper : array_like, 1d
lower and upper integration limits with length equal to the number
of dimensions of the multivariate normal distribution. It can contain
-np.inf or np.inf for open integration intervals
corrcoef : float or array_like
specifies correlation matrix in one of three ways, see notes
optional keyword parameters to influence integration
* maxpts : int, maximum number of function values allowed. This
parameter can be used to limit the time. A sensible
strategy is to start with `maxpts` = 1000*N, and then
increase `maxpts` if ERROR is too large.
* abseps : float absolute error tolerance.
* releps : float relative error tolerance.
Returns
-------
cdfvalue : float
value of the integral
Notes
-----
The correlation matrix corrcoef can be given in 3 different ways
    If the multivariate normal is two-dimensional then only the
    correlation coefficient needs to be provided.
    For general dimension the correlation matrix can be provided either
    as a one-dimensional array of the lower triangular correlation
    coefficients stacked by rows, or as the full square correlation matrix
See Also
--------
mvnormcdf : cdf of multivariate normal distribution without
standardization
Examples
--------
>>> print(mvstdnormcdf([-np.inf,-np.inf], [0.0,np.inf], 0.5))
0.5
>>> corr = [[1.0, 0, 0.5],[0,1,0],[0.5,0,1]]
>>> print(mvstdnormcdf([-np.inf,-np.inf,-100.0], [0.0,0.0,0.0], corr, abseps=1e-6))
0.166666399198
>>> print(mvstdnormcdf([-np.inf,-np.inf,-100.0],[0.0,0.0,0.0],corr, abseps=1e-8))
something wrong completion with ERROR > EPS and MAXPTS function values used;
increase MAXPTS to decrease ERROR; 1.048330348e-006
0.166666546218
>>> print(mvstdnormcdf([-np.inf,-np.inf,-100.0],[0.0,0.0,0.0], corr, \
maxpts=100000, abseps=1e-8))
0.166666588293
"""
n = len(lower)
# do not know if converting to array is necessary,
# but it makes ndim check possible
lower = np.array(lower)
upper = np.array(upper)
corrcoef = np.array(corrcoef)
correl = np.zeros(int(n * (n - 1) / 2.0)) # dtype necessary?
if (lower.ndim != 1) or (upper.ndim != 1):
raise ValueError('can handle only 1D bounds')
if len(upper) != n:
raise ValueError('bounds have different lengths')
if n == 2 and corrcoef.size == 1:
correl = corrcoef
# print 'case scalar rho', n
elif corrcoef.ndim == 1 and len(corrcoef) == n * (n - 1) / 2.0:
# print 'case flat corr', corrcoeff.shape
correl = corrcoef
elif corrcoef.shape == (n, n):
# print 'case square corr', correl.shape
correl = corrcoef[np.tril_indices(n, -1)]
# for ii in range(n):
# for jj in range(ii):
# correl[ jj + ((ii-2)*(ii-1))/2] = corrcoef[ii,jj]
else:
raise ValueError('corrcoef has incorrect dimension')
if 'maxpts' not in kwds:
if n > 2:
kwds['maxpts'] = 10000 * n
lowinf = np.isneginf(lower)
uppinf = np.isposinf(upper)
infin = 2.0 * np.ones(n)
np.putmask(infin, lowinf, 0) # infin.putmask(0,lowinf)
np.putmask(infin, uppinf, 1) # infin.putmask(1,uppinf)
# this has to be last
np.putmask(infin, lowinf * uppinf, -1)
## #remove infs
## np.putmask(lower,lowinf,-100)# infin.putmask(0,lowinf)
## np.putmask(upper,uppinf,100) #infin.putmask(1,uppinf)
# print lower,',',upper,',',infin,',',correl
# print correl.shape
# print kwds.items()
error, cdfvalue, inform = mvndst(lower, upper, infin, correl, **kwds)
if inform:
print('something wrong', informcode[inform], error)
return cdfvalue | standardized multivariate normal cumulative distribution function
This is a wrapper for scipy.stats._mvn.mvndst which calculates
a rectangular integral over a standardized multivariate normal
distribution.
This function assumes standardized scale, that is the variance in each dimension
is one, but correlation can be arbitrary, covariance = correlation matrix
Parameters
----------
lower, upper : array_like, 1d
lower and upper integration limits with length equal to the number
of dimensions of the multivariate normal distribution. It can contain
-np.inf or np.inf for open integration intervals
corrcoef : float or array_like
specifies correlation matrix in one of three ways, see notes
optional keyword parameters to influence integration
* maxpts : int, maximum number of function values allowed. This
parameter can be used to limit the time. A sensible
strategy is to start with `maxpts` = 1000*N, and then
increase `maxpts` if ERROR is too large.
* abseps : float absolute error tolerance.
* releps : float relative error tolerance.
Returns
-------
cdfvalue : float
value of the integral
Notes
-----
The correlation matrix corrcoef can be given in 3 different ways
If the multivariate normal is two-dimensional then only the
correlation coefficient needs to be provided.
For general dimension the correlation matrix can be provided either
as a one-dimensional array of the lower triangular correlation
coefficients stacked by rows, or as the full square correlation matrix
See Also
--------
mvnormcdf : cdf of multivariate normal distribution without
standardization
Examples
--------
>>> print(mvstdnormcdf([-np.inf,-np.inf], [0.0,np.inf], 0.5))
0.5
>>> corr = [[1.0, 0, 0.5],[0,1,0],[0.5,0,1]]
>>> print(mvstdnormcdf([-np.inf,-np.inf,-100.0], [0.0,0.0,0.0], corr, abseps=1e-6))
0.166666399198
>>> print(mvstdnormcdf([-np.inf,-np.inf,-100.0],[0.0,0.0,0.0],corr, abseps=1e-8))
something wrong completion with ERROR > EPS and MAXPTS function values used;
increase MAXPTS to decrease ERROR; 1.048330348e-006
0.166666546218
>>> print(mvstdnormcdf([-np.inf,-np.inf,-100.0],[0.0,0.0,0.0], corr, \
maxpts=100000, abseps=1e-8))
0.166666588293 | mvstdnormcdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/extras.py | BSD-3-Clause |
def mvnormcdf(upper, mu, cov, lower=None, **kwds):
"""multivariate normal cumulative distribution function
This is a wrapper for scipy.stats._mvn.mvndst which calculates
a rectangular integral over a multivariate normal distribution.
Parameters
----------
    upper, lower : array_like, 1d
        upper and lower integration limits with length equal to the number
        of dimensions of the multivariate normal distribution. They can
        contain -np.inf or np.inf for open integration intervals. `lower`
        is optional and defaults to -np.inf in every dimension.
    mu : array_like, 1d
        list or array of means
cov : array_like, 2d
specifies covariance matrix
optional keyword parameters to influence integration
* maxpts : int, maximum number of function values allowed. This
parameter can be used to limit the time. A sensible
strategy is to start with `maxpts` = 1000*N, and then
increase `maxpts` if ERROR is too large.
* abseps : float absolute error tolerance.
* releps : float relative error tolerance.
Returns
-------
cdfvalue : float
value of the integral
Notes
-----
This function normalizes the location and scale of the multivariate
normal distribution and then uses `mvstdnormcdf` to call the integration.
See Also
--------
mvstdnormcdf : location and scale standardized multivariate normal cdf
"""
upper = np.array(upper)
if lower is None:
lower = -np.ones(upper.shape) * np.inf
else:
lower = np.array(lower)
cov = np.array(cov)
stdev = np.sqrt(np.diag(cov)) # standard deviation vector
# do I need to make sure stdev is float and not int?
# is this correct to normalize to corr?
lower = (lower - mu) / stdev
upper = (upper - mu) / stdev
divrow = np.atleast_2d(stdev)
corr = cov / divrow / divrow.T
# v/np.sqrt(np.atleast_2d(np.diag(covv)))/np.sqrt(np.atleast_2d(np.diag(covv))).T
return mvstdnormcdf(lower, upper, corr, **kwds) | multivariate normal cumulative distribution function
This is a wrapper for scipy.stats._mvn.mvndst which calculates
a rectangular integral over a multivariate normal distribution.
Parameters
----------
upper, lower : array_like, 1d
upper and lower integration limits with length equal to the number
of dimensions of the multivariate normal distribution. They can
contain -np.inf or np.inf for open integration intervals. `lower`
is optional and defaults to -np.inf in every dimension.
mu : array_like, 1d
list or array of means
cov : array_like, 2d
specifies covariance matrix
optional keyword parameters to influence integration
* maxpts : int, maximum number of function values allowed. This
parameter can be used to limit the time. A sensible
strategy is to start with `maxpts` = 1000*N, and then
increase `maxpts` if ERROR is too large.
* abseps : float absolute error tolerance.
* releps : float relative error tolerance.
Returns
-------
cdfvalue : float
value of the integral
Notes
-----
This function normalizes the location and scale of the multivariate
normal distribution and then uses `mvstdnormcdf` to call the integration.
See Also
--------
mvstdnormcdf : location and scale standardized multivariate normal cdf | mvnormcdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/extras.py | BSD-3-Clause |
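
A usage sketch for `mvnormcdf`, checked against the closed-form bivariate orthant probability P(X1 <= 0, X2 <= 0) = 1/4 + arcsin(rho)/(2*pi):

```python
import numpy as np
from statsmodels.sandbox.distributions.extras import mvnormcdf

cov = [[1.0, 0.5],
       [0.5, 1.0]]
print(mvnormcdf([0.0, 0.0], [0.0, 0.0], cov))  # rho = 0.5
print(0.25 + np.arcsin(0.5) / (2 * np.pi))     # closed form, equals 1/3
```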
def mean_residual_life(x, frac=None, alpha=0.05):
'''empirical mean residual life or expected shortfall
Parameters
----------
x : 1-dimensional array_like
frac : list[float], optional
All entries must be between 0 and 1
alpha : float, default 0.05
FIXME: not actually used.
TODO:
check formula for std of mean
does not include case for all observations
last observations std is zero
vectorize loop using cumsum
frac does not work yet
'''
axis = 0 # searchsorted is 1d only
x = np.asarray(x)
nobs = x.shape[axis]
xsorted = np.sort(x, axis=axis)
if frac is None:
xthreshold = xsorted
else:
        xthreshold = xsorted[np.floor(nobs * np.asarray(frac)).astype(int)]
# use searchsorted instead of simple index in case of ties
xlargerindex = np.searchsorted(xsorted, xthreshold, side='right')
# TODO:replace loop with cumsum ?
result = []
for i in range(len(xthreshold)-1):
k_ind = xlargerindex[i]
rmean = x[k_ind:].mean()
# this does not work for last observations, nans
rstd = x[k_ind:].std()
rmstd = rstd/np.sqrt(nobs-k_ind) # std error of mean, check formula
result.append((k_ind, xthreshold[i], rmean, rmstd))
res = np.array(result)
crit = 1.96 # TODO: without loading stats, crit = -stats.t.ppf(0.05)
    # interval around the conditional mean (column 2), not the threshold
    confint = res[:, 2:3] + crit * res[:, -1:] * np.array([[-1, 1]])
return np.column_stack((res, confint)) | empirical mean residual life or expected shortfall
Parameters
----------
x : 1-dimensional array_like
frac : list[float], optional
All entries must be between 0 and 1
alpha : float, default 0.05
FIXME: not actually used.
TODO:
check formula for std of mean
does not include case for all observations
last observations std is zero
vectorize loop using cumsum
frac does not work yet | mean_residual_life | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/try_pot.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/try_pot.py | BSD-3-Clause |
def _fitstart(self, x):
'''example method, method of moment estimator as starting values
Parameters
----------
x : ndarray
data for which the parameters are estimated
Returns
-------
est : tuple
preliminary estimates used as starting value for fitting, not
necessarily a consistent estimator
Notes
-----
This needs to be written and attached to each individual distribution
This example was written for the gamma distribution, but not verified
with literature
'''
loc = np.min([x.min(),0])
a = 4/stats.skew(x)**2
scale = np.std(x) / np.sqrt(a)
return (a, loc, scale) | example method, method of moment estimator as starting values
Parameters
----------
x : ndarray
data for which the parameters are estimated
Returns
-------
est : tuple
preliminary estimates used as starting value for fitting, not
necessarily a consistent estimator
Notes
-----
This needs to be written and attached to each individual distribution
This example was written for the gamma distribution, but not verified
with literature | _fitstart | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause |
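
A standalone check of the gamma starting values used in `_fitstart` (simulated sample): for a gamma(a) variable, skew = 2/sqrt(a), so a = 4/skew**2, and var = a*scale**2, so scale = std/sqrt(a).

```python
import numpy as np
from scipy import stats

np.random.seed(0)
x = stats.gamma.rvs(2.5, loc=0, scale=1.2, size=2000)

a_start = 4 / stats.skew(x)**2              # from skew = 2/sqrt(a)
scale_start = np.std(x) / np.sqrt(a_start)  # from var = a*scale**2
print(a_start, scale_start)  # rough starting values near (2.5, 1.2)
```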
def _fitstart_beta(self, x, fixed=None):
'''method of moment estimator as starting values for beta distribution
Parameters
----------
x : ndarray
data for which the parameters are estimated
fixed : None or array_like
sequence of numbers and np.nan to indicate fixed parameters and parameters
to estimate
Returns
-------
est : tuple
preliminary estimates used as starting value for fitting, not
necessarily a consistent estimator
Notes
-----
This needs to be written and attached to each individual distribution
References
----------
for method of moment estimator for known loc and scale
https://en.wikipedia.org/wiki/Beta_distribution#Parameter_estimation
http://www.itl.nist.gov/div898/handbook/eda/section3/eda366h.htm
NIST reference also includes reference to MLE in
    Johnson, Kotz, and Balakrishnan, Volume II, pages 221-235
'''
#todo: separate out this part to be used for other compact support distributions
# e.g. rdist, vonmises, and truncnorm
# but this might not work because it might still be distribution specific
a, b = x.min(), x.max()
    eps = (b - a) * 0.01  # small positive margin beyond the data range
if fixed is None:
#this part not checked with books
loc = a - eps
        scale = (b - a) + 2 * eps  # spans [a - eps, b + eps]
else:
if np.isnan(fixed[-2]):
#estimate loc
loc = a - eps
else:
loc = fixed[-2]
if np.isnan(fixed[-1]):
#estimate scale
scale = (b + eps) - loc
else:
scale = fixed[-1]
#method of moment for known loc scale:
scale = float(scale)
xtrans = (x - loc)/scale
xm = xtrans.mean()
xv = xtrans.var()
tmp = (xm*(1-xm)/xv - 1)
p = xm * tmp
q = (1 - xm) * tmp
return (p, q, loc, scale) #check return type and should fixed be returned ? | method of moment estimator as starting values for beta distribution
Parameters
----------
x : ndarray
data for which the parameters are estimated
fixed : None or array_like
sequence of numbers and np.nan to indicate fixed parameters and parameters
to estimate
Returns
-------
est : tuple
preliminary estimates used as starting value for fitting, not
necessarily a consistent estimator
Notes
-----
This needs to be written and attached to each individual distribution
References
----------
for method of moment estimator for known loc and scale
https://en.wikipedia.org/wiki/Beta_distribution#Parameter_estimation
http://www.itl.nist.gov/div898/handbook/eda/section3/eda366h.htm
NIST reference also includes reference to MLE in
Johnson, Kotz, and Balakrishnan, Volume II, pages 221-235 | _fitstart_beta | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause |
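
A standalone check of the known-loc/scale method-of-moments formula at the end of `_fitstart_beta` (simulated sample on (0, 1), so loc=0 and scale=1):

```python
import numpy as np
from scipy import stats

np.random.seed(0)
x = stats.beta.rvs(2.0, 5.0, size=2000)

xm, xv = x.mean(), x.var()
tmp = xm * (1 - xm) / xv - 1    # common factor in both shape estimates
p, q = xm * tmp, (1 - xm) * tmp
print(p, q)  # roughly recovers (2.0, 5.0)
```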
def _fitstart_poisson(self, x, fixed=None):
'''maximum likelihood estimator as starting values for Poisson distribution
Parameters
----------
x : ndarray
data for which the parameters are estimated
fixed : None or array_like
sequence of numbers and np.nan to indicate fixed parameters and parameters
to estimate
Returns
-------
est : tuple
preliminary estimates used as starting value for fitting, not
necessarily a consistent estimator
Notes
-----
This needs to be written and attached to each individual distribution
References
----------
MLE :
https://en.wikipedia.org/wiki/Poisson_distribution#Maximum_likelihood
'''
#todo: separate out this part to be used for other compact support distributions
# e.g. rdist, vonmises, and truncnorm
# but this might not work because it might still be distribution specific
a = x.min()
eps = 0 # is this robust ?
if fixed is None:
#this part not checked with books
loc = a - eps
else:
if np.isnan(fixed[-1]):
#estimate loc
loc = a - eps
else:
loc = fixed[-1]
#MLE for standard (unshifted, if loc=0) Poisson distribution
xtrans = (x - loc)
lambd = xtrans.mean()
#second derivative d loglike/ dlambd Not used
#dlldlambd = 1/lambd # check
return (lambd, loc) #check return type and should fixed be returned ? | maximum likelihood estimator as starting values for Poisson distribution
Parameters
----------
x : ndarray
data for which the parameters are estimated
fixed : None or array_like
sequence of numbers and np.nan to indicate fixed parameters and parameters
to estimate
Returns
-------
est : tuple
preliminary estimates used as starting value for fitting, not
necessarily a consistent estimator
Notes
-----
This needs to be written and attached to each individual distribution
References
----------
MLE :
https://en.wikipedia.org/wiki/Poisson_distribution#Maximum_likelihood | _fitstart_poisson | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause |
def fit_fr(self, data, *args, **kwds):
'''estimate distribution parameters by MLE taking some parameters as fixed
Parameters
----------
data : ndarray, 1d
data for which the distribution parameters are estimated,
args : list ? check
starting values for optimization
kwds :
- 'frozen' : array_like
values for frozen distribution parameters and, for elements with
np.nan, the corresponding parameter will be estimated
Returns
-------
argest : ndarray
estimated parameters
Examples
--------
generate random sample
>>> np.random.seed(12345)
>>> x = stats.gamma.rvs(2.5, loc=0, scale=1.2, size=200)
estimate all parameters
>>> stats.gamma.fit(x)
array([ 2.0243194 , 0.20395655, 1.44411371])
>>> stats.gamma.fit_fr(x, frozen=[np.nan, np.nan, np.nan])
array([ 2.0243194 , 0.20395655, 1.44411371])
keep loc fixed, estimate shape and scale parameters
>>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, np.nan])
array([ 2.45603985, 1.27333105])
keep loc and scale fixed, estimate shape parameter
>>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
array([ 3.00048828])
>>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.2])
array([ 2.57792969])
estimate only scale parameter for fixed shape and loc
>>> stats.gamma.fit_fr(x, frozen=[2.5, 0.0, np.nan])
array([ 1.25087891])
Notes
-----
self is an instance of a distribution class. This can be attached to
scipy.stats.distributions.rv_continuous
*Todo*
* check if docstring is correct
* more input checking, args is list ? might also apply to current fit method
'''
loc0, scale0 = lmap(kwds.get, ['loc', 'scale'],[0.0, 1.0])
Narg = len(args)
if Narg == 0 and hasattr(self, '_fitstart'):
x0 = self._fitstart(data)
elif Narg > self.numargs:
raise ValueError("Too many input arguments.")
else:
args += (1.0,)*(self.numargs-Narg)
# location and scale are at the end
x0 = args + (loc0, scale0)
if 'frozen' in kwds:
frmask = np.array(kwds['frozen'])
if len(frmask) != self.numargs+2:
raise ValueError("Incorrect number of frozen arguments.")
else:
# keep starting values for not frozen parameters
for n in range(len(frmask)):
# Troubleshooting ex_generic_mle_tdist
if isinstance(frmask[n], np.ndarray) and frmask[n].size == 1:
frmask[n] = frmask[n].item()
# If there were array elements, then frmask will be object-dtype,
# in which case np.isnan will raise TypeError
frmask = frmask.astype(np.float64)
x0 = np.array(x0)[np.isnan(frmask)]
else:
frmask = None
    #print(x0)
    #print(frmask)
return optimize.fmin(self.nnlf_fr, x0,
args=(np.ravel(data), frmask), disp=0) | estimate distribution parameters by MLE taking some parameters as fixed
Parameters
----------
data : ndarray, 1d
data for which the distribution parameters are estimated,
args : list ? check
starting values for optimization
kwds :
- 'frozen' : array_like
values for frozen distribution parameters and, for elements with
np.nan, the corresponding parameter will be estimated
Returns
-------
argest : ndarray
estimated parameters
Examples
--------
generate random sample
>>> np.random.seed(12345)
>>> x = stats.gamma.rvs(2.5, loc=0, scale=1.2, size=200)
estimate all parameters
>>> stats.gamma.fit(x)
array([ 2.0243194 , 0.20395655, 1.44411371])
>>> stats.gamma.fit_fr(x, frozen=[np.nan, np.nan, np.nan])
array([ 2.0243194 , 0.20395655, 1.44411371])
keep loc fixed, estimate shape and scale parameters
>>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, np.nan])
array([ 2.45603985, 1.27333105])
keep loc and scale fixed, estimate shape parameter
>>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
array([ 3.00048828])
>>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.2])
array([ 2.57792969])
estimate only scale parameter for fixed shape and loc
>>> stats.gamma.fit_fr(x, frozen=[2.5, 0.0, np.nan])
array([ 1.25087891])
Notes
-----
self is an instance of a distribution class. This can be attached to
scipy.stats.distributions.rv_continuous
*Todo*
* check if docstring is correct
* more input checking, args is list ? might also apply to current fit method | fit_fr | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause |
def expect(self, fn=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False):
'''calculate expected value of a function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
fn : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set to the support
of the distribution
conditional : bool (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Returns
-------
expected value : float
Notes
-----
    This function has not been checked for its behavior when the integral is
not finite. The integration behavior is inherited from scipy.integrate.quad.
'''
if fn is None:
def fun(x, *args):
return x*self.pdf(x, loc=loc, scale=scale, *args)
else:
def fun(x, *args):
return fn(x)*self.pdf(x, loc=loc, scale=scale, *args)
if lb is None:
lb = loc + self.a * scale #(self.a - loc)/(1.0*scale)
if ub is None:
ub = loc + self.b * scale #(self.b - loc)/(1.0*scale)
if conditional:
invfac = (self.sf(lb, loc=loc, scale=scale, *args)
- self.sf(ub, loc=loc, scale=scale, *args))
else:
invfac = 1.0
return integrate.quad(fun, lb, ub,
args=args)[0]/invfac | calculate expected value of a function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
fn : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set to the support
of the distribution
conditional : bool (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Returns
-------
expected value : float
Notes
-----
This function has not been checked for its behavior when the integral is
not finite. The integration behavior is inherited from scipy.integrate.quad. | expect | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause |
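
A minimal sketch of the integral that `expect` computes, fn(x) times the pdf over the support; here E[X**2] under the standard normal, which equals 1:

```python
import numpy as np
from scipy import stats, integrate

val, abserr = integrate.quad(lambda x: x**2 * stats.norm.pdf(x),
                             -np.inf, np.inf)
print(val)  # ~1.0
```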
def expect_v2(self, fn=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False):
'''calculate expected value of a function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
fn : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set using
quantiles of the distribution, see Notes
conditional : bool (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Returns
-------
expected value : float
Notes
-----
    This function has not been checked for its behavior when the integral is
not finite. The integration behavior is inherited from scipy.integrate.quad.
The default limits are lb = self.ppf(1e-9, *args), ub = self.ppf(1-1e-9, *args)
For some heavy tailed distributions, 'alpha', 'cauchy', 'halfcauchy',
'levy', 'levy_l', and for 'ncf', the default limits are not set correctly
even when the expectation of the function is finite. In this case, the
integration limits, lb and ub, should be chosen by the user. For example,
for the ncf distribution, ub=1000 works in the examples.
There are also problems with numerical integration in some other cases,
for example if the distribution is very concentrated and the default limits
are too large.
'''
#changes: 20100809
#correction and refactoring how loc and scale are handled
#uses now _pdf
#needs more testing for distribution with bound support, e.g. genpareto
if fn is None:
def fun(x, *args):
return (loc + x*scale)*self._pdf(x, *args)
else:
def fun(x, *args):
return fn(loc + x*scale)*self._pdf(x, *args)
if lb is None:
#lb = self.a
try:
lb = self.ppf(1e-9, *args) #1e-14 quad fails for pareto
except ValueError:
lb = self.a
else:
lb = max(self.a, (lb - loc)/(1.0*scale)) #transform to standardized
if ub is None:
#ub = self.b
try:
ub = self.ppf(1-1e-9, *args)
except ValueError:
ub = self.b
else:
ub = min(self.b, (ub - loc)/(1.0*scale))
if conditional:
invfac = self._sf(lb,*args) - self._sf(ub,*args)
else:
invfac = 1.0
return integrate.quad(fun, lb, ub,
args=args, limit=500)[0]/invfac | calculate expected value of a function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
fn : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set using
quantiles of the distribution, see Notes
conditional : bool (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Returns
-------
expected value : float
Notes
-----
This function has not been checked for its behavior when the integral is
not finite. The integration behavior is inherited from scipy.integrate.quad.
The default limits are lb = self.ppf(1e-9, *args), ub = self.ppf(1-1e-9, *args)
For some heavy tailed distributions, 'alpha', 'cauchy', 'halfcauchy',
'levy', 'levy_l', and for 'ncf', the default limits are not set correctly
even when the expectation of the function is finite. In this case, the
integration limits, lb and ub, should be chosen by the user. For example,
for the ncf distribution, ub=1000 works in the examples.
There are also problems with numerical integration in some other cases,
for example if the distribution is very concentrated and the default limits
are too large. | expect_v2 | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause |
def distfitbootstrap(sample, distr, nrepl=100):
'''run bootstrap for estimation of distribution parameters
hard coded: only one shape parameter is allowed and estimated,
loc=0 and scale=1 are fixed in the estimation
Parameters
----------
sample : ndarray
original sample data for bootstrap
distr : distribution instance with fit_fr method
nrepl : int
number of bootstrap replications
Returns
-------
res : array (nrepl,)
parameter estimates for all bootstrap replications
'''
nobs = len(sample)
res = np.zeros(nrepl)
for ii in range(nrepl):
rvsind = np.random.randint(nobs, size=nobs)
x = sample[rvsind]
res[ii] = distr.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
return res | run bootstrap for estimation of distribution parameters
hard coded: only one shape parameter is allowed and estimated,
loc=0 and scale=1 are fixed in the estimation
Parameters
----------
sample : ndarray
original sample data for bootstrap
distr : distribution instance with fit_fr method
nrepl : int
number of bootstrap replications
Returns
-------
res : array (nrepl,)
parameter estimates for all bootstrap replications | distfitbootstrap | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause |
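
A standalone bootstrap sketch paralleling `distfitbootstrap`; since `fit_fr` is a patch method added by this module, the sketch swaps in scipy's `fit` with the `floc`/`fscale` keywords to hold loc and scale fixed:

```python
import numpy as np
from scipy import stats

np.random.seed(0)
sample = stats.gamma.rvs(2.5, size=200)
nobs, nrepl = len(sample), 100

res = np.zeros(nrepl)
for ii in range(nrepl):
    rvsind = np.random.randint(nobs, size=nobs)   # resample with replacement
    res[ii] = stats.gamma.fit(sample[rvsind], floc=0.0, fscale=1.0)[0]
print(res.mean(), res.std())  # bootstrap distribution of the shape estimate
```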
def distfitmc(sample, distr, nrepl=100, distkwds={}):
'''run Monte Carlo for estimation of distribution parameters
hard coded: only one shape parameter is allowed and estimated,
loc=0 and scale=1 are fixed in the estimation
Parameters
----------
sample : ndarray
original sample data, in Monte Carlo only used to get nobs,
distr : distribution instance with fit_fr method
    nrepl : int
        number of Monte Carlo replications
    distkwds : dict
        keyword arguments passed to ``distr.rvs``; must contain 'arg', the
        shape parameter used in the simulation
Returns
-------
res : array (nrepl,)
parameter estimates for all Monte Carlo replications
'''
arg = distkwds.pop('arg')
nobs = len(sample)
res = np.zeros(nrepl)
for ii in range(nrepl):
x = distr.rvs(arg, size=nobs, **distkwds)
res[ii] = distr.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
return res | run Monte Carlo for estimation of distribution parameters
hard coded: only one shape parameter is allowed and estimated,
loc=0 and scale=1 are fixed in the estimation
Parameters
----------
sample : ndarray
original sample data, in Monte Carlo only used to get nobs,
distr : distribution instance with fit_fr method
nrepl : int
number of Monte Carlo replications
distkwds : dict
keyword arguments passed to ``distr.rvs``; must contain 'arg', the shape
parameter used in the simulation
Returns
-------
res : array (nrepl,)
parameter estimates for all Monte Carlo replications | distfitmc | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause |
def printresults(sample, arg, bres, kind='bootstrap'):
    '''calculate and print Bootstrap or Monte Carlo results
Parameters
----------
sample : ndarray
original sample data
arg : float (for general case will be array)
bres : ndarray
parameter estimates from Bootstrap or Monte Carlo run
kind : {'bootstrap', 'montecarlo'}
        output is printed for Bootstrap (default) or Monte Carlo
Returns
-------
None, currently only printing
Notes
-----
    still a bit of a mess because it is used for both Bootstrap and Monte Carlo
made correction:
reference point for bootstrap is estimated parameter
not clear:
I'm not doing any ddof adjustment in estimation of variance, do we
need ddof>0 ?
todo: return results and string instead of printing
'''
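    # note: relies on module-level globals nobs, nrepl and distr that are
    # defined in the calling script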
print('true parameter value')
print(arg)
print('MLE estimate of parameters using sample (nobs=%d)'% (nobs))
argest = distr.fit_fr(sample, frozen=[np.nan, 0.0, 1.0])
print(argest)
if kind == 'bootstrap':
#bootstrap compares to estimate from sample
arg = argest
print('%s distribution of parameter estimate (nrepl=%d)'% (kind, nrepl))
print(f'mean = {bres.mean(0):f}, bias={bres.mean(0)-arg:f}')
print('median', np.median(bres, axis=0))
print('var and std', bres.var(0), np.sqrt(bres.var(0)))
bmse = ((bres - arg)**2).mean(0)
print('mse, rmse', bmse, np.sqrt(bmse))
bressorted = np.sort(bres)
print('%s confidence interval (90%% coverage)' % kind)
    print(bressorted[int(nrepl*0.05)], bressorted[int(nrepl*0.95)])
print('%s confidence interval (90%% coverage) normal approximation' % kind)
print(stats.norm.ppf(0.05, loc=bres.mean(), scale=bres.std()),)
print(stats.norm.isf(0.05, loc=bres.mean(), scale=bres.std()))
print('Kolmogorov-Smirnov test for normality of %s distribution' % kind)
print(' - estimated parameters, p-values not really correct')
print(stats.kstest(bres, 'norm', (bres.mean(), bres.std()))) | calculate and print Bootstrap or Monte Carlo results
Parameters
----------
sample : ndarray
original sample data
arg : float (for general case will be array)
bres : ndarray
parameter estimates from Bootstrap or Monte Carlo run
kind : {'bootstrap', 'montecarlo'}
output is printed for Bootstrap (default) or Monte Carlo
Returns
-------
None, currently only printing
Notes
-----
still a bit of a mess because it is used for both Bootstrap and Monte Carlo
made correction:
reference point for bootstrap is estimated parameter
not clear:
I'm not doing any ddof adjustment in estimation of variance, do we
need ddof>0 ?
todo: return results and string instead of printing | printresults | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause |
def paramstopot(thresh, shape, scale):
'''transform shape scale for peak over threshold
y = x-u|x>u ~ GPD(k, sigma-k*u) if x ~ GPD(k, sigma)
notation of de Zea Bermudez, Kotz
k, sigma is shape, scale
'''
return shape, scale - shape * thresh | transform shape scale for peak over threshold
y = x-u|x>u ~ GPD(k, sigma-k*u) if x ~ GPD(k, sigma)
notation of de Zea Bermudez, Kotz
k, sigma is shape, scale | paramstopot | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/genpareto.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/genpareto.py | BSD-3-Clause |
def meanexcess(thresh, shape, scale):
'''mean excess function of genpareto
    warns if the inequality conditions in de Zea Bermudez, Kotz are violated
'''
warnif(shape > -1, 'shape > -1')
warnif(thresh >= 0, 'thresh >= 0') # make it weak inequality
warnif((scale - shape * thresh) > 0, '(scale - shape*thresh) > 0')
return (scale - shape * thresh) / (1 + shape) | mean excess function of genpareto
warns if the inequality conditions in de Zea Bermudez, Kotz are violated | meanexcess | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/genpareto.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/genpareto.py | BSD-3-Clause |
def gammamomentcond(distfn, params, mom2, quantile=None):
    '''estimate distribution parameters based on the method of moments (mean,
    variance) for distributions with 1 shape parameter and fixed loc=0.
Returns
-------
cond : function
Notes
-----
first test version, quantile argument not used
'''
def cond(params):
alpha, scale = params
mom2s = distfn.stats(alpha, 0.,scale)
        #quantile
return np.array(mom2)-mom2s
return cond | estimate distribution parameters based on the method of moments (mean,
variance) for distributions with 1 shape parameter and fixed loc=0.
Returns
-------
cond : function
Notes
-----
first test version, quantile argument not used | gammamomentcond | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
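
A usage sketch for the closure returned by `gammamomentcond`, solved with `scipy.optimize.fsolve` (simulated sample; the condition is written inline here):

```python
import numpy as np
from scipy import stats, optimize

np.random.seed(0)
x = stats.gamma.rvs(2.5, loc=0, scale=1.2, size=2000)
mom2 = (x.mean(), x.var())

def cond(params):
    alpha, scale = params
    # theoretical (mean, var) minus empirical, zero at the MoM estimate
    return np.array(mom2) - np.array(stats.gamma.stats(alpha, 0., scale))

print(optimize.fsolve(cond, [1.0, 1.0]))  # approx (2.5, 1.2)
```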
def gammamomentcond2(distfn, params, mom2, quantile=None):
    '''estimate distribution parameters based on the method of moments (mean,
    variance) for distributions with 1 shape parameter and fixed loc=0.
Returns
-------
difference : ndarray
difference between theoretical and empirical moments
Notes
-----
first test version, quantile argument not used
The only difference to previous function is return type.
'''
alpha, scale = params
mom2s = distfn.stats(alpha, 0.,scale)
return np.array(mom2)-mom2s | estimate distribution parameters based on the method of moments (mean,
variance) for distributions with 1 shape parameter and fixed loc=0.
Returns
-------
difference : ndarray
difference between theoretical and empirical moments
Notes
-----
first test version, quantile argument not used
The only difference to previous function is return type. | gammamomentcond2 | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
def momentcondunbound(distfn, params, mom2, quantile=None):
'''moment conditions for estimating distribution parameters using method
of moments, uses mean, variance and one quantile for distributions
with 1 shape parameter.
Returns
-------
difference : ndarray
difference between theoretical and empirical moments and quantiles
'''
shape, loc, scale = params
mom2diff = np.array(distfn.stats(shape, loc,scale)) - mom2
if quantile is not None:
pq, xq = quantile
#ppfdiff = distfn.ppf(pq, alpha)
cdfdiff = distfn.cdf(xq, shape, loc, scale) - pq
return np.concatenate([mom2diff, cdfdiff[:1]])
return mom2diff | moment conditions for estimating distribution parameters using method
of moments, uses mean, variance and one quantile for distributions
with 1 shape parameter.
Returns
-------
difference : ndarray
difference between theoretical and empirical moments and quantiles | momentcondunbound | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
def momentcondunboundls(distfn, params, mom2, quantile=None, shape=None):
'''moment conditions for estimating loc and scale of a distribution
with method of moments using either 2 quantiles or 2 moments (not both).
Returns
-------
difference : ndarray
difference between theoretical and empirical moments or quantiles
'''
loc, scale = params
mom2diff = np.array(distfn.stats(shape, loc, scale)) - mom2
if quantile is not None:
pq, xq = quantile
#ppfdiff = distfn.ppf(pq, alpha)
cdfdiff = distfn.cdf(xq, shape, loc, scale) - pq
#return np.concatenate([mom2diff, cdfdiff[:1]])
return cdfdiff
return mom2diff | moment conditions for estimating loc and scale of a distribution
with method of moments using either 2 quantiles or 2 moments (not both).
Returns
-------
difference : ndarray
difference between theoretical and empirical moments or quantiles | momentcondunboundls | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
def nloglike(params):
'''negative loglikelihood function of binned data
corresponds to multinomial
'''
prob = np.diff(distfn.cdf(binedges, *params))
return -(lnnobsfact + np.sum(freq*np.log(prob)- special.gammaln(freq+1))) | negative loglikelihood function of binned data
corresponds to multinomial | fitbinned.nloglike | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
def fitbinned(distfn, freq, binedges, start, fixed=None):
'''estimate parameters of distribution function for binned data using MLE
Parameters
----------
distfn : distribution instance
needs to have cdf method, as in scipy.stats
freq : ndarray, 1d
frequency count, e.g. obtained by histogram
binedges : ndarray, 1d
binedges including lower and upper bound
start : tuple or array_like ?
starting values, needs to have correct length
Returns
-------
paramest : ndarray
estimated parameters
Notes
-----
todo: add fixed parameter option
added factorial
'''
if fixed is not None:
raise NotImplementedError
nobs = np.sum(freq)
lnnobsfact = special.gammaln(nobs+1)
def nloglike(params):
'''negative loglikelihood function of binned data
corresponds to multinomial
'''
prob = np.diff(distfn.cdf(binedges, *params))
return -(lnnobsfact + np.sum(freq*np.log(prob)- special.gammaln(freq+1)))
return optimize.fmin(nloglike, start) | estimate parameters of distribution function for binned data using MLE
Parameters
----------
distfn : distribution instance
needs to have cdf method, as in scipy.stats
freq : ndarray, 1d
frequency count, e.g. obtained by histogram
binedges : ndarray, 1d
binedges including lower and upper bound
start : tuple or array_like ?
starting values, needs to have correct length
Returns
-------
paramest : ndarray
estimated parameters
Notes
-----
todo: add fixed parameter option
added factorial | fitbinned | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
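
A usage sketch for the `fitbinned` defined above, on a binned normal sample. Note `np.histogram` bin edges only span the observed data, so tail probability is ignored; acceptable for a sketch:

```python
import numpy as np
from scipy import stats

np.random.seed(0)
x = stats.norm.rvs(loc=1.0, scale=2.0, size=1000)
freq, binedges = np.histogram(x, bins=15)

# start from (loc, scale) = (0, 1); estimate approx (1.0, 2.0)
print(fitbinned(stats.norm, freq, binedges, (0.0, 1.0)))
```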
def fitbinnedgmm(distfn, freq, binedges, start, fixed=None, weightsoptimal=True):
'''estimate parameters of distribution function for binned data using GMM
Parameters
----------
distfn : distribution instance
needs to have cdf method, as in scipy.stats
freq : ndarray, 1d
frequency count, e.g. obtained by histogram
binedges : ndarray, 1d
binedges including lower and upper bound
start : tuple or array_like ?
starting values, needs to have correct length
fixed : None
not used yet
weightsoptimal : bool
If true, then the optimal weighting matrix for GMM is used. If false,
then the identity matrix is used
Returns
-------
paramest : ndarray
estimated parameters
Notes
-----
todo: add fixed parameter option
added factorial
'''
if fixed is not None:
raise NotImplementedError
nobs = np.sum(freq)
if weightsoptimal:
weights = freq/float(nobs)
else:
weights = np.ones(len(freq))
freqnormed = freq/float(nobs)
# skip turning weights into matrix diag(freq/float(nobs))
def gmmobjective(params):
        '''GMM objective function for binned data: weighted quadratic form
        of the moment conditions (observed minus predicted bin frequencies)
'''
prob = np.diff(distfn.cdf(binedges, *params))
momcond = freqnormed - prob
return np.dot(momcond*weights, momcond)
return optimize.fmin(gmmobjective, start) | estimate parameters of distribution function for binned data using GMM
Parameters
----------
distfn : distribution instance
needs to have cdf method, as in scipy.stats
freq : ndarray, 1d
frequency count, e.g. obtained by histogram
binedges : ndarray, 1d
binedges including lower and upper bound
start : tuple or array_like ?
starting values, needs to have correct length
fixed : None
not used yet
weightsoptimal : bool
If true, then the optimal weighting matrix for GMM is used. If false,
then the identity matrix is used
Returns
-------
paramest : ndarray
estimated parameters
Notes
-----
todo: add fixed parameter option
added factorial | fitbinnedgmm | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
def logmps(params, xsorted, dist):
'''calculate negative log of Product-of-Spacings
Parameters
----------
params : array_like, tuple ?
        parameters of the distribution function
xsorted : array_like
data that is already sorted
dist : instance of a distribution class
only cdf method is used
Returns
-------
mps : float
negative log of Product-of-Spacings
Notes
-----
    MPS definition from JKB, page 233
'''
xcdf = np.r_[0., dist.cdf(xsorted, *params), 1.]
D = np.diff(xcdf)
return -np.log(D).mean() | calculate negative log of Product-of-Spacings
Parameters
----------
params : array_like, tuple ?
        parameters of the distribution function
xsorted : array_like
data that is already sorted
dist : instance of a distribution class
only cdf method is used
Returns
-------
mps : float
negative log of Product-of-Spacings
Notes
-----
    MPS definition from JKB, page 233 | logmps | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause
def getstartparams(dist, data):
'''get starting values for estimation of distribution parameters
Parameters
----------
dist : distribution instance
the distribution instance needs to have either a method fitstart
or an attribute numargs
data : ndarray
data for which preliminary estimator or starting value for
parameter estimation is desired
Returns
-------
x0 : ndarray
preliminary estimate or starting value for the parameters of
the distribution given the data, including loc and scale
'''
if hasattr(dist, 'fitstart'):
#x0 = getattr(dist, 'fitstart')(data)
x0 = dist.fitstart(data)
else:
if np.isfinite(dist.a):
x0 = np.r_[[1.]*dist.numargs, (data.min()-1), 1.]
else:
x0 = np.r_[[1.]*dist.numargs, (data.mean()-1), 1.]
return x0 | get starting values for estimation of distribution parameters
Parameters
----------
dist : distribution instance
the distribution instance needs to have either a method fitstart
or an attribute numargs
data : ndarray
data for which preliminary estimator or starting value for
parameter estimation is desired
Returns
-------
x0 : ndarray
preliminary estimate or starting value for the parameters of
the distribution given the data, including loc and scale | getstartparams | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
def fit_mps(dist, data, x0=None):
'''Estimate distribution parameters with Maximum Product-of-Spacings
Parameters
----------
params : array_like, tuple ?
        parameters of the distribution function
xsorted : array_like
data that is already sorted
dist : instance of a distribution class
only cdf method is used
Returns
-------
x : ndarray
estimates for the parameters of the distribution given the data,
including loc and scale
'''
xsorted = np.sort(data)
if x0 is None:
x0 = getstartparams(dist, xsorted)
args = (xsorted, dist)
print(x0)
#print(args)
return optimize.fmin(logmps, x0, args=args) | Estimate distribution parameters with Maximum Product-of-Spacings
Parameters
----------
params : array_like, tuple ?
        parameters of the distribution function
xsorted : array_like
data that is already sorted
dist : instance of a distribution class
only cdf method is used
Returns
-------
x : ndarray
estimates for the parameters of the distribution given the data,
including loc and scale | fit_mps | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
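A hedged sketch of fit_mps on gamma data (assumes scipy.stats and the functions above); gamma has a finite lower bound, so getstartparams starts loc just below the sample minimum:

import numpy as np
from scipy import stats

data = stats.gamma.rvs(2.5, loc=0., scale=1.5, size=200, random_state=1)
est = fit_mps(stats.gamma, data)   # optimize.fmin result: [shape, loc, scale]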
def ks_2samp(data1, data2):
"""
    Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
a, b : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
D : float
KS statistic
p-value : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
This is the two-sided test, one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> import numpy as np
>>> from scipy.stats import ks_2samp
>>> #fix random seed to get the same result
>>> np.random.seed(12345678)
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
different distribution
we can reject the null hypothesis since the pvalue is below 1%
>>> rvs1 = stats.norm.rvs(size=n1,loc=0.,scale=1)
>>> rvs2 = stats.norm.rvs(size=n2,loc=0.5,scale=1.5)
>>> ks_2samp(rvs1,rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
slightly different distribution
we cannot reject the null hypothesis at a 10% or lower alpha since
the pvalue at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2,loc=0.01,scale=1.0)
>>> ks_2samp(rvs1,rvs3)
(0.10333333333333333, 0.14498781825751686)
identical distribution
we cannot reject the null hypothesis since the pvalue is high, 41%
>>> rvs4 = stats.norm.rvs(size=n2,loc=0.0,scale=1.0)
>>> ks_2samp(rvs1,rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
data1, data2 = lmap(np.asarray, (data1, data2))
    n1 = data1.shape[0]
    n2 = data2.shape[0]
data1 = np.sort(data1)
data2 = np.sort(data2)
data_all = np.concatenate([data1,data2])
#reminder: searchsorted inserts 2nd into 1st array
cdf1 = np.searchsorted(data1,data_all,side='right')/(1.0*n1)
cdf2 = (np.searchsorted(data2,data_all,side='right'))/(1.0*n2)
d = np.max(np.absolute(cdf1-cdf2))
#Note: d absolute not signed distance
en = np.sqrt(n1*n2/float(n1+n2))
try:
prob = ksprob((en+0.12+0.11/en)*d)
except Exception:
prob = 1.0
    return d, prob | Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
a, b : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
D : float
KS statistic
p-value : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
This is the two-sided test, one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> import numpy as np
>>> from scipy.stats import ks_2samp
>>> #fix random seed to get the same result
>>> np.random.seed(12345678)
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
different distribution
we can reject the null hypothesis since the pvalue is below 1%
>>> rvs1 = stats.norm.rvs(size=n1,loc=0.,scale=1)
>>> rvs2 = stats.norm.rvs(size=n2,loc=0.5,scale=1.5)
>>> ks_2samp(rvs1,rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
slightly different distribution
we cannot reject the null hypothesis at a 10% or lower alpha since
the pvalue at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2,loc=0.01,scale=1.0)
>>> ks_2samp(rvs1,rvs3)
(0.10333333333333333, 0.14498781825751686)
identical distribution
we cannot reject the null hypothesis since the pvalue is high, 41%
>>> rvs4 = stats.norm.rvs(size=n2,loc=0.0,scale=1.0)
>>> ks_2samp(rvs1,rvs4)
(0.07999999999999996, 0.41126949729859719) | ks_2samp | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
def kstest(rvs, cdf, args=(), N=20, alternative = 'two_sided', mode='approx',**kwds):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two_sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str or array or callable
string: name of a distribution in scipy.stats
array: 1-D observations of random variables
callable: function to generate random variables, requires keyword
argument `size`
cdf : str or callable
string: name of a distribution in scipy.stats, if rvs is a string then
cdf can evaluate to `False` or be the same as rvs
callable: function to evaluate cdf
args : tuple, sequence
distribution parameters, used if rvs or cdf are strings
N : int
sample size if rvs is string or callable
alternative : 'two_sided' (default), 'less' or 'greater'
defines the alternative hypothesis (see explanation)
mode : 'approx' (default) or 'asymp'
defines the distribution used for calculating p-value
'approx' : use approximation to exact distribution of test statistic
'asymp' : use asymptotic distribution of test statistic
Returns
-------
D : float
KS test statistic, either D, D+ or D-
p-value : float
one-tailed or two-tailed p-value
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, G(x)<=F(x), resp. G(x)>=F(x).
Examples
--------
>>> from scipy import stats
>>> import numpy as np
>>> from scipy.stats import kstest
>>> x = np.linspace(-15,15,9)
>>> kstest(x,'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> kstest('norm','',N=100)
(0.058352892479417884, 0.88531190944151261)
is equivalent to this
>>> np.random.seed(987654321)
>>> kstest(stats.norm.rvs(size=100),'norm')
(0.058352892479417884, 0.88531190944151261)
Test against one-sided alternative hypothesis:
>>> np.random.seed(987654321)
Shift distribution to larger values, so that cdf_dgp(x)< norm.cdf(x):
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Do not reject equal distribution against alternative hypothesis: greater
>>> kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
Testing t distributed random variables against normal distribution:
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the kstest does not reject the hypothesis that the sample
came from the normal distribution
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at a alpha=10% level
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, str):
#cdf = getattr(stats, rvs).cdf
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError('if rvs is string, cdf has to be the same distribution')
if isinstance(cdf, str):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size':N}
vals = np.sort(rvs(*args,**kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
if alternative in ['two_sided', 'greater']:
Dplus = (np.arange(1.0, N+1)/N - cdfvals).max()
if alternative == 'greater':
return Dplus, distributions.ksone.sf(Dplus,N)
if alternative in ['two_sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return Dmin, distributions.ksone.sf(Dmin,N)
if alternative == 'two_sided':
D = np.max([Dplus,Dmin])
if mode == 'asymp':
return D, distributions.kstwobign.sf(D*np.sqrt(N))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D*np.sqrt(N))
            if N > 2666 or pval_two > 0.80 - N*0.3/1000.0:
return D, distributions.kstwobign.sf(D*np.sqrt(N))
else:
return D, distributions.ksone.sf(D,N)*2 | Perform the Kolmogorov-Smirnov test for goodness of fit
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two_sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str or array or callable
string: name of a distribution in scipy.stats
array: 1-D observations of random variables
callable: function to generate random variables, requires keyword
argument `size`
cdf : str or callable
string: name of a distribution in scipy.stats, if rvs is a string then
cdf can evaluate to `False` or be the same as rvs
callable: function to evaluate cdf
args : tuple, sequence
distribution parameters, used if rvs or cdf are strings
N : int
sample size if rvs is string or callable
alternative : 'two_sided' (default), 'less' or 'greater'
defines the alternative hypothesis (see explanation)
mode : 'approx' (default) or 'asymp'
defines the distribution used for calculating p-value
'approx' : use approximation to exact distribution of test statistic
'asymp' : use asymptotic distribution of test statistic
Returns
-------
D : float
KS test statistic, either D, D+ or D-
p-value : float
one-tailed or two-tailed p-value
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, G(x)<=F(x), resp. G(x)>=F(x).
Examples
--------
>>> from scipy import stats
>>> import numpy as np
>>> from scipy.stats import kstest
>>> x = np.linspace(-15,15,9)
>>> kstest(x,'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> kstest('norm','',N=100)
(0.058352892479417884, 0.88531190944151261)
is equivalent to this
>>> np.random.seed(987654321)
>>> kstest(stats.norm.rvs(size=100),'norm')
(0.058352892479417884, 0.88531190944151261)
Test against one-sided alternative hypothesis:
>>> np.random.seed(987654321)
Shift distribution to larger values, so that cdf_dgp(x)< norm.cdf(x):
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Do not reject equal distribution against alternative hypothesis: greater
>>> kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
Testing t distributed random variables against normal distribution:
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the kstest does not reject the hypothesis that the sample
came from the normal distribution
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at a alpha=10% level
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224) | kstest | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
def v(self):
'''Kuiper'''
return self.d_plus + self.d_minus | Kuiper | v | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
def wsqu(self):
'''Cramer von Mises'''
nobs = self.nobs
cdfvals = self.cdfvals
#use literal formula, TODO: simplify with arange(,,2)
wsqu = ((cdfvals - (2. * np.arange(1., nobs+1) - 1)/nobs/2.)**2).sum() \
+ 1./nobs/12.
return wsqu | Cramer von Mises | wsqu | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
def asqu(self):
'''Stephens 1974, does not have p-value formula for A^2'''
nobs = self.nobs
cdfvals = self.cdfvals
asqu = -((2. * np.arange(1., nobs+1) - 1) *
(np.log(cdfvals) + np.log(1-cdfvals[::-1]) )).sum()/nobs - nobs
return asqu | Stephens 1974, does not have p-value formula for A^2 | asqu | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
def asquare(cdfvals, axis=0):
'''vectorized Anderson Darling A^2, Stephens 1974'''
ndim = len(cdfvals.shape)
nobs = cdfvals.shape[axis]
slice_reverse = [slice(None)] * ndim #might make copy if not specific axis???
islice = [None] * ndim
islice[axis] = slice(None)
slice_reverse[axis] = slice(None, None, -1)
asqu = -((2. * np.arange(1., nobs+1)[tuple(islice)] - 1) *
(np.log(cdfvals) + np.log(1-cdfvals[tuple(slice_reverse)]))/nobs).sum(axis) \
- nobs
return asqu | vectorized Anderson Darling A^2, Stephens 1974 | asquare | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
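Sketch of the vectorized A^2 on a batch of samples, assuming asquare above; each row is an independent sample and the true parameters are used in the cdf:

import numpy as np
from scipy import stats

rvs = stats.norm.rvs(size=(1000, 50), random_state=0)
cdfvals = np.sort(stats.norm.cdf(rvs), axis=1)   # sorted PIT values per row
a2 = asquare(cdfvals, axis=1)                    # one A^2 statistic per sample
print(a2.mean())   # close to 1, the asymptotic mean of A^2 under the null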
def bootstrap(distr, args=(), nobs=200, nrep=100, value=None, batch_size=None):
'''Monte Carlo (or parametric bootstrap) p-values for gof
currently hardcoded for A^2 only
assumes vectorized fit_vec method,
builds and analyses (nobs, nrep) sample in one step
rename function to less generic
this works also with nrep=1
'''
#signature similar to kstest ?
#delegate to fn ?
#rvs_kwds = {'size':(nobs, nrep)}
#rvs_kwds.update(kwds)
#it will be better to build a separate batch function that calls bootstrap
#keep batch if value is true, but batch iterate from outside if stat is returned
if batch_size is not None:
if value is None:
raise ValueError('using batching requires a value')
n_batch = int(np.ceil(nrep/float(batch_size)))
count = 0
for irep in range(n_batch):
rvs = distr.rvs(args, **{'size':(batch_size, nobs)})
params = distr.fit_vec(rvs, axis=1)
params = lmap(lambda x: np.expand_dims(x, 1), params)
cdfvals = np.sort(distr.cdf(rvs, params), axis=1)
stat = asquare(cdfvals, axis=1)
count += (stat >= value).sum()
return count / float(n_batch * batch_size)
else:
#rvs = distr.rvs(args, **kwds) #extension to distribution kwds ?
rvs = distr.rvs(args, **{'size':(nrep, nobs)})
params = distr.fit_vec(rvs, axis=1)
params = lmap(lambda x: np.expand_dims(x, 1), params)
cdfvals = np.sort(distr.cdf(rvs, params), axis=1)
stat = asquare(cdfvals, axis=1)
if value is None: #return all bootstrap results
stat_sorted = np.sort(stat)
return stat_sorted
else: #calculate and return specific p-value
return (stat >= value).mean() | Monte Carlo (or parametric bootstrap) p-values for gof
currently hardcoded for A^2 only
assumes vectorized fit_vec method,
builds and analyses (nobs, nrep) sample in one step
rename function to less generic
this works also with nrep=1 | bootstrap | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
def bootstrap2(value, distr, args=(), nobs=200, nrep=100):
'''Monte Carlo (or parametric bootstrap) p-values for gof
currently hardcoded for A^2 only
non vectorized, loops over all parametric bootstrap replications and calculates
and returns specific p-value,
rename function to less generic
'''
#signature similar to kstest ?
#delegate to fn ?
#rvs_kwds = {'size':(nobs, nrep)}
#rvs_kwds.update(kwds)
count = 0
for irep in range(nrep):
#rvs = distr.rvs(args, **kwds) #extension to distribution kwds ?
rvs = distr.rvs(args, **{'size':nobs})
params = distr.fit_vec(rvs)
cdfvals = np.sort(distr.cdf(rvs, params))
stat = asquare(cdfvals, axis=0)
count += (stat >= value)
return count * 1. / nrep | Monte Carlo (or parametric bootstrap) p-values for gof
currently hardcoded for A^2 only
non vectorized, loops over all parametric bootstrap replications and calculates
and returns specific p-value,
rename function to less generic | bootstrap2 | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
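bootstrap and bootstrap2 assume a distribution object exposing rvs(args, size=...), cdf(x, params) and fit_vec; plain scipy.stats distributions do not provide fit_vec, so a minimal hypothetical adapter (normal with moment estimates) illustrates the expected interface:

import numpy as np
from scipy import stats

class NormAdapter:
    # hypothetical wrapper implementing the interface bootstrap2 assumes
    def rvs(self, args, size):
        return stats.norm.rvs(loc=args[0], scale=args[1], size=size)
    def fit_vec(self, x, axis=0):
        return x.mean(axis), x.std(axis)
    def cdf(self, x, params):
        return stats.norm.cdf(x, loc=params[0], scale=params[1])

pval = bootstrap2(0.5, NormAdapter(), args=(0., 1.), nobs=100, nrep=200)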
def mutualinfo_kde(y, x, normed=True):
'''mutual information of two random variables estimated with kde
'''
nobs = len(x)
if not len(y) == nobs:
raise ValueError('both data arrays need to have the same size')
x = np.asarray(x, float)
y = np.asarray(y, float)
yx = np.vstack((y,x))
kde_x = gaussian_kde(x)(x)
kde_y = gaussian_kde(y)(y)
kde_yx = gaussian_kde(yx)(yx)
mi_obs = np.log(kde_yx) - np.log(kde_x) - np.log(kde_y)
mi = mi_obs.sum() / nobs
if normed:
mi_normed = np.sqrt(1. - np.exp(-2 * mi))
return mi_normed
else:
return mi | mutual information of two random variables estimated with kde | mutualinfo_kde | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_measures.py | BSD-3-Clause |
def mutualinfo_binned(y, x, bins, normed=True):
'''mutual information of two random variables estimated with kde
Notes
-----
bins='auto' selects the number of bins so that approximately 5 observations
are expected to be in each bin under the assumption of independence. This
follows roughly the description in Kahn et al. 2007
'''
nobs = len(x)
if not len(y) == nobs:
raise ValueError('both data arrays need to have the same size')
x = np.asarray(x, float)
y = np.asarray(y, float)
#yx = np.vstack((y,x))
## fyx, binsy, binsx = np.histogram2d(y, x, bins=bins)
## fx, binsx_ = np.histogram(x, bins=binsx)
## fy, binsy_ = np.histogram(y, bins=binsy)
if bins == 'auto':
ys = np.sort(y)
xs = np.sort(x)
#quantiles = np.array([0,0.25, 0.4, 0.6, 0.75, 1])
qbin_sqr = np.sqrt(5./nobs)
        quantiles = np.linspace(0, 1, int(np.round(1. / qbin_sqr)))
        quantile_index = ((nobs-1)*quantiles).astype(int)
        #move edges slightly so that they do not coincide with an observation
        shift = 1e-6 * np.ones(quantiles.shape)
        shift[0] -= 2*1e-6
binsy = ys[quantile_index] + shift
binsx = xs[quantile_index] + shift
elif np.size(bins) == 1:
binsy = bins
binsx = bins
elif (len(bins) == 2):
binsy, binsx = bins
## if np.size(bins[0]) == 1:
## binsx = bins[0]
## if np.size(bins[1]) == 1:
## binsx = bins[1]
fx, binsx = np.histogram(x, bins=binsx)
fy, binsy = np.histogram(y, bins=binsy)
fyx, binsy, binsx = np.histogram2d(y, x, bins=(binsy, binsx))
pyx = fyx * 1. / nobs
px = fx * 1. / nobs
py = fy * 1. / nobs
mi_obs = pyx * (np.log(pyx+1e-10) - np.log(py)[:,None] - np.log(px))
mi = mi_obs.sum()
if normed:
mi_normed = np.sqrt(1. - np.exp(-2 * mi))
return mi_normed, (pyx, py, px, binsy, binsx), mi_obs
else:
return mi | mutual information of two random variables estimated with kde
Notes
-----
bins='auto' selects the number of bins so that approximately 5 observations
are expected to be in each bin under the assumption of independence. This
follows roughly the description in Kahn et al. 2007 | mutualinfo_binned | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_measures.py | BSD-3-Clause |
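Quick sanity check for both estimators (assumes the two functions above and their module-level imports); for bivariate normal data the normed mutual information sqrt(1 - exp(-2*mi)) equals abs(corr):

import numpy as np

rs = np.random.RandomState(0)
x = rs.standard_normal(1000)
y = 0.6 * x + 0.8 * rs.standard_normal(1000)   # corr(y, x) = 0.6
mi_kde = mutualinfo_kde(y, x)                  # normed, roughly 0.6
mi_bin, _, _ = mutualinfo_binned(y, x, bins=5) # normed, roughly 0.6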
def __init__(self, mixing_dist, base_dist, bd_args_func, bd_kwds_func,
cutoff=1e-3):
'''create a mixture distribution
Parameters
----------
mixing_dist : discrete frozen distribution
mixing distribution
base_dist : continuous distribution
parametrized distributions in the mixture
bd_args_func : callable
function that builds the tuple of args for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty tuple or
a tuple of arrays.
bd_kwds_func : callable
function that builds the dictionary of kwds for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty dictionary or
a dictionary with arrays as values.
cutoff : float
If the mixing distribution has infinite support, then the
distribution is truncated with approximately (subject to integer
conversion) the cutoff probability in the missing tail. Random
draws that are outside the truncated range are clipped, that is
assigned to the highest or lowest value in the truncated support.
'''
self.mixing_dist = mixing_dist
self.base_dist = base_dist
#self.bd_args = bd_args
if not np.isneginf(mixing_dist.dist.a):
lower = mixing_dist.dist.a
else:
            lower = mixing_dist.ppf(cutoff)  # truncate tail with cutoff probability
if not np.isposinf(mixing_dist.dist.b):
upper = mixing_dist.dist.b
else:
            upper = mixing_dist.isf(cutoff)
self.ma = lower
self.mb = upper
mixing_support = np.arange(lower, upper+1)
self.mixing_probs = mixing_dist.pmf(mixing_support)
self.bd_args = bd_args_func(mixing_support)
self.bd_kwds = bd_kwds_func(mixing_support) | create a mixture distribution
Parameters
----------
mixing_dist : discrete frozen distribution
mixing distribution
base_dist : continuous distribution
parametrized distributions in the mixture
bd_args_func : callable
function that builds the tuple of args for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty tuple or
a tuple of arrays.
bd_kwds_func : callable
function that builds the dictionary of kwds for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty dictionary or
a dictionary with arrays as values.
cutoff : float
If the mixing distribution has infinite support, then the
distribution is truncated with approximately (subject to integer
conversion) the cutoff probability in the missing tail. Random
draws that are outside the truncated range are clipped, that is
assigned to the highest or lowest value in the truncated support. | __init__ | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/otherdist.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/otherdist.py | BSD-3-Clause |
def _get_clip_lower(self, kwds):
'''helper method to get clip_lower from kwds or attribute
'''
if 'clip_lower' not in kwds:
clip_lower = self.clip_lower
else:
clip_lower = kwds.pop('clip_lower')
return clip_lower, kwds | helper method to get clip_lower from kwds or attribute | _get_clip_lower | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/otherdist.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/otherdist.py | BSD-3-Clause |
def chi2_pdf(self, x, df):
'''pdf of chi-square distribution'''
#from scipy.stats.distributions
Px = x**(df/2.0-1)*np.exp(-x/2.0)
Px /= special.gamma(df/2.0)* 2**(df/2.0)
return Px | pdf of chi-square distribution | chi2_pdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/multivariate.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/multivariate.py | BSD-3-Clause |
def mvstdtprob(a, b, R, df, ieps=1e-5, quadkwds=None, mvstkwds=None):
"""
Probability of rectangular area of standard t distribution
assumes mean is zero and R is correlation matrix
Notes
-----
This function does not calculate the estimate of the combined error
between the underlying multivariate normal probability calculations
and the integration.
"""
kwds = dict(args=(a, b, R, df), epsabs=1e-4, epsrel=1e-2, limit=150)
if quadkwds is not None:
kwds.update(quadkwds)
lower, upper = chi.ppf([ieps, 1 - ieps], df)
res, err = integrate.quad(funbgh2, lower, upper, **kwds)
prob = res * bghfactor(df)
return prob | Probability of rectangular area of standard t distribution
assumes mean is zero and R is correlation matrix
Notes
-----
This function does not calculate the estimate of the combined error
between the underlying multivariate normal probability calculations
and the integration. | mvstdtprob | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/multivariate.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/multivariate.py | BSD-3-Clause |
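Hedged usage sketch for mvstdtprob (relies on this module's helpers funbgh2, bghfactor and chi); finite bounds keep the numerical integration well behaved:

import numpy as np

R = np.array([[1., 0.5],
              [0.5, 1.]])
a = np.array([-2., -2.])
b = np.array([2., 2.])
prob = mvstdtprob(a, b, R, 4)   # P(a <= X <= b) for a bivariate standard t, df=4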
def multivariate_t_rvs(m, S, df=np.inf, n=1):
'''generate random variables of multivariate t distribution
Parameters
----------
m : array_like
mean of random variable, length determines dimension of random variable
S : array_like
square array of covariance matrix
df : int or float
degrees of freedom
n : int
number of observations, return random array will be (n, len(m))
Returns
-------
rvs : ndarray, (n, len(m))
each row is an independent draw of a multivariate t distributed
random variable
'''
m = np.asarray(m)
d = len(m)
if df == np.inf:
x = np.ones(n)
else:
x = np.random.chisquare(df, n)/df
z = np.random.multivariate_normal(np.zeros(d),S,(n,))
return m + z/np.sqrt(x)[:,None] # same output format as random.multivariate_normal | generate random variables of multivariate t distribution
Parameters
----------
m : array_like
mean of random variable, length determines dimension of random variable
S : array_like
square array of covariance matrix
df : int or float
degrees of freedom
n : int
number of observations, return random array will be (n, len(m))
Returns
-------
rvs : ndarray, (n, len(m))
each row is an independent draw of a multivariate t distributed
random variable | multivariate_t_rvs | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/multivariate.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/multivariate.py | BSD-3-Clause |
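Sketch comparing the spread of t draws to the normal limit (df=np.inf), using the function above:

import numpy as np

m = np.zeros(3)
S = np.eye(3)
rvs_t = multivariate_t_rvs(m, S, df=3, n=100000)
rvs_n = multivariate_t_rvs(m, S, df=np.inf, n=100000)
# marginal std of t with df=3 is sqrt(df/(df-2)) ~ 1.73 versus 1.0 in the limit
print(rvs_t.std(0), rvs_n.std(0))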
def prob_bv_rectangle(lower, upper, cdf):
'''helper function for probability of a rectangle in a bivariate distribution
Parameters
----------
lower : array_like
tuple of lower integration bounds
upper : array_like
tuple of upper integration bounds
cdf : callable
cdf(x,y), cumulative distribution function of bivariate distribution
how does this generalize to more than 2 variates ?
'''
probuu = cdf(*upper)
probul = cdf(upper[0], lower[1])
problu = cdf(lower[0], upper[1])
probll = cdf(*lower)
return probuu - probul - problu + probll | helper function for probability of a rectangle in a bivariate distribution
Parameters
----------
lower : array_like
tuple of lower integration bounds
upper : array_like
tuple of upper integration bounds
cdf : callable
cdf(x,y), cumulative distribution function of bivariate distribution
how does this generalize to more than 2 variates ? | prob_bv_rectangle | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/quantize.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/quantize.py | BSD-3-Clause |
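For an independent standard bivariate normal the joint cdf factorizes into the product of marginal cdfs, which gives a simple check of prob_bv_rectangle (assumes scipy.stats):

from scipy import stats

def cdf_indep(x, y):
    return stats.norm.cdf(x) * stats.norm.cdf(y)

p = prob_bv_rectangle((0., 0.), (1., 1.), cdf_indep)
# equals (Phi(1) - Phi(0))**2, about 0.1165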
def prob_mv_grid(bins, cdf, axis=-1):
'''helper function for probability of a rectangle grid in a multivariate distribution
how does this generalize to more than 2 variates ?
bins : tuple
tuple of bin edges, currently it is assumed that they broadcast
correctly
'''
if not isinstance(bins, np.ndarray):
bins = lmap(np.asarray, bins)
n_dim = len(bins)
bins_ = []
#broadcast if binedges are 1d
if all(lmap(np.ndim, bins) == np.ones(n_dim)):
for d in range(n_dim):
sl = [None]*n_dim
sl[d] = slice(None)
            bins_.append(bins[d][tuple(sl)])  # index with a tuple, not a list
else: #assume it is already correctly broadcasted
n_dim = bins.shape[0]
bins_ = bins
print(len(bins))
cdf_values = cdf(bins_)
probs = cdf_values.copy()
for d in range(n_dim):
probs = np.diff(probs, axis=d)
return probs | helper function for probability of a rectangle grid in a multivariate distribution
how does this generalize to more than 2 variates ?
bins : tuple
tuple of bin edges, currently it is assumed that they broadcast
correctly | prob_mv_grid | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/quantize.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/quantize.py | BSD-3-Clause |
def prob_quantize_cdf(binsx, binsy, cdf):
'''quantize a continuous distribution given by a cdf
Parameters
----------
binsx : array_like, 1d
binedges
'''
binsx = np.asarray(binsx)
binsy = np.asarray(binsy)
nx = len(binsx) - 1
ny = len(binsy) - 1
probs = np.nan * np.ones((nx, ny)) #np.empty(nx,ny)
cdf_values = cdf(binsx[:,None], binsy)
def cdf_func(x, y):
return cdf_values[x, y]
for xind in range(1, nx+1):
for yind in range(1, ny+1):
upper = (xind, yind)
lower = (xind-1, yind-1)
#print upper,lower,
probs[xind-1,yind-1] = prob_bv_rectangle(lower, upper, cdf_func)
assert not np.isnan(probs).any()
return probs | quantize a continuous distribution given by a cdf
Parameters
----------
binsx : array_like, 1d
binedges | prob_quantize_cdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/quantize.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/quantize.py | BSD-3-Clause |
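Quantizing the same independent bivariate normal on a 10x10 grid (cdf_indep as in the sketch above); the cell probabilities sum to roughly one when the grid covers most of the mass:

import numpy as np
from scipy import stats

def cdf_indep(x, y):
    return stats.norm.cdf(x) * stats.norm.cdf(y)

binsx = np.linspace(-5, 5, 11)
binsy = np.linspace(-5, 5, 11)
probs = prob_quantize_cdf(binsx, binsy, cdf_indep)
print(probs.shape, probs.sum())   # (10, 10), sum close to 1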
def prob_quantize_cdf_old(binsx, binsy, cdf):
'''quantize a continuous distribution given by a cdf
old version without precomputing cdf values
Parameters
----------
binsx : array_like, 1d
binedges
'''
binsx = np.asarray(binsx)
binsy = np.asarray(binsy)
nx = len(binsx) - 1
ny = len(binsy) - 1
probs = np.nan * np.ones((nx, ny)) #np.empty(nx,ny)
for xind in range(1, nx+1):
for yind in range(1, ny+1):
upper = (binsx[xind], binsy[yind])
lower = (binsx[xind-1], binsy[yind-1])
#print upper,lower,
probs[xind-1,yind-1] = prob_bv_rectangle(lower, upper, cdf)
assert not np.isnan(probs).any()
return probs | quantize a continuous distribution given by a cdf
old version without precomputing cdf values
Parameters
----------
binsx : array_like, 1d
binedges | prob_quantize_cdf_old | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/quantize.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/quantize.py | BSD-3-Clause |
def expect_mc(dist, func=lambda x: 1, size=50000):
'''calculate expected value of function by Monte Carlo integration
Parameters
----------
dist : distribution instance
needs to have rvs defined as a method for drawing random numbers
func : callable
function for which expectation is calculated, this function needs to
be vectorized, integration is over axis=0
size : int
number of random samples to use in the Monte Carlo integration,
Notes
-----
this does not batch
Returns
-------
expected value : ndarray
return of function func integrated over axis=0 by MonteCarlo, this will
have the same shape as the return of func without axis=0
Examples
--------
integrate probability that both observations are negative
>>> mvn = mve.MVNormal([0,0],2.)
>>> mve.expect_mc(mvn, lambda x: (x<np.array([0,0])).all(-1), size=100000)
0.25306000000000001
get tail probabilities of marginal distribution (should be 0.1)
>>> c = stats.norm.isf(0.05, scale=np.sqrt(2.))
>>> expect_mc(mvn, lambda x: (np.abs(x)>np.array([c, c])), size=100000)
array([ 0.09969, 0.0986 ])
or calling the method
>>> mvn.expect_mc(lambda x: (np.abs(x)>np.array([c, c])), size=100000)
array([ 0.09937, 0.10075])
'''
def fun(x):
return func(x) # * dist.pdf(x)
rvs = dist.rvs(size=size)
return fun(rvs).mean(0) | calculate expected value of function by Monte Carlo integration
Parameters
----------
dist : distribution instance
needs to have rvs defined as a method for drawing random numbers
func : callable
function for which expectation is calculated, this function needs to
be vectorized, integration is over axis=0
size : int
number of random samples to use in the Monte Carlo integration,
Notes
-----
this does not batch
Returns
-------
expected value : ndarray
return of function func integrated over axis=0 by MonteCarlo, this will
have the same shape as the return of func without axis=0
Examples
--------
integrate probability that both observations are negative
>>> mvn = mve.MVNormal([0,0],2.)
>>> mve.expect_mc(mvn, lambda x: (x<np.array([0,0])).all(-1), size=100000)
0.25306000000000001
get tail probabilities of marginal distribution (should be 0.1)
>>> c = stats.norm.isf(0.05, scale=np.sqrt(2.))
>>> expect_mc(mvn, lambda x: (np.abs(x)>np.array([c, c])), size=100000)
array([ 0.09969, 0.0986 ])
or calling the method
>>> mvn.expect_mc(lambda x: (np.abs(x)>np.array([c, c])), size=100000)
array([ 0.09937, 0.10075]) | expect_mc | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def expect_mc_bounds(dist, func=lambda x: 1, size=50000, lower=None, upper=None,
conditional=False, overfact=1.2):
'''calculate expected value of function by Monte Carlo integration
Parameters
----------
dist : distribution instance
needs to have rvs defined as a method for drawing random numbers
func : callable
function for which expectation is calculated, this function needs to
be vectorized, integration is over axis=0
size : int
minimum number of random samples to use in the Monte Carlo integration,
the actual number used can be larger because of oversampling.
lower : None or array_like
lower integration bounds, if None, then it is set to -inf
upper : None or array_like
upper integration bounds, if None, then it is set to +inf
conditional : bool
If true, then the expectation is conditional on being in within
[lower, upper] bounds, otherwise it is unconditional
overfact : float
oversampling factor, the actual number of random variables drawn in
each attempt are overfact * remaining draws. Extra draws are also
used in the integration.
Notes
-----
this does not batch
Returns
-------
expected value : ndarray
return of function func integrated over axis=0 by MonteCarlo, this will
have the same shape as the return of func without axis=0
Examples
--------
>>> mvn = mve.MVNormal([0,0],2.)
>>> mve.expect_mc_bounds(mvn, lambda x: np.ones(x.shape[0]),
lower=[-10,-10],upper=[0,0])
0.24990416666666668
get 3 marginal moments with one integration
>>> mvn = mve.MVNormal([0,0],1.)
>>> mve.expect_mc_bounds(mvn, lambda x: np.dstack([x, x**2, x**3, x**4]),
lower=[-np.inf,-np.inf], upper=[np.inf,np.inf])
array([[ 2.88629497e-03, 9.96706297e-01, -2.51005344e-03,
2.95240921e+00],
[ -5.48020088e-03, 9.96004409e-01, -2.23803072e-02,
2.96289203e+00]])
>>> from scipy import stats
>>> [stats.norm.moment(i) for i in [1,2,3,4]]
[0.0, 1.0, 0.0, 3.0]
'''
#call rvs once to find length of random vector
rvsdim = dist.rvs(size=1).shape[-1]
if lower is None:
lower = -np.inf * np.ones(rvsdim)
else:
lower = np.asarray(lower)
if upper is None:
upper = np.inf * np.ones(rvsdim)
else:
upper = np.asarray(upper)
def fun(x):
return func(x) # * dist.pdf(x)
rvsli = []
    used = 0  # number of draws accepted so far
    total = 0  # total number of draws made
    while True:
        remain = size - used  # draws still needed
        rvs = dist.rvs(size=int(remain * overfact))
        total += int(remain * overfact)
rvsok = rvs[((rvs >= lower) & (rvs <= upper)).all(-1)]
#if rvsok.ndim == 1: #possible shape problems if only 1 random vector
rvsok = np.atleast_2d(rvsok)
used += rvsok.shape[0]
rvsli.append(rvsok) #[:remain]) use extras instead
print(used)
if used >= size:
break
rvs = np.vstack(rvsli)
print(rvs.shape)
    assert used == rvs.shape[0]  # safety check
mean_conditional = fun(rvs).mean(0)
if conditional:
return mean_conditional
else:
return mean_conditional * (used * 1. / total) | calculate expected value of function by Monte Carlo integration
Parameters
----------
dist : distribution instance
needs to have rvs defined as a method for drawing random numbers
func : callable
function for which expectation is calculated, this function needs to
be vectorized, integration is over axis=0
size : int
minimum number of random samples to use in the Monte Carlo integration,
the actual number used can be larger because of oversampling.
lower : None or array_like
lower integration bounds, if None, then it is set to -inf
upper : None or array_like
upper integration bounds, if None, then it is set to +inf
conditional : bool
If true, then the expectation is conditional on being in within
[lower, upper] bounds, otherwise it is unconditional
overfact : float
oversampling factor, the actual number of random variables drawn in
each attempt are overfact * remaining draws. Extra draws are also
used in the integration.
Notes
-----
this does not batch
Returns
-------
expected value : ndarray
return of function func integrated over axis=0 by MonteCarlo, this will
have the same shape as the return of func without axis=0
Examples
--------
>>> mvn = mve.MVNormal([0,0],2.)
>>> mve.expect_mc_bounds(mvn, lambda x: np.ones(x.shape[0]),
lower=[-10,-10],upper=[0,0])
0.24990416666666668
get 3 marginal moments with one integration
>>> mvn = mve.MVNormal([0,0],1.)
>>> mve.expect_mc_bounds(mvn, lambda x: np.dstack([x, x**2, x**3, x**4]),
lower=[-np.inf,-np.inf], upper=[np.inf,np.inf])
array([[ 2.88629497e-03, 9.96706297e-01, -2.51005344e-03,
2.95240921e+00],
[ -5.48020088e-03, 9.96004409e-01, -2.23803072e-02,
2.96289203e+00]])
>>> from scipy import stats
>>> [stats.norm.moment(i) for i in [1,2,3,4]]
[0.0, 1.0, 0.0, 3.0] | expect_mc_bounds | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def bivariate_normal(x, mu, cov):
"""
Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld.
"""
X, Y = np.transpose(x)
mux, muy = mu
sigmax, sigmaxy, tmp, sigmay = np.ravel(cov)
sigmax, sigmay = np.sqrt(sigmax), np.sqrt(sigmay)
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp( -z/(2*(1-rho**2))) / denom | Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld. | bivariate_normal | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
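Evaluating the density above at a few points; x stacks (x, y) pairs row-wise, so np.transpose(x) unpacks into the X and Y coordinates:

import numpy as np

x = np.array([[0., 0.],
              [1., 1.]])
mu = (0., 0.)
cov = np.array([[1., 0.5],
                [0.5, 1.]])
pdf_vals = bivariate_normal(x, mu, cov)   # density at (0, 0) and (1, 1)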
def kl(self, other):
'''Kullback-Leibler divergence between this and another distribution
int f(x) (log f(x) - log g(x)) dx
where f is the pdf of self, and g is the pdf of other
uses double integration with scipy.integrate.dblquad
limits currently hardcoded
'''
def fun(x):
return self.logpdf(x) - other.logpdf(x)
return self.expect(fun) | Kullback-Leibler divergence between this and another distribution
int f(x) (log f(x) - log g(x)) dx
where f is the pdf of self, and g is the pdf of other
uses double integration with scipy.integrate.dblquad
limits currently hardcoded | kl | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def __init__(self, mean, sigma, *args, **kwds):
'''initialize instance
Parameters
----------
mean : array_like
parameter mu (might be renamed), for symmetric distributions this
is the mean
sigma : array_like, 2d
dispersion matrix, covariance matrix in normal distribution, but
only proportional to covariance matrix in t distribution
args : list
distribution specific arguments, e.g. df for t distribution
kwds : dict
currently not used
'''
self.extra_args = []
self.mean = np.asarray(mean)
self.sigma = sigma = np.asarray(sigma)
sigma = np.squeeze(sigma)
self.nvars = nvars = len(mean)
#self.covchol = np.linalg.cholesky(sigma)
#in the following sigma is original, self.sigma is full matrix
if sigma.shape == ():
#iid
self.sigma = np.eye(nvars) * sigma
self.sigmainv = np.eye(nvars) / sigma
self.cholsigmainv = np.eye(nvars) / np.sqrt(sigma)
elif (sigma.ndim == 1) and (len(sigma) == nvars):
#independent heteroskedastic
self.sigma = np.diag(sigma)
self.sigmainv = np.diag(1. / sigma)
self.cholsigmainv = np.diag( 1. / np.sqrt(sigma))
elif sigma.shape == (nvars, nvars): #python tuple comparison
#general
self.sigmainv = np.linalg.pinv(sigma)
self.cholsigmainv = np.linalg.cholesky(self.sigmainv).T
else:
raise ValueError('sigma has invalid shape')
#store logdetsigma for logpdf
self.logdetsigma = np.log(np.linalg.det(self.sigma)) | initialize instance
Parameters
----------
mean : array_like
parameter mu (might be renamed), for symmetric distributions this
is the mean
sigma : array_like, 2d
dispersion matrix, covariance matrix in normal distribution, but
only proportional to covariance matrix in t distribution
args : list
distribution specific arguments, e.g. df for t distribution
kwds : dict
currently not used | __init__ | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def rvs(self, size=1):
'''random variable
Parameters
----------
size : int or tuple
the number and shape of random variables to draw.
Returns
-------
rvs : ndarray
the returned random variables with shape given by size and the
dimension of the multivariate random vector as additional last
dimension
'''
raise NotImplementedError | random variable
Parameters
----------
size : int or tuple
the number and shape of random variables to draw.
Returns
-------
rvs : ndarray
the returned random variables with shape given by size and the
dimension of the multivariate random vector as additional last
dimension | rvs | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def logpdf(self, x):
'''logarithm of probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
logpdf : float or array
probability density value of each random vector
this should be made to work with 2d x,
with multivariate normal vector in each row and iid across rows
does not work now because of dot in whiten
'''
raise NotImplementedError | logarithm of probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
logpdf : float or array
probability density value of each random vector
this should be made to work with 2d x,
with multivariate normal vector in each row and iid across rows
does not work now because of dot in whiten | logpdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def cdf(self, x, **kwds):
'''cumulative distribution function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
kwds : dict
contains options for the numerical calculation of the cdf
Returns
-------
cdf : float or array
probability density value of each random vector
'''
raise NotImplementedError | cumulative distribution function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
kwds : dict
contains options for the numerical calculation of the cdf
Returns
-------
cdf : float or array
probability density value of each random vector | cdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def affine_transformed(self, shift, scale_matrix):
        '''affine transformation, defined in subclasses because of
        distribution-specific restrictions'''
#implemented in subclass at least for now
        raise NotImplementedError | affine transformation, defined in subclasses because of
        distribution-specific restrictions | affine_transformed | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
def whiten(self, x):
"""
whiten the data by linear transformation
Parameters
----------
x : array_like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
np.dot(x, self.cholsigmainv.T)
Notes
-----
This only does rescaling, it does not subtract the mean, use standardize
for this instead
See Also
--------
standardize : subtract mean and rescale to standardized random variable.
"""
x = np.asarray(x)
return np.dot(x, self.cholsigmainv.T) | whiten the data by linear transformation
Parameters
----------
x : array_like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
np.dot(x, self.cholsigmainv.T)
Notes
-----
This only does rescaling, it does not subtract the mean, use standardize
for this instead
See Also
--------
standardize : subtract mean and rescale to standardized random variable. | whiten | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def pdf(self, x):
'''probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
pdf : float or array
probability density value of each random vector
'''
return np.exp(self.logpdf(x)) | probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
pdf : float or array
probability density value of each random vector | pdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def standardize(self, x):
'''standardize the random variable, i.e. subtract mean and whiten
Parameters
----------
x : array_like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
np.dot(x - self.mean, self.cholsigmainv.T)
Notes
-----
See Also
--------
whiten : rescale random variable, standardize without subtracting mean.
'''
return self.whiten(x - self.mean) | standardize the random variable, i.e. subtract mean and whiten
Parameters
----------
x : array_like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
np.dot(x - self.mean, self.cholsigmainv.T)
Notes
-----
See Also
--------
whiten : rescale random variable, standardize without subtracting mean. | standardize | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def standardized(self):
'''return new standardized MVNormal instance
'''
return self.affine_transformed(-self.mean, self.cholsigmainv) | return new standardized MVNormal instance | standardized | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def normalize(self, x):
'''normalize the random variable, i.e. subtract mean and rescale
The distribution will have zero mean and sigma equal to correlation
Parameters
----------
x : array_like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
(x - self.mean)/std_sigma
Notes
-----
See Also
--------
whiten : rescale random variable, standardize without subtracting mean.
'''
std_ = np.atleast_2d(self.std_sigma)
return (x - self.mean)/std_ #/std_.T | normalize the random variable, i.e. subtract mean and rescale
The distribution will have zero mean and sigma equal to correlation
Parameters
----------
x : array_like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
(x - self.mean)/std_sigma
Notes
-----
See Also
--------
whiten : rescale random variable, standardize without subtracting mean. | normalize | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def normalized(self, demeaned=True):
'''return a normalized distribution where sigma=corr
if demeaned is True, then mean will be set to zero
'''
if demeaned:
mean_new = np.zeros_like(self.mean)
else:
mean_new = self.mean / self.std_sigma
sigma_new = self.corr
args = [getattr(self, ea) for ea in self.extra_args]
return self.__class__(mean_new, sigma_new, *args) | return a normalized distribution where sigma=corr
if demeaned is True, then mean will be set to zero | normalized | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def normalized2(self, demeaned=True):
'''return a normalized distribution where sigma=corr
second implementation for testing affine transformation
'''
if demeaned:
shift = -self.mean
else:
shift = self.mean * (1. / self.std_sigma - 1.)
return self.affine_transformed(shift, np.diag(1. / self.std_sigma)) | return a normalized distribution where sigma=corr
second implementation for testing affine transformation | normalized2 | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def std(self):
'''standard deviation, square root of diagonal elements of cov
'''
return np.sqrt(np.diag(self.cov)) | standard deviation, square root of diagonal elements of cov | std | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def std_sigma(self):
'''standard deviation, square root of diagonal elements of sigma
'''
return np.sqrt(np.diag(self.sigma)) | standard deviation, square root of diagonal elements of sigma | std_sigma | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def corr(self):
'''correlation matrix'''
return self.cov / np.outer(self.std, self.std) | correlation matrix | corr | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def marginal(self, indices):
'''return marginal distribution for variables given by indices
this should be correct for normal and t distribution
Parameters
----------
indices : array_like, int
list of indices of variables in the marginal distribution
Returns
-------
mvdist : instance
new instance of the same multivariate distribution class that
contains the marginal distribution of the variables given in
indices
'''
indices = np.asarray(indices)
mean_new = self.mean[indices]
sigma_new = self.sigma[indices[:,None], indices]
args = [getattr(self, ea) for ea in self.extra_args]
return self.__class__(mean_new, sigma_new, *args) | return marginal distribution for variables given by indices
this should be correct for the normal and t distributions
Parameters
----------
indices : array_like, int
list of indices of variables in the marginal distribution
Returns
-------
mvdist : instance
new instance of the same multivariate distribution class that
contains the marginal distribution of the variables given in
indices | marginal | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
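A short usage sketch for marginal, assuming the MVNormal class above; selecting indices just picks the corresponding sub-vector of the mean and sub-block of sigma:

import numpy as np
from statsmodels.sandbox.distributions.mv_normal import MVNormal

mean = np.array([0.0, 1.0, 2.0])
sigma = np.array([[1.0, 0.5, 0.2],
                  [0.5, 2.0, 0.3],
                  [0.2, 0.3, 3.0]])
mv = MVNormal(mean, sigma)
mv02 = mv.marginal([0, 2])   # joint marginal of the first and third variables
assert np.allclose(mv02.mean, mean[[0, 2]])
assert np.allclose(mv02.sigma, sigma[np.ix_([0, 2], [0, 2])])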
def whiten(self, x):
"""
whiten the data by linear transformation
Parameters
----------
X : array_like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
np.dot(x, self.cholcovinv.T)
Notes
-----
This only does rescaling; it does not subtract the mean. Use standardize
for that instead.
See Also
--------
standardize : subtract mean and rescale to standardized random variable.
"""
x = np.asarray(x)
if np.any(self.cov):
#return np.dot(self.cholcovinv, x)
return np.dot(x, self.cholcovinv.T)
else:
return x | whiten the data by linear transformation
Parameters
----------
X : array_like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
np.dot(x, self.cholcovinv.T)
Notes
-----
This only does rescaling; it does not subtract the mean. Use standardize
for that instead.
See Also
--------
standardize : subtract mean and rescale to standardized random variable. | whiten | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
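A sanity check of the whitening, assuming the MVNormal class above: the whitened sample should have covariance close to the identity even though the mean is left untouched.

import numpy as np
from statsmodels.sandbox.distributions.mv_normal import MVNormal

np.random.seed(0)
mv = MVNormal(np.array([5.0, -3.0]),
              np.array([[2.0, 0.8], [0.8, 1.0]]))
x = mv.rvs(size=100000)
xw = mv.whiten(x)                          # rescale only, no demeaning
print(np.cov(xw, rowvar=False).round(2))   # close to the 2x2 identity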
def rvs(self, size=1):
'''random variable
Parameters
----------
size : int or tuple
the number and shape of random variables to draw.
Returns
-------
rvs : ndarray
the returned random variables with shape given by size and the
dimension of the multivariate random vector as additional last
dimension
Notes
-----
uses numpy.random.multivariate_normal directly
'''
return np.random.multivariate_normal(self.mean, self.cov, size=size) | random variable
Parameters
----------
size : int or tuple
the number and shape of random variables to draw.
Returns
-------
rvs : ndarray
the returned random variables with shape given by size and the
dimension of the multivariate random vector as additional last
dimension
Notes
-----
uses numpy.random.multivariate_normal directly | rvs | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def cov(self):
'''covariance matrix'''
return self.sigma | covariance matrix | cov | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def affine_transformed(self, shift, scale_matrix):
'''return distribution of an affine transform
for full rank scale_matrix only
Parameters
----------
shift : array_like
shift of mean
scale_matrix : array_like
linear transformation matrix
Returns
-------
mvt : instance of MVNormal
instance of multivariate normal distribution given by affine
transformation
Notes
-----
the affine transformation is defined by
y = a + B x
where a is shift,
B is a scale matrix for the linear transformation
Notes
-----
This should also work to select marginal distributions, but not
tested for this case yet.
currently only tested because it's called by standardized
'''
B = scale_matrix #tmp variable
mean_new = np.dot(B, self.mean) + shift
sigma_new = np.dot(np.dot(B, self.sigma), B.T)
return MVNormal(mean_new, sigma_new) | return distribution of an affine transform
for full rank scale_matrix only
Parameters
----------
shift : array_like
shift of mean
scale_matrix : array_like
linear transformation matrix
Returns
-------
mvt : instance of MVNormal
instance of multivariate normal distribution given by affine
transformation
Notes
-----
the affine transformation is defined by
y = a + B x
where a is shift,
B is a scale matrix for the linear transformation
Notes
-----
This should also work to select marginal distributions, but not
tested for this case yet.
currently only tested because it's called by standardized | affine_transformed | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
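A quick worked check of the propagation rule mean_new = a + B*mean and sigma_new = B*sigma*B', assuming the MVNormal class above:

import numpy as np
from statsmodels.sandbox.distributions.mv_normal import MVNormal

mv = MVNormal(np.array([1.0, 2.0]),
              np.array([[1.0, 0.3], [0.3, 2.0]]))
a = np.array([0.5, -1.0])
B = np.array([[2.0, 0.0], [1.0, 1.0]])
mvn2 = mv.affine_transformed(a, B)
assert np.allclose(mvn2.mean, a + B @ mv.mean)
assert np.allclose(mvn2.sigma, B @ mv.sigma @ B.T)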
def rvs(self, size=1):
'''random variables with Student T distribution
Parameters
----------
size : int or tuple
the number and shape of random variables to draw.
Returns
-------
rvs : ndarray
the returned random variables with shape given by size and the
dimension of the multivariate random vector as additional last
dimension
- TODO: Not sure if this works for size tuples with len>1.
Notes
-----
generated as a chi-square mixture of multivariate normal random
variables.
does this require df>2 ?
'''
from .multivariate import multivariate_t_rvs
return multivariate_t_rvs(self.mean, self.sigma, df=self.df, n=size) | random variables with Student T distribution
Parameters
----------
size : int or tuple
the number and shape of random variables to draw.
Returns
-------
rvs : ndarray
the returned random variables with shape given by size and the
dimension of the multivariate random vector as additional last
dimension
- TODO: Not sure if this works for size tuples with len>1.
Notes
-----
generated as a chi-square mixture of multivariate normal random
variables.
does this require df>2 ? | rvs | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
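The chi-square mixture construction mentioned in the notes can be sketched directly with numpy; mvt_rvs_sketch below is a hypothetical stand-in for multivariate_t_rvs, not the statsmodels implementation itself:

import numpy as np

def mvt_rvs_sketch(mean, sigma, df, n, seed=None):
    # x = mean + z / sqrt(w / df), with z ~ N(0, sigma) and w ~ chi2(df)
    rng = np.random.default_rng(seed)
    z = rng.multivariate_normal(np.zeros(len(mean)), sigma, size=n)
    w = rng.chisquare(df, size=n) / df
    return np.asarray(mean) + z / np.sqrt(w)[:, None]

draws = mvt_rvs_sketch([0.0, 0.0], np.eye(2), df=5, n=10000, seed=0)
print(draws.mean(axis=0).round(2))   # near zero; the mean exists for df > 1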
def logpdf(self, x):
'''logarithm of probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
logpdf : float or array
probability density value of each random vector
'''
x = np.asarray(x)
df = self.df
nvars = self.nvars
x_whitened = self.whiten(x - self.mean) #should be float
llf = - nvars * np_log(df * np_pi)
llf -= self.logdetsigma
llf -= (df + nvars) * np_log(1 + np.sum(x_whitened**2,-1) / df)
llf *= 0.5
llf += sps_gamln((df + nvars) / 2.) - sps_gamln(df / 2.)
return llf | logarithm of probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
logpdf : float or array
probability density value of each random vector | logpdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
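The formula can be cross-checked against scipy.stats.multivariate_t (available in SciPy >= 1.6), assuming the MVT class from this module:

import numpy as np
from scipy import stats
from statsmodels.sandbox.distributions.mv_normal import MVT

mean = np.zeros(2)
sigma = np.array([[1.0, 0.5], [0.5, 2.0]])
mv = MVT(mean, sigma, 4)
x = np.array([[0.1, -0.2], [1.0, 1.5]])
ref = stats.multivariate_t(loc=mean, shape=sigma, df=4).logpdf(x)
assert np.allclose(mv.logpdf(x), ref)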
def cov(self):
'''covariance matrix
The covariance matrix for the t distribution does not exist for df<=2,
and is equal to sigma * df/(df-2) for df>2
'''
if self.df <= 2:
return np.nan * np.ones_like(self.sigma)
else:
return self.df / (self.df - 2.) * self.sigma | covariance matrix
The covariance matrix for the t distribution does not exist for df<=2,
and is equal to sigma * df/(df-2) for df>2 | cov | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
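A one-line illustration of the df/(df-2) inflation, assuming the MVT class above with cov exposed as a property, as in the module: for df=4 the factor is 4/2 = 2.

import numpy as np
from statsmodels.sandbox.distributions.mv_normal import MVT

mv = MVT(np.zeros(2), np.eye(2), 4)
assert np.allclose(mv.cov, 2 * np.eye(2))   # sigma * df/(df-2)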
def affine_transformed(self, shift, scale_matrix):
'''return distribution of a full rank affine transform
for full rank scale_matrix only
Parameters
----------
shift : array_like
shift of mean
scale_matrix : array_like
linear transformation matrix
Returns
-------
mvt : instance of MVT
instance of multivariate t distribution given by affine
transformation
Notes
-----
This checks for eigvals<=0, so there are possible problems for cases
with positive eigenvalues close to zero.
see: http://www.statlect.com/mcdstu1.htm
I'm not sure about the general case; non-full-rank transformations are not
multivariate t distributed.
y = a + B x
where a is shift,
B is full rank scale matrix with same dimension as sigma
'''
#full rank method could also be in elliptical and called with super
#after the rank check
B = scale_matrix #tmp variable as shorthand
#require a square transform with positive eigenvalues; a non-full-rank
#transform is not multivariate t distributed
if B.shape != (self.nvars, self.nvars) or (np.linalg.eigvals(B) <= 0).any():
raise ValueError('affine transform has to be full rank')
mean_new = np.dot(B, self.mean) + shift
sigma_new = np.dot(np.dot(B, self.sigma), B.T)
return MVT(mean_new, sigma_new, self.df) | return distribution of a full rank affine transform
for full rank scale_matrix only
Parameters
----------
shift : array_like
shift of mean
scale_matrix : array_like
linear transformation matrix
Returns
-------
mvt : instance of MVT
instance of multivariate t distribution given by affine
transformation
Notes
-----
This checks for eigvals<=0, so there are possible problems for cases
with positive eigenvalues close to zero.
see: http://www.statlect.com/mcdstu1.htm
I'm not sure about the general case; non-full-rank transformations are not
multivariate t distributed.
y = a + B x
where a is shift,
B is full rank scale matrix with same dimension as sigma | affine_transformed | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def mc2mvsk(args):
'''convert central moments to mean, variance, skew, kurtosis
'''
mc, mc2, mc3, mc4 = args
skew = np.divide(mc3, mc2**1.5)
kurt = np.divide(mc4, mc2**2.0) - 3.0
return (mc, mc2, skew, kurt) | convert central moments to mean, variance, skew, kurtosis | mc2mvsk | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/tests/check_moments.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/tests/check_moments.py | BSD-3-Clause |
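A worked example: a standard normal has central moments (0, 1, 0, 3), so the skew and the excess kurtosis both come out as zero (self-contained copy of the function above):

import numpy as np

def mc2mvsk(args):
    mc, mc2, mc3, mc4 = args
    skew = np.divide(mc3, mc2**1.5)
    kurt = np.divide(mc4, mc2**2.0) - 3.0
    return (mc, mc2, skew, kurt)

print(mc2mvsk((0.0, 1.0, 0.0, 3.0)))   # (0.0, 1.0, 0.0, 0.0)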
def nct_kurt_bug():
'''test for incorrect kurtosis of nct
D. Hogben, R. S. Pinkham, M. B. Wilk: The Moments of the Non-Central
t-Distribution. Biometrika, Vol. 48, No. 3/4 (Dec., 1961),
pp. 465-468
'''
from numpy.testing import assert_almost_equal
mvsk_10_1 = (1.08372, 1.325546, 0.39993, 1.2499424941142943)
assert_almost_equal(stats.nct.stats(10, 1, moments='mvsk'), mvsk_10_1, decimal=6)
c1=np.array([1.08372])
c2=np.array([.0755460, 1.25000])
c3 = np.array([.0297802, .580566])
c4 = np.array([0.0425458, 1.17491, 6.25])
#calculation for df=10, for arbitrary nc
nc = 1
mc1 = c1.item()
mc2 = (c2*nc**np.array([2,0])).sum()
mc3 = (c3*nc**np.array([3,1])).sum()
mc4 = (c4*nc**np.array([4,2,0])).sum()
mc2mvsk((mc1,mc2,mc3,mc4)) | test for incorrect kurtosis of nct
D. Hogben, R. S. Pinkham, M. B. Wilk: The Moments of the Non-Central
t-Distribution. Biometrika, Vol. 48, No. 3/4 (Dec., 1961),
pp. 465-468 | nct_kurt_bug | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/tests/check_moments.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/tests/check_moments.py | BSD-3-Clause |
def calc_factors(self, x=None, keepdim=0, addconst=True):
'''get factor decomposition of exogenous variables
This uses principal component analysis to obtain the factors. The number
of factors kept is the maximum that will be considered in the regression.
'''
if x is None:
x = self.exog
else:
x = np.asarray(x)
xred, fact, evals, evecs = pca(x, keepdim=keepdim, normalize=1)
self.exog_reduced = xred
#self.factors = fact
if addconst:
self.factors = sm.add_constant(fact, prepend=True)
self.hasconst = 1 #needs to be int
else:
self.factors = fact
self.hasconst = 0 #needs to be int
self.evals = evals
self.evecs = evecs | get factor decomposition of exogenous variables
This uses principal component analysis to obtain the factors. The number
of factors kept is the maximum that will be considered in the regression. | calc_factors | python | statsmodels/statsmodels | statsmodels/sandbox/datarich/factormodels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/datarich/factormodels.py | BSD-3-Clause |
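The pca helper used above lives in the statsmodels sandbox; an equivalent factor extraction can be sketched with a plain numpy SVD (pca_factors_sketch is a hypothetical illustration, not the sandbox function):

import numpy as np

def pca_factors_sketch(x, keepdim=0):
    # standardize columns, then use scaled left singular vectors as factors
    xs = (x - x.mean(0)) / x.std(0)
    u, s, vt = np.linalg.svd(xs, full_matrices=False)
    k = keepdim if keepdim > 0 else xs.shape[1]
    fact = u[:, :k] * s[:k]        # principal component scores
    xred = fact @ vt[:k]           # reduced-rank reconstruction
    evals = s**2 / (len(x) - 1)    # eigenvalues of the correlation matrix
    return xred, fact, evals, vt.T

x = np.random.default_rng(0).standard_normal((200, 4))
xred, fact, evals, evecs = pca_factors_sketch(x, keepdim=2)
print(evals.round(2))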
def fit_find_nfact(self, maxfact=None, skip_crossval=True, cv_iter=None):
'''estimate the model and selection criteria for up to maxfact factors
The selection criteria calculated are AIC, BIC, and R2_adj, and
additionally the cross-validation prediction error sum of squares if
`skip_crossval` is false. Cross-validation is not used by default because
it can be time consuming to calculate.
By default the cross-validation method is Leave-one-out on the full dataset.
A different cross-validation sample can be specified as an argument to
cv_iter.
Results are attached in `results_find_nfact`
'''
#print 'OLS on Factors'
if not hasattr(self, 'factors'):
self.calc_factors()
hasconst = self.hasconst
if maxfact is None:
maxfact = self.factors.shape[1] - hasconst
if (maxfact+hasconst) < 1:
raise ValueError('nothing to do, number of factors (incl. constant) should ' +
'be at least 1')
#temporary safety
maxfact = min(maxfact, 10)
y0 = self.endog
results = []
#xred, fact, eva, eve = pca(x0, keepdim=0, normalize=1)
for k in range(1, maxfact+hasconst): #k now includes the constant
#xred, fact, eva, eve = pca(x0, keepdim=k, normalize=1)
# this is faster and same result
fact = self.factors[:,:k]
res = sm.OLS(y0, fact).fit()
## print 'k =', k
## print res.params
## print 'aic: ', res.aic
## print 'bic: ', res.bic
## print 'llf: ', res.llf
## print 'R2 ', res.rsquared
## print 'R2 adj', res.rsquared_adj
if not skip_crossval:
if cv_iter is None:
cv_iter = LeaveOneOut(len(y0))
prederr2 = 0.
for inidx, outidx in cv_iter:
res_l1o = sm.OLS(y0[inidx], fact[inidx,:]).fit()
#print data.endog[outidx], res.model.predict(data.exog[outidx,:]),
prederr2 += (y0[outidx] -
res_l1o.model.predict(res_l1o.params, fact[outidx,:]))**2.
else:
prederr2 = np.nan
results.append([k, res.aic, res.bic, res.rsquared_adj, prederr2])
self.results_find_nfact = results = np.array(results)
self.best_nfact = np.r_[(np.argmin(results[:,1:3],0), np.argmax(results[:,3],0),
np.argmin(results[:,-1],0))] | estimate the model and selection criteria for up to maxfact factors
The selection criteria calculated are AIC, BIC, and R2_adj, and
additionally the cross-validation prediction error sum of squares if
`skip_crossval` is false. Cross-validation is not used by default because
it can be time consuming to calculate.
By default the cross-validation method is Leave-one-out on the full dataset.
A different cross-validation sample can be specified as an argument to
cv_iter.
Results are attached in `results_find_nfact` | fit_find_nfact | python | statsmodels/statsmodels | statsmodels/sandbox/datarich/factormodels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/datarich/factormodels.py | BSD-3-Clause |
def summary_find_nfact(self):
'''provides a summary for the selection of the number of factors
Returns
-------
sumstr : str
summary of the results for selecting the number of factors
'''
if not hasattr(self, 'results_find_nfact'):
self.fit_find_nfact()
results = self.results_find_nfact
sumstr = ''
sumstr += '\n' + 'Best result for k, by AIC, BIC, R2_adj, L1O'
# best = np.r_[(np.argmin(results[:,1:3],0), np.argmax(results[:,3],0),
# np.argmin(results[:,-1],0))]
sumstr += '\n' + ' '*19 + '%5d %4d %6d %5d' % tuple(self.best_nfact)
from statsmodels.iolib.table import SimpleTable
headers = 'k, AIC, BIC, R2_adj, L1O'.split(', ')
numformat = ['%6d'] + ['%10.3f']*4 #'%10.4f'
txt_fmt1 = dict(data_fmts = numformat)
tabl = SimpleTable(results, headers, None, txt_fmt=txt_fmt1)
sumstr += '\n' + "PCA regression on simulated data,"
sumstr += '\n' + "DGP: 2 factors and 4 explanatory variables"
sumstr += '\n' + tabl.__str__()
sumstr += '\n' + "Notes: k is number of components of PCA,"
sumstr += '\n' + " constant is added additionally"
sumstr += '\n' + " k=0 means regression on constant only"
sumstr += '\n' + " L1O: sum of squared prediction errors for leave-one-out"
return sumstr | provides a summary for the selection of the number of factors
Returns
-------
sumstr : str
summary of the results for selecting the number of factors | summary_find_nfact | python | statsmodels/statsmodels | statsmodels/sandbox/datarich/factormodels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/datarich/factormodels.py | BSD-3-Clause |
def _est_b_bw(self):
"""
Computes the (beta) coefficients and the bandwidths.
Minimizes ``cv_loo`` with respect to ``b`` and ``bw``.
"""
params0 = np.random.uniform(size=(self.k_linear + self.K, ))
b_bw = optimize.fmin(self.cv_loo, params0, disp=0)
b = b_bw[0 : self.k_linear]
bw = b_bw[self.k_linear:]
#bw = self._set_bw_bounds(np.asarray(bw))
return b, bw | Computes the (beta) coefficients and the bandwidths.
Minimizes ``cv_loo`` with respect to ``b`` and ``bw``. | _est_b_bw | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernel_extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernel_extras.py | BSD-3-Clause |
def cv_loo(self, params):
"""
Similar to the cross validation leave-one-out estimator.
Modified to reflect the linear components.
Parameters
----------
params : array_like
Vector consisting of the coefficients (b) and the bandwidths (bw).
The first ``k_linear`` elements are the coefficients.
Returns
-------
L : float
The value of the objective function
References
----------
See p.254 in [1]
"""
params = np.asarray(params)
b = params[0 : self.k_linear]
bw = params[self.k_linear:]
LOO_X = LeaveOneOut(self.exog)
LOO_Y = LeaveOneOut(self.endog).__iter__()
LOO_Z = LeaveOneOut(self.exog_nonparametric).__iter__()
Xb = np.dot(self.exog, b)[:,None]
L = 0
for ii, X_not_i in enumerate(LOO_X):
Y = next(LOO_Y)
Z = next(LOO_Z)
Xb_j = np.dot(X_not_i, b)[:,None]
Yx = Y - Xb_j
G = self.func(bw, endog=Yx, exog=-Z,
data_predict=-self.exog_nonparametric[ii, :])[0]
lt = Xb[ii, :] #.sum() # linear term
L += (self.endog[ii] - lt - G) ** 2
return L | Similar to the cross validation leave-one-out estimator.
Modified to reflect the linear components.
Parameters
----------
params : array_like
Vector consisting of the coefficients (b) and the bandwidths (bw).
The first ``k_linear`` elements are the coefficients.
Returns
-------
L : float
The value of the objective function
References
----------
See p.254 in [1] | cv_loo | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernel_extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernel_extras.py | BSD-3-Clause |
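The objective mirrors the usual leave-one-out criterion; for intuition, a minimal fully parametric analogue (plain OLS refits, hypothetical helper loo_press) is:

import numpy as np

def loo_press(y, X):
    # leave-one-out prediction error sum of squares
    n = len(y)
    press = 0.0
    for i in range(n):
        mask = np.arange(n) != i
        beta, *_ = np.linalg.lstsq(X[mask], y[mask], rcond=None)
        press += (y[i] - X[i] @ beta) ** 2
    return press

rng = np.random.default_rng(0)
X = np.column_stack([np.ones(50), rng.standard_normal(50)])
y = X @ np.array([1.0, 2.0]) + rng.standard_normal(50)
print(loo_press(y, X))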
def fit(self, exog_predict=None, exog_nonparametric_predict=None):
"""Computes fitted values and marginal effects"""
if exog_predict is None:
exog_predict = self.exog
else:
exog_predict = _adjust_shape(exog_predict, self.k_linear)
if exog_nonparametric_predict is None:
exog_nonparametric_predict = self.exog_nonparametric
else:
exog_nonparametric_predict = _adjust_shape(exog_nonparametric_predict, self.K)
N_data_predict = np.shape(exog_nonparametric_predict)[0]
mean = np.empty((N_data_predict,))
mfx = np.empty((N_data_predict, self.K))
Y = self.endog - np.dot(exog_predict, self.b)[:,None]
for i in range(N_data_predict):
mean_mfx = self.func(self.bw, Y, self.exog_nonparametric,
data_predict=exog_nonparametric_predict[i, :])
mean[i] = mean_mfx[0]
mfx_c = np.squeeze(mean_mfx[1])
mfx[i, :] = mfx_c
return mean, mfx | Computes fitted values and marginal effects | fit | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernel_extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernel_extras.py | BSD-3-Clause |
def inner_cont(polys, lower, upper, weight=None):
'''inner product of continuous function (with weight=1)
Parameters
----------
polys : list of callables
polynomial instances
lower : float
lower integration limit
upper : float
upper integration limit
weight : callable or None
weighting function
Returns
-------
innp : ndarray
symmetric 2d square array with the inner products of all function pairs
err : ndarray
numerical error estimate from scipy.integrate.quad, same dimension as innp
Examples
--------
>>> from scipy.special import chebyt
>>> polys = [chebyt(i) for i in range(4)]
>>> r, e = inner_cont(polys, -1, 1)
>>> r
array([[ 2. , 0. , -0.66666667, 0. ],
[ 0. , 0.66666667, 0. , -0.4 ],
[-0.66666667, 0. , 0.93333333, 0. ],
[ 0. , -0.4 , 0. , 0.97142857]])
'''
n_polys = len(polys)
innerprod = np.empty((n_polys, n_polys))
innerprod.fill(np.nan)
interr = np.zeros((n_polys, n_polys))
for i in range(n_polys):
for j in range(i+1):
p1 = polys[i]
p2 = polys[j]
if weight is not None:
innp, err = integrate.quad(lambda x: p1(x)*p2(x)*weight(x),
lower, upper)
else:
innp, err = integrate.quad(lambda x: p1(x)*p2(x), lower, upper)
innerprod[i,j] = innp
interr[i,j] = err
if not i == j:
innerprod[j,i] = innp
interr[j,i] = err
return innerprod, interr | inner product of continuous function (with weight=1)
Parameters
----------
polys : list of callables
polynomial instances
lower : float
lower integration limit
upper : float
upper integration limit
weight : callable or None
weighting function
Returns
-------
innp : ndarray
symmetric 2d square array with the inner products of all function pairs
err : ndarray
numerical error estimate from scipy.integrate.quad, same dimension as innp
Examples
--------
>>> from scipy.special import chebyt
>>> polys = [chebyt(i) for i in range(4)]
>>> r, e = inner_cont(polys, -1, 1)
>>> r
array([[ 2. , 0. , -0.66666667, 0. ],
[ 0. , 0.66666667, 0. , -0.4 ],
[-0.66666667, 0. , 0.93333333, 0. ],
[ 0. , -0.4 , 0. , 0.97142857]]) | inner_cont | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/densityorthopoly.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/densityorthopoly.py | BSD-3-Clause |
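The weight argument handles non-uniform weights as well; for instance the physicists' Hermite polynomials are orthogonal under exp(-x**2) on the whole real line, with squared norms sqrt(pi) * 2**n * n!:

import numpy as np
from scipy.special import hermite
from statsmodels.sandbox.nonparametric.densityorthopoly import inner_cont

polys = [hermite(i) for i in range(3)]
r, e = inner_cont(polys, -np.inf, np.inf,
                  weight=lambda x: np.exp(-x**2))
print(r.round(4))   # diagonal ~ [1.7725, 3.5449, 14.1796], off-diagonal ~ 0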
def is_orthonormal_cont(polys, lower, upper, rtol=0, atol=1e-08):
'''check whether functions are orthonormal
Parameters
----------
polys : list of polynomials or function
Returns
-------
is_orthonormal : bool
is False if the inner products are not close to 0 or 1
Notes
-----
this stops as soon as the first deviation from orthonormality is found.
Examples
--------
>>> from scipy.special import chebyt
>>> polys = [chebyt(i) for i in range(4)]
>>> r, e = inner_cont(polys, -1, 1)
>>> r
array([[ 2. , 0. , -0.66666667, 0. ],
[ 0. , 0.66666667, 0. , -0.4 ],
[-0.66666667, 0. , 0.93333333, 0. ],
[ 0. , -0.4 , 0. , 0.97142857]])
>>> is_orthonormal_cont(polys, -1, 1, atol=1e-6)
False
>>> polys = [ChebyTPoly(i) for i in range(4)]
>>> r, e = inner_cont(polys, -1, 1)
>>> r
array([[ 1.00000000e+00, 0.00000000e+00, -9.31270888e-14,
0.00000000e+00],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00,
-9.47850712e-15],
[ -9.31270888e-14, 0.00000000e+00, 1.00000000e+00,
0.00000000e+00],
[ 0.00000000e+00, -9.47850712e-15, 0.00000000e+00,
1.00000000e+00]])
>>> is_orthonormal_cont(polys, -1, 1, atol=1e-6)
True
'''
for i in range(len(polys)):
for j in range(i+1):
p1 = polys[i]
p2 = polys[j]
innerprod = integrate.quad(lambda x: p1(x)*p2(x), lower, upper)[0]
#print i,j, innerprod
if not np.allclose(innerprod, i==j, rtol=rtol, atol=atol):
return False
return True | check whether functions are orthonormal
Parameters
----------
polys : list of polynomials or function
Returns
-------
is_orthonormal : bool
is False if the inner products are not close to 0 or 1
Notes
-----
this stops as soon as the first deviation from orthonormality is found.
Examples
--------
>>> from scipy.special import chebyt
>>> polys = [chebyt(i) for i in range(4)]
>>> r, e = inner_cont(polys, -1, 1)
>>> r
array([[ 2. , 0. , -0.66666667, 0. ],
[ 0. , 0.66666667, 0. , -0.4 ],
[-0.66666667, 0. , 0.93333333, 0. ],
[ 0. , -0.4 , 0. , 0.97142857]])
>>> is_orthonormal_cont(polys, -1, 1, atol=1e-6)
False
>>> polys = [ChebyTPoly(i) for i in range(4)]
>>> r, e = inner_cont(polys, -1, 1)
>>> r
array([[ 1.00000000e+00, 0.00000000e+00, -9.31270888e-14,
0.00000000e+00],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00,
-9.47850712e-15],
[ -9.31270888e-14, 0.00000000e+00, 1.00000000e+00,
0.00000000e+00],
[ 0.00000000e+00, -9.47850712e-15, 0.00000000e+00,
1.00000000e+00]])
>>> is_orthonormal_cont(polys, -1, 1, atol=1e-6)
True | is_orthonormal_cont | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/densityorthopoly.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/densityorthopoly.py | BSD-3-Clause |
def fit(self, x, polybase=None, order=5, limits=None):
'''estimate the orthogonal polynomial approximation to the density
'''
if polybase is None:
polys = self.polys[:order]
else:
self.polybase = polybase
self.polys = polys = [polybase(i) for i in range(order)]
#move to init ?
if not hasattr(self, 'offsetfac'):
self.offsetfac = polys[0].offsetfactor
xmin, xmax = x.min(), x.max()
if limits is None:
self.offset = offset = (xmax - xmin) * self.offsetfac
limits = self.limits = (xmin - offset, xmax + offset)
interval_length = limits[1] - limits[0]
xinterval = xmax - xmin
# need to cover (half-)open intervals
self.shrink = 1. / interval_length #xinterval/interval_length
offset = (interval_length - xinterval ) / 2.
self.shift = xmin - offset
self.x = x = self._transform(x)
coeffs = [(p(x)).mean() for p in polys]
self.coeffs = coeffs
self.polys = polys
self._verify() #verify that it is a proper density
return self #coeffs, polys | estimate the orthogonal polynomial approximation to the density | fit | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/densityorthopoly.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/densityorthopoly.py | BSD-3-Clause |
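A usage sketch, assuming the DensityOrthoPoly class and the ChebyTPoly basis from this module; the fitted object is evaluated through __call__:

import numpy as np
from statsmodels.sandbox.nonparametric.densityorthopoly import (
    DensityOrthoPoly, ChebyTPoly)

x = np.random.default_rng(0).beta(2.0, 5.0, size=2000)
dop = DensityOrthoPoly().fit(x, polybase=ChebyTPoly, order=5)
grid = np.linspace(0.05, 0.95, 5)
print(dop(grid).round(3))   # estimated density values on the grid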
def __call__(self, xeval):
'''alias for evaluate, except no order argument'''
return self.evaluate(xeval) | alias for evaluate, except no order argument | __call__ | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/densityorthopoly.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/densityorthopoly.py | BSD-3-Clause |
def _verify(self):
'''check for bona fide density correction
currently only checks that the density integrates to 1;
non-negativity is not implemented yet
'''
#watch out for circular/recursive usage
#evaluate uses domain of data, we stay offset away from bounds
intdomain = self.limits #self.polys[0].intdomain
self._corfactor = 1./integrate.quad(self.evaluate, *intdomain)[0]
#self._corshift = 0
#self._corfactor
return self._corfactor | check for bona fide density correction
currently only checks that the density integrates to 1;
non-negativity is not implemented yet | _verify | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/densityorthopoly.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/densityorthopoly.py | BSD-3-Clause |
def _correction(self, x):
'''bona fide density correction
affine shift of density to make it into a proper density
'''
if self._corfactor != 1:
x *= self._corfactor
if self._corshift != 0:
x += self._corshift
return x | bona fide density correction
affine shift of density to make it into a proper density | _correction | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/densityorthopoly.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/densityorthopoly.py | BSD-3-Clause |
def _transform(self, x): # limits=None):
'''transform observation to the domain of the density
uses the shrink and shift attributes, which are set in fit, so that the
data stays inside the polynomial domain
'''
#use domain from first instance
#class does not have domain self.polybase.domain[0] AttributeError
domain = self.polys[0].domain
ilen = (domain[1] - domain[0])
shift = self.shift - domain[0]/self.shrink/ilen
shrink = self.shrink * ilen
return (x - shift) * shrink | transform observation to the domain of the density
uses the shrink and shift attributes, which are set in fit, so that the data stays inside the polynomial domain | _transform | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/densityorthopoly.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/densityorthopoly.py | BSD-3-Clause |
def getH(self):
"""Getter for kernel bandwidth, H"""
return self._H | Getter for kernel bandwidth, H | getH | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause |
def setH(self, value):
"""Setter for kernel bandwidth, H"""
self._H = value | Setter for kernel bandwidth, H | setH | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause |
def _kernweight(self, x ):
"""returns the kernel weight for the independent multivariate kernel"""
if isinstance( self._kernels, CustomKernel ):
## Radial case
#d = x.T * x
#x is matrix, 2d, element wise sqrt looks wrong
#d = np.sqrt( x.T * x )
x = np.asarray(x)
#d = np.sqrt( (x * x).sum(-1) )
d = (x * x).sum(-1)
return self._kernels( np.asarray(d) ) | returns the kernel weight for the independent multivariate kernel | _kernweight | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause |
def __call__(self, x):
"""
This simply returns the value of the kernel function at x
Does the same as weight if the function is normalised
"""
return self._kernweight(x) | This simply returns the value of the kernel function at x
Does the same as weight if the function is normalised | __call__ | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause |
def __init__(self, shape, h = 1.0, domain = None, norm = None):
"""
shape should be a function taking and returning numeric type.
For sanity it should always return positive or zero but this is not
enforced in case you want to do weird things. Bear in mind that the
statistical tests etc. may not be valid for non-positive kernels.
The bandwidth of the kernel is supplied as h.
You may specify a domain as a list of 2 values [min, max], in which case
kernel will be treated as zero outside these values. This will speed up
calculation.
You may also specify the normalisation constant for the supplied Kernel.
If you do this number will be stored and used as the normalisation
without calculation. It is recommended you do this if you know the
constant, to speed up calculation. In particular if the shape function
provided is already normalised you should provide norm = 1.0.
Warning: I think several calculations assume that the kernel is
normalized. No tests for non-normalized kernel.
"""
self._normconst = norm # a value or None, if None, then calculate
self.domain = domain
self.weights = None
if callable(shape):
self._shape = shape
else:
raise TypeError("shape must be a callable object/function")
self._h = h
self._L2Norm = None
self._kernel_var = None
self._normal_reference_constant = None
self._order = None | shape should be a function taking and returning numeric type.
For sanity it should always return positive or zero but this is not
enforced in case you want to do weird things. Bear in mind that the
statistical tests etc. may not be valid for non-positive kernels.
The bandwidth of the kernel is supplied as h.
You may specify a domain as a list of 2 values [min, max], in which case
kernel will be treated as zero outside these values. This will speed up
calculation.
You may also specify the normalisation constant for the supplied Kernel.
If you do, this number will be stored and used as the normalisation
without calculation. It is recommended you do this if you know the
constant, to speed up calculation. In particular if the shape function
provided is already normalised you should provide norm = 1.0.
Warning: I think several calculations assume that the kernel is
normalized. No tests for non-normalized kernel. | __init__ | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause |
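A construction sketch: an Epanechnikov kernel passed in as a CustomKernel, with its known normalisation constant supplied so no integral is computed (the shape below already integrates to 1 on [-1, 1]):

import numpy as np
from statsmodels.sandbox.nonparametric.kernels import CustomKernel

epan = CustomKernel(lambda x: 0.75 * (1.0 - x**2), h=0.5,
                    domain=[-1.0, 1.0], norm=1.0)
# the instance can then be handed to the sandbox smoothers that accept a
# kernel argument; evaluating it returns the shape value at a point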
def geth(self):
"""Getter for kernel bandwidth, h"""
return self._h | Getter for kernel bandwidth, h | geth | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause |
def seth(self, value):
"""Setter for kernel bandwidth, h"""
self._h = value | Setter for kernel bandwidth, h | seth | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause |