def fitgmm(self, start, weights=None, optim_method='bfgs', optim_args=None):
'''estimate parameters using GMM
Parameters
----------
start : array_like
starting values for minimization
weights : ndarray
weighting matrix for moment conditions. If weights is None, then
the identity matrix is used
Returns
-------
paramest : ndarray
estimated parameters
Notes
-----
todo: add fixed parameter option, not here ???
uses scipy.optimize.fmin
'''
## if not fixed is None: #fixed not defined in this version
## raise NotImplementedError
# TODO: should start_weights only be in `fit`
if weights is None:
weights = self.start_weights(inv=False)
if optim_args is None:
optim_args = {}
if optim_method == 'nm':
optimizer = optimize.fmin
elif optim_method == 'bfgs':
optimizer = optimize.fmin_bfgs
# TODO: add score
optim_args['fprime'] = self.score #lambda params: self.score(params, weights)
elif optim_method == 'ncg':
optimizer = optimize.fmin_ncg
optim_args['fprime'] = self.score
elif optim_method == 'cg':
optimizer = optimize.fmin_cg
optim_args['fprime'] = self.score
elif optim_method == 'fmin_l_bfgs_b':
optimizer = optimize.fmin_l_bfgs_b
optim_args['fprime'] = self.score
elif optim_method == 'powell':
optimizer = optimize.fmin_powell
elif optim_method == 'slsqp':
optimizer = optimize.fmin_slsqp
else:
raise ValueError('optimizer method not available')
if DEBUG:
print(np.linalg.det(weights))
#TODO: add other optimization options and results
return optimizer(self.gmmobjective, start, args=(weights,),
**optim_args)

fitgmm | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
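A minimal standalone sketch of the two-step recipe `fitgmm` implements: minimize the quadratic form in the mean moment conditions, first with identity weights, then with weights estimated from the first-step moments. The simulated data, the scalar-parameter `momcond`, and the use of `scipy.optimize.minimize` (instead of the `fmin_*` wrappers above) are illustrative assumptions, not the class's actual setup.

```python
import numpy as np
from scipy import optimize

rng = np.random.default_rng(0)
n = 500
z = rng.normal(size=(n, 2))                         # two instruments (made up)
x = z @ np.array([0.8, 0.4]) + rng.normal(size=n)
y = 1.5 * x + rng.normal(size=n)                    # true coefficient: 1.5

def momcond(beta):
    # instrument times residual, shape (nobs, nmoms)
    return z * (y - beta[0] * x)[:, None]

def gmmobjective(beta, weights):
    m = momcond(beta).mean(0)
    return m @ weights @ m

# step 1: identity weights, as in start_weights
b1 = optimize.minimize(gmmobjective, x0=[0.0], args=(np.eye(2),)).x
# step 2: efficient weights = inverse covariance of first-step moments
w2 = np.linalg.pinv(np.cov(momcond(b1).T, bias=True))
b2 = optimize.minimize(gmmobjective, x0=b1, args=(w2,)).x   # close to 1.5
```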
def fitgmm_cu(self, start, optim_method='bfgs', optim_args=None):
'''estimate parameters using continuously updating GMM
Parameters
----------
start : array_like
starting values for minimization
Returns
-------
paramest : ndarray
estimated parameters
Notes
-----
todo: add fixed parameter option, not here ???
uses scipy.optimize.fmin
'''
## if not fixed is None: #fixed not defined in this version
## raise NotImplementedError
if optim_args is None:
optim_args = {}
if optim_method == 'nm':
optimizer = optimize.fmin
elif optim_method == 'bfgs':
optimizer = optimize.fmin_bfgs
optim_args['fprime'] = self.score_cu
elif optim_method == 'ncg':
optimizer = optimize.fmin_ncg
else:
raise ValueError('optimizer method not available')
#TODO: add other optimization options and results
return optimizer(self.gmmobjective_cu, start, args=(), **optim_args)

fitgmm_cu | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def start_weights(self, inv=True):
"""Create identity matrix for starting weights"""
return np.eye(self.nmoms)

start_weights | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def gmmobjective(self, params, weights):
'''
objective function for GMM minimization
Parameters
----------
params : ndarray
parameter values at which objective is evaluated
weights : ndarray
weighting matrix
Returns
-------
jval : float
value of objective function
'''
moms = self.momcond_mean(params)
return np.dot(np.dot(moms, weights), moms)

gmmobjective | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def gmmobjective_cu(self, params, weights_method='cov',
wargs=()):
'''
objective function for continuously updating GMM minimization
Parameters
----------
params : ndarray
parameter values at which objective is evaluated
Returns
-------
jval : float
value of objective function
'''
moms = self.momcond(params)
inv_weights = self.calc_weightmatrix(moms, weights_method=weights_method,
wargs=wargs)
weights = np.linalg.pinv(inv_weights)
self._weights_cu = weights # store if we need it later
return np.dot(np.dot(moms.mean(0), weights), moms.mean(0))

gmmobjective_cu | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def momcond_mean(self, params):
'''
mean of moment conditions
'''
momcond = self.momcond(params)
self.nobs_moms, self.k_moms = momcond.shape
return momcond.mean(0)

momcond_mean | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def gradient_momcond(self, params, epsilon=1e-4, centered=True):
'''gradient of moment conditions
Parameters
----------
params : ndarray
parameter at which the moment conditions are evaluated
epsilon : float
stepsize for finite difference calculation
centered : bool
This refers to the finite difference calculation. If `centered`
is true, then the centered finite difference calculation is
used. Otherwise the one-sided forward differences are used.
TODO: looks like not used yet
missing argument `weights`
'''
momcond = self.momcond_mean
# TODO: approx_fprime has centered keyword
if centered:
gradmoms = (approx_fprime(params, momcond, epsilon=epsilon) +
approx_fprime(params, momcond, epsilon=-epsilon))/2
else:
gradmoms = approx_fprime(params, momcond, epsilon=epsilon)
return gradmoms

gradient_momcond | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
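Averaging `+epsilon` and `-epsilon` forward steps, as above, is just a centered finite difference. A sketch of the same scheme with an explicit loop (`centered_jacobian` is a hypothetical helper; `approx_fprime` in statsmodels has its own conventions):

```python
import numpy as np

def centered_jacobian(params, f, epsilon=1e-4):
    # one column of d f / d params_i per parameter; rows are moment conditions
    params = np.asarray(params, dtype=float)
    cols = []
    for i in range(params.size):
        e = np.zeros_like(params)
        e[i] = epsilon
        # centered difference: O(epsilon**2) error vs O(epsilon) for one-sided
        cols.append((np.asarray(f(params + e)) - np.asarray(f(params - e)))
                    / (2 * epsilon))
    return np.column_stack(cols)
```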
def score(self, params, weights, epsilon=None, centered=True):
"""Score"""
deriv = approx_fprime(params, self.gmmobjective, args=(weights,),
centered=centered, epsilon=epsilon)
return deriv

score | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def score_cu(self, params, epsilon=None, centered=True):
"""Score cu"""
deriv = approx_fprime(params, self.gmmobjective_cu, args=(),
centered=centered, epsilon=epsilon)
return deriv

score_cu | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def q(self):
"""Objective function at params"""
return self.model.gmmobjective(self.params, self.weights)

q | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def jval(self):
"""nobs_moms attached by momcond_mean"""
return self.q * self.model.nobs_moms

jval | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def calc_cov_params(self, moms, gradmoms, weights=None, use_weights=False,
has_optimal_weights=True,
weights_method='cov', wargs=()):
'''calculate covariance of parameter estimates
not all options tried out yet
If a weights matrix is given, then the formula used to calculate cov_params
depends on whether has_optimal_weights is true.
If no weights are given, then the weight matrix is calculated with
the given method, and has_optimal_weights is assumed to be true.
(API Note: The latter assumption could be changed if we allow for
has_optimal_weights=None.)
'''
nobs = moms.shape[0]
if weights is None:
#omegahat = self.model.calc_weightmatrix(moms, method=method, wargs=wargs)
#has_optimal_weights = True
#add other options, Barzen, ... longrun var estimators
# TODO: this might still be inv_weights after fititer
weights = self.weights
else:
pass
#omegahat = weights #2 different names used,
#TODO: this is wrong, I need an estimate for omega
if use_weights:
omegahat = weights
else:
omegahat = self.model.calc_weightmatrix(
moms,
weights_method=weights_method,
wargs=wargs,
params=self.params)
if has_optimal_weights:
# TODO: make has_optimal_weights depend on convergence or iter > 2
cov = np.linalg.inv(np.dot(gradmoms.T,
np.dot(np.linalg.inv(omegahat), gradmoms)))
else:
gw = np.dot(gradmoms.T, weights)
gwginv = np.linalg.inv(np.dot(gw, gradmoms))
cov = np.dot(np.dot(gwginv, np.dot(np.dot(gw, omegahat), gw.T)), gwginv)
#cov /= nobs
return cov/nobs

calc_cov_params | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
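Restated as formulas, the two branches above are the efficient-GMM covariance cov = (G' Omega^{-1} G)^{-1} / n and the general sandwich cov = (G'WG)^{-1} G'W Omega WG (G'WG)^{-1} / n, with G the Jacobian of the mean moments and Omega the moment covariance. A compact numpy restatement (a sketch; argument names follow the method above):

```python
import numpy as np

def gmm_cov_params(gradmoms, omegahat, weights, nobs, has_optimal_weights=True):
    G, W = gradmoms, weights
    if has_optimal_weights:
        # efficient GMM: (G' Omega^{-1} G)^{-1} / nobs
        return np.linalg.inv(G.T @ np.linalg.inv(omegahat) @ G) / nobs
    # sandwich form for an arbitrary weight matrix W
    gw = G.T @ W
    bread = np.linalg.inv(gw @ G)
    return bread @ gw @ omegahat @ gw.T @ bread / nobs
```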
def bse_(self):
'''standard error of the parameter estimates
'''
return self.get_bse()

bse_ | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def get_bse(self, **kwds):
'''standard error of the parameter estimates with options
Parameters
----------
kwds : optional keywords
options for calculating cov_params
Returns
-------
bse : ndarray
estimated standard error of parameter estimates
'''
return np.sqrt(np.diag(self.cov_params(**kwds)))

get_bse | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def jtest(self):
'''overidentification test
I guess this is missing a division by nobs,
what's the normalization in jval ?
'''
jstat = self.jval
nparams = self.params.size #self.nparams
df = self.model.nmoms - nparams
return jstat, stats.chi2.sf(jstat, df), df

jtest | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
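As a concrete reading of `jtest`: with, say, `jval = 3.2`, five moment conditions and three parameters, the statistic is compared to a chi-square with two degrees of freedom (the numbers are made up for illustration):

```python
from scipy import stats

jstat, df = 3.2, 5 - 3
pval = stats.chi2.sf(jstat, df)   # about 0.20: no evidence against the
                                  # overidentifying restrictions at this jval
```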
def compare_j(self, other):
'''overidentification test for comparing two nested gmm estimates
This assumes that some moment restrictions have been dropped in one
of the GMM estimates relative to the other.
Not tested yet
We are comparing two separately estimated models, that use different
weighting matrices. It is not guaranteed that the resulting
difference is positive.
TODO: Check in which cases Stata programs use the same weights
'''
jstat1 = self.jval
k_moms1 = self.model.nmoms
jstat2 = other.jval
k_moms2 = other.model.nmoms
jdiff = jstat1 - jstat2
df = k_moms1 - k_moms2
if df < 0:
# possible nested in other way, TODO allow this or not
# flip sign instead of absolute
df = - df
jdiff = - jdiff
return jdiff, stats.chi2.sf(jdiff, df), df

compare_j | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
Default is `var_##` for each of the p regressors
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
#TODO: add a summary text for options that have been used
jvalue, jpvalue, jdf = self.jtest()
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['GMM']),
('Date:', None),
('Time:', None),
('No. Observations:', None),
#('Df Residuals:', None), #[self.df_resid]), #TODO: spelling
#('Df Model:', None), #[self.df_model])
]
top_right = [#('R-squared:', ["%#8.3f" % self.rsquared]),
#('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]),
('Hansen J:', ["%#8.4g" % jvalue] ),
('Prob (Hansen J):', ["%#6.3g" % jpvalue]),
#('F-statistic:', ["%#8.4g" % self.fvalue] ),
#('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
#('Log-Likelihood:', None), #["%#6.4g" % self.llf]),
#('AIC:', ["%#8.4g" % self.aic]),
#('BIC:', ["%#8.4g" % self.bic])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Results"
# create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
return smry

summary | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def fitstart(self):
"""Create array of zeros"""
return np.zeros(self.exog.shape[1])

fitstart | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def start_weights(self, inv=True):
"""Starting weights"""
zz = np.dot(self.instrument.T, self.instrument)
nobs = self.instrument.shape[0]
if inv:
return zz / nobs
else:
return np.linalg.pinv(zz / nobs)

start_weights | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def get_error(self, params):
"""Get error at params"""
return self.endog - self.predict(params)

get_error | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def predict(self, params, exog=None):
"""Get prediction at params"""
if exog is None:
exog = self.exog
return np.dot(exog, params)

predict | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def momcond(self, params):
"""Error times instrument"""
instrument = self.instrument
return instrument * self.get_error(params)[:, None]

momcond | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def fitgmm(self, start, weights=None, optim_method=None, **kwds):
'''estimate parameters using GMM for linear model
Uses closed form expression instead of nonlinear optimizers
Parameters
----------
start : not used
starting values for minimization, not used, only for consistency
of method signature
weights : ndarray
weighting matrix for moment conditions. If weights is None, then
the identity matrix is used
optim_method : not used,
optimization method, not used, only for consistency of method
signature
**kwds : keyword arguments
not used, will be silently ignored (for compatibility with generic)
Returns
-------
paramest : ndarray
estimated parameters
'''
## if not fixed is None: #fixed not defined in this version
## raise NotImplementedError
# TODO: should start_weights only be in `fit`
if weights is None:
weights = self.start_weights(inv=False)
y, x, z = self.endog, self.exog, self.instrument
zTx = np.dot(z.T, x)
zTy = np.dot(z.T, y)
# normal equation, solved with pinv
part0 = zTx.T.dot(weights)
part1 = part0.dot(zTx)
part2 = part0.dot(zTy)
params = np.linalg.pinv(part1).dot(part2)
return params

fitgmm | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
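A standalone sketch of the closed form above on made-up IV data: beta = (X'Z W Z'X)^+ X'Z W Z'y, which with W = (Z'Z/n)^{-1} (the `start_weights` above) is exactly the 2SLS estimator.

```python
import numpy as np

rng = np.random.default_rng(2)
n = 400
zr = rng.normal(size=(n, 3))                       # raw instruments (made up)
x = np.column_stack([np.ones(n),
                     zr @ np.array([0.7, 0.3, 0.2]) + rng.normal(size=n)])
y = x @ np.array([0.5, 1.5]) + rng.normal(size=n)  # true beta = (0.5, 1.5)
z = np.column_stack([np.ones(n), zr])              # instrument matrix

w = np.linalg.pinv(z.T @ z / n)                    # 2SLS weights
zTx, zTy = z.T @ x, z.T @ y
beta = np.linalg.pinv(zTx.T @ w @ zTx) @ (zTx.T @ w @ zTy)   # near (0.5, 1.5)
```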
def fittedvalues(self):
"""Fitted values"""
return self.model.predict(self.params)

fittedvalues | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def resid(self):
"""Residuals"""
return self.model.endog - self.fittedvalues

resid | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def ssr(self):
"""Sum of square errors"""
return (self.resid * self.resid).sum(0)

ssr | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def spec_hausman(params_e, params_i, cov_params_e, cov_params_i, dof=None):
'''Hausman's specification test
Parameters
----------
params_e : ndarray
efficient and consistent under Null hypothesis,
inconsistent under alternative hypothesis
params_i : ndarray
consistent under Null hypothesis,
consistent under alternative hypothesis
cov_params_e : ndarray, 2d
covariance matrix of parameter estimates for params_e
cov_params_i : ndarray, 2d
covariance matrix of parameter estimates for params_i
Example (instrumental variables): the OLS estimator is `e`, the IV estimator is `i`
Notes
-----
TODOs, Issues
- check dof calculations and verify for linear case
- check one-sided hypothesis
References
----------
Greene section 5.5 p.82/83
'''
params_diff = (params_i - params_e)
cov_diff = cov_params_i - cov_params_e
#TODO: the following is very inefficient, solves problem (svd) twice
#use linalg.lstsq or svd directly
#cov_diff will very often be indefinite (singular)
if not dof:
dof = np.linalg.matrix_rank(cov_diff)
cov_diffpinv = np.linalg.pinv(cov_diff)
H = np.dot(params_diff, np.dot(cov_diffpinv, params_diff))
pval = stats.chi2.sf(H, dof)
evals = np.linalg.eigvalsh(cov_diff)
return H, pval, dof, evals

spec_hausman | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
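A worked call of `spec_hausman` on made-up numbers (all values hypothetical): OLS as the efficient estimator `e`, IV as the consistent one `i`.

```python
import numpy as np

params_e = np.array([1.00, 0.50])    # efficient (e.g. OLS), hypothetical
params_i = np.array([1.10, 0.45])    # consistent (e.g. IV), hypothetical
cov_e = np.array([[0.010, 0.002], [0.002, 0.008]])
cov_i = np.array([[0.030, 0.004], [0.004, 0.020]])

H, pval, dof, evals = spec_hausman(params_e, params_i, cov_e, cov_i)
# here cov_i - cov_e is positive definite, so dof = 2 and H ~ chi2(2);
# a large pval means no evidence against exogeneity
```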
def momcond(self, params): #drop distfn as argument
#, mom2, quantile=None, shape=None
'''moment conditions for estimating distribution parameters by matching
quantiles, defines as many moment conditions as quantiles.
Returns
-------
difference : ndarray
difference between theoretical and empirical quantiles
Notes
-----
This can be used for method of moments or for generalized method of
moments.
'''
#this check looks redundant/unused now
if len(params) == 2:
loc, scale = params
elif len(params) == 3:
shape, loc, scale = params
else:
#raise NotImplementedError
pass #see whether this might work, seems to work for beta with 2 shape args
#mom2diff = np.array(distfn.stats(*params)) - mom2
#if not quantile is None:
pq, xq = self.pquant, self.xquant
#ppfdiff = distfn.ppf(pq, alpha)
cdfdiff = self.distfn.cdf(xq, *params) - pq
#return np.concatenate([mom2diff, cdfdiff[:1]])
return np.atleast_2d(cdfdiff)

momcond | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
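A sketch of what these moment conditions look like outside the class, matching normal quantiles (the data and probability grid are made up; `pquant`/`xquant` correspond to `pq`/`xq` here):

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(1)
x = rng.normal(loc=3.0, scale=2.0, size=1000)
pq = np.array([0.1, 0.25, 0.5, 0.75, 0.9])      # probability levels
xq = np.quantile(x, pq)                         # empirical quantiles

def momcond(params):
    loc, scale = params
    # theoretical cdf at the empirical quantiles minus the target levels
    return np.atleast_2d(stats.norm.cdf(xq, loc, scale) - pq)

print(momcond([3.0, 2.0]))   # near zero at the true (loc, scale)
```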
def fitonce(self, start=None, weights=None, has_optimal_weights=False):
'''fit without estimating an optimal weighting matrix and return results
This is a convenience function that calls fitgmm and covparams with
a given weight matrix or the identity weight matrix.
This is useful if the optimal weight matrix is known (or is analytically
given) or if an optimal weight matrix cannot be calculated.
(Developer Notes: this function could go into GMM, but is needed in this
class, at least at the moment.)
Parameters
----------
Returns
-------
results : GMMResult instance
result instance with params and _cov_params attached
See Also
--------
fitgmm
cov_params
'''
if weights is None:
weights = np.eye(self.nmoms)
params = self.fitgmm(start=start)
# TODO: rewrite this old hack, should use fitgmm or fit maxiter=0
self.results.params = params #required before call to self.cov_params
self.results.wargs = {} #required before call to self.cov_params
self.results.options_other = {'weights_method':'cov'}
# TODO: which weights_method? There should not be any needed ?
_cov_params = self.results.cov_params(weights=weights,
has_optimal_weights=has_optimal_weights)
self.results.weights = weights
self.results.jval = self.gmmobjective(params, weights)
self.results.options_other.update({'has_optimal_weights':has_optimal_weights})
return self.results

fitonce | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def atleast_2dcol(x):
''' convert array_like to 2d from 1d or 0d
not tested because not used
'''
x = np.asarray(x)
if (x.ndim == 1):
x = x[:, None]
elif (x.ndim == 0):
x = np.atleast_2d(x)
elif x.ndim > 2:  # 2d input passes through unchanged
raise ValueError('too many dimensions')
return x

atleast_2dcol | python | statsmodels/statsmodels | statsmodels/sandbox/regression/predstd.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/predstd.py | BSD-3-Clause
def wls_prediction_std(res, exog=None, weights=None, alpha=0.05):
'''calculate standard deviation and confidence interval for prediction
applies to WLS and OLS, not to general GLS;
that is, to observations that are independent but not necessarily identically distributed
Parameters
----------
res : regression result instance
results of WLS or OLS regression required attributes see notes
exog : array_like (optional)
exogenous variables for points to predict
weights : scalar or array_like (optional)
weights as defined for WLS (inverse of variance of observation)
alpha : float (default: alpha = 0.05)
confidence level for two-sided hypothesis
Returns
-------
predstd : array_like, 1d
standard error of prediction
same length as rows of exog
interval_l, interval_u : array_like
lower and upper confidence bounds
Notes
-----
The result instance needs to have at least the following
res.model.predict() : predicted values or
res.fittedvalues : values used in estimation
res.cov_params() : covariance matrix of parameter estimates
If exog is 1d, then it is interpreted as one observation,
i.e. a row vector.
testing status: not compared with other packages
References
----------
Greene p.111 for OLS, extended to WLS by analogy
'''
# work around current bug:
# fit does not attach results to model, predict broken
#res.model.results
covb = res.cov_params()
if exog is None:
exog = res.model.exog
predicted = res.fittedvalues
if weights is None:
weights = res.model.weights
else:
exog = np.atleast_2d(exog)
if covb.shape[1] != exog.shape[1]:
raise ValueError('wrong shape of exog')
predicted = res.model.predict(res.params, exog)
if weights is None:
weights = 1.
else:
weights = np.asarray(weights)
if weights.size > 1 and len(weights) != exog.shape[0]:
raise ValueError('weights and exog do not have matching shape')
# full covariance:
#predvar = res3.mse_resid + np.diag(np.dot(X2,np.dot(covb,X2.T)))
# prediction variance only
predvar = res.mse_resid/weights + (exog * np.dot(covb, exog.T).T).sum(1)
predstd = np.sqrt(predvar)
tppf = stats.t.isf(alpha/2., res.df_resid)
interval_u = predicted + tppf * predstd
interval_l = predicted - tppf * predstd
return predstd, interval_l, interval_u

wls_prediction_std | python | statsmodels/statsmodels | statsmodels/sandbox/regression/predstd.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/predstd.py | BSD-3-Clause
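A minimal usage sketch with an OLS fit (the simulated data are made up; `wls_prediction_std` is the function above, importable from `statsmodels.sandbox.regression.predstd`):

```python
import numpy as np
import statsmodels.api as sm

x = np.linspace(0, 10, 50)
X = sm.add_constant(x)
y = 1.0 + 0.5 * x + np.random.default_rng(3).normal(scale=0.8, size=50)

res = sm.OLS(y, X).fit()
# standard error of prediction and a 95% prediction band at the sample points
predstd, iv_l, iv_u = wls_prediction_std(res)
```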
def pdf_moments_st(cnt):
"""Return the Gaussian expanded pdf function given the list of central
moments (first one is mean).
version of scipy.stats, any changes ?
the scipy.stats version has a bug and returns normal distribution
"""
N = len(cnt)
if N < 2:
raise ValueError("At least two moments must be given to "
"approximate the pdf.")
totp = poly1d(1)
sig = sqrt(cnt[1])
mu = cnt[0]
if N > 2:
Dvals = _hermnorm(N + 1)
for k in range(3, N + 1):
# Find Ck
Ck = 0.0
for n in range((k - 3) // 2):  # integer division; `/` breaks range() in Python 3
m = k - 2 * n
if m % 2: # m is odd
momdiff = cnt[m - 1]
else:
momdiff = cnt[m - 1] - sig * sig * special.factorial2(m - 1)  # factorial2 lives in scipy.special
Ck += Dvals[k][m] / sig ** m * momdiff
# Add to totp
totp = totp + Ck * Dvals[k]
def thisfunc(x):
xn = (x - mu) / sig
return totp(xn) * exp(-xn * xn / 2.0) / sqrt(2 * np.pi) / sig
return thisfunc, totp

pdf_moments_st | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/extras.py | BSD-3-Clause
def pdf_mvsk(mvsk):
"""Return the Gaussian expanded pdf function given the list of 1st, 2nd
moment and skew and Fisher (excess) kurtosis.
Parameters
----------
mvsk : list of mu, mc2, skew, kurt
distribution is matched to these four moments
Returns
-------
pdffunc : function
function that evaluates the pdf(x), where x is the non-standardized
random variable.
Notes
-----
Changed so it works only if four arguments are given. Uses explicit
formula, not loop.
This implements a Gram-Charlier expansion of the normal distribution
where the first 2 moments coincide with those of the normal distribution
but skew and kurtosis can deviate from it.
In the Gram-Charlier distribution it is possible that the density
becomes negative. This is the case when the deviation from the
normal distribution is too large.
References
----------
https://en.wikipedia.org/wiki/Edgeworth_series
Johnson N.L., S. Kotz, N. Balakrishnan: Continuous Univariate
Distributions, Volume 1, 2nd ed., p.30
"""
N = len(mvsk)
if N < 4:
raise ValueError("Four moments must be given to "
"approximate the pdf.")
mu, mc2, skew, kurt = mvsk
totp = poly1d(1)
sig = sqrt(mc2)
if N > 2:
Dvals = _hermnorm(N + 1)
C3 = skew / 6.0
C4 = kurt / 24.0
# Note: Hermite polynomial for order 3 in _hermnorm is negative
# instead of positive
totp = totp - C3 * Dvals[3] + C4 * Dvals[4]
def pdffunc(x):
xn = (x - mu) / sig
return totp(xn) * np.exp(-xn * xn / 2.0) / np.sqrt(2 * np.pi) / sig
return pdffunc

pdf_mvsk | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/extras.py | BSD-3-Clause
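A quick check of `pdf_mvsk` (the grid and moment values are made up): the Hermite correction terms integrate to zero, so the expanded density should still have unit mass, though it can dip below zero for large skew or kurtosis, as the Notes warn.

```python
import numpy as np
from scipy import integrate

pdf = pdf_mvsk([0.0, 1.0, 0.5, 1.0])   # mean 0, var 1, skew 0.5, excess kurtosis 1
xg = np.linspace(-6, 6, 2001)
mass = integrate.trapezoid(pdf(xg), xg)        # ~ 1.0
mean = integrate.trapezoid(xg * pdf(xg), xg)   # ~ 0.0
print(mass, mean, pdf(xg).min())               # the minimum can be negative
```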
def pdf_moments(cnt):
"""Return the Gaussian expanded pdf function given the list of central
moments (first one is mean).
Changed so it works only if four arguments are given. Uses explicit
formula, not loop.
Notes
-----
This implements a Gram-Charlier expansion of the normal distribution
where the first 2 moments coincide with those of the normal distribution
but skew and kurtosis can deviate from it.
In the Gram-Charlier distribution it is possible that the density
becomes negative. This is the case when the deviation from the
normal distribution is too large.
References
----------
https://en.wikipedia.org/wiki/Edgeworth_series
Johnson N.L., S. Kotz, N. Balakrishnan: Continuous Univariate
Distributions, Volume 1, 2nd ed., p.30
"""
N = len(cnt)
if N < 4:
raise ValueError("Four moments must be given to "
"approximate the pdf.")
mc, mc2, mc3, mc4 = cnt
skew = mc3 / mc2 ** 1.5
kurt = mc4 / mc2 ** 2.0 - 3.0 # Fisher kurtosis, excess kurtosis
totp = poly1d(1)
sig = sqrt(cnt[1])
mu = cnt[0]
if N > 2:
Dvals = _hermnorm(N + 1)
## for k in range(3,N+1):
## # Find Ck
## Ck = 0.0
## for n in range((k-3)/2):
## m = k-2*n
## if m % 2: # m is odd
## momdiff = cnt[m-1]
## else:
## momdiff = cnt[m-1] - sig*sig*scipy.factorial2(m-1)
## Ck += Dvals[k][m] / sig**m * momdiff
## # Add to totp
## raise
## print Dvals
## print Ck
## totp = totp + Ck*Dvals[k]
C3 = skew / 6.0
C4 = kurt / 24.0
totp = totp - C3 * Dvals[3] + C4 * Dvals[4]
def thisfunc(x):
xn = (x - mu) / sig
return totp(xn) * np.exp(-xn * xn / 2.0) / np.sqrt(2 * np.pi) / sig
return thisfunc

pdf_moments | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/extras.py | BSD-3-Clause
def mvstdnormcdf(lower, upper, corrcoef, **kwds):
"""standardized multivariate normal cumulative distribution function
This is a wrapper for scipy.stats._mvn.mvndst which calculates
a rectangular integral over a standardized multivariate normal
distribution.
This function assumes a standardized scale, that is, the variance in each dimension
is one, while the correlation can be arbitrary (covariance matrix = correlation matrix)
Parameters
----------
lower, upper : array_like, 1d
lower and upper integration limits with length equal to the number
of dimensions of the multivariate normal distribution. It can contain
-np.inf or np.inf for open integration intervals
corrcoef : float or array_like
specifies correlation matrix in one of three ways, see notes
optional keyword parameters to influence integration
* maxpts : int, maximum number of function values allowed. This
parameter can be used to limit the time. A sensible
strategy is to start with `maxpts` = 1000*N, and then
increase `maxpts` if ERROR is too large.
* abseps : float absolute error tolerance.
* releps : float relative error tolerance.
Returns
-------
cdfvalue : float
value of the integral
Notes
-----
The correlation matrix corrcoef can be given in 3 different ways
If the multivariate normal is two-dimensional then only the
correlation coefficient needs to be provided.
For general dimension the correlation matrix can be provided either
as a one-dimensional array of the upper triangular correlation
coefficients stacked by rows, or as full square correlation matrix
See Also
--------
mvnormcdf : cdf of multivariate normal distribution without
standardization
Examples
--------
>>> print(mvstdnormcdf([-np.inf,-np.inf], [0.0,np.inf], 0.5))
0.5
>>> corr = [[1.0, 0, 0.5],[0,1,0],[0.5,0,1]]
>>> print(mvstdnormcdf([-np.inf,-np.inf,-100.0], [0.0,0.0,0.0], corr, abseps=1e-6))
0.166666399198
>>> print(mvstdnormcdf([-np.inf,-np.inf,-100.0],[0.0,0.0,0.0],corr, abseps=1e-8))
something wrong completion with ERROR > EPS and MAXPTS function values used;
increase MAXPTS to decrease ERROR; 1.048330348e-006
0.166666546218
>>> print(mvstdnormcdf([-np.inf,-np.inf,-100.0],[0.0,0.0,0.0], corr, \
maxpts=100000, abseps=1e-8))
0.166666588293
"""
n = len(lower)
# do not know if converting to array is necessary,
# but it makes ndim check possible
lower = np.array(lower)
upper = np.array(upper)
corrcoef = np.array(corrcoef)
correl = np.zeros(int(n * (n - 1) / 2.0)) # dtype necessary?
if (lower.ndim != 1) or (upper.ndim != 1):
raise ValueError('can handle only 1D bounds')
if len(upper) != n:
raise ValueError('bounds have different lengths')
if n == 2 and corrcoef.size == 1:
correl = corrcoef
# print 'case scalar rho', n
elif corrcoef.ndim == 1 and len(corrcoef) == n * (n - 1) / 2.0:
# print 'case flat corr', corrcoeff.shape
correl = corrcoef
elif corrcoef.shape == (n, n):
# print 'case square corr', correl.shape
correl = corrcoef[np.tril_indices(n, -1)]
# for ii in range(n):
# for jj in range(ii):
# correl[ jj + ((ii-2)*(ii-1))/2] = corrcoef[ii,jj]
else:
raise ValueError('corrcoef has incorrect dimension')
if 'maxpts' not in kwds:
if n > 2:
kwds['maxpts'] = 10000 * n
lowinf = np.isneginf(lower)
uppinf = np.isposinf(upper)
infin = 2.0 * np.ones(n)
np.putmask(infin, lowinf, 0) # infin.putmask(0,lowinf)
np.putmask(infin, uppinf, 1) # infin.putmask(1,uppinf)
# this has to be last
np.putmask(infin, lowinf * uppinf, -1)
## #remove infs
## np.putmask(lower,lowinf,-100)# infin.putmask(0,lowinf)
## np.putmask(upper,uppinf,100) #infin.putmask(1,uppinf)
# print lower,',',upper,',',infin,',',correl
# print correl.shape
# print kwds.items()
error, cdfvalue, inform = mvndst(lower, upper, infin, correl, **kwds)
if inform:
print('something wrong', informcode[inform], error)
return cdfvalue

mvstdnormcdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/extras.py | BSD-3-Clause
def mvnormcdf(upper, mu, cov, lower=None, **kwds):
"""multivariate normal cumulative distribution function
This is a wrapper for scipy.stats._mvn.mvndst which calculates
a rectangular integral over a multivariate normal distribution.
Parameters
----------
lower, upper : array_like, 1d
lower and upper integration limits with length equal to the number
of dimensions of the multivariate normal distribution. It can contain
-np.inf or np.inf for open integration intervals
mu : array_like, 1d
list or array of means
cov : array_like, 2d
specifies covariance matrix
optional keyword parameters to influence integration
* maxpts : int, maximum number of function values allowed. This
parameter can be used to limit the time. A sensible
strategy is to start with `maxpts` = 1000*N, and then
increase `maxpts` if ERROR is too large.
* abseps : float absolute error tolerance.
* releps : float relative error tolerance.
Returns
-------
cdfvalue : float
value of the integral
Notes
-----
This function normalizes the location and scale of the multivariate
normal distribution and then uses `mvstdnormcdf` to call the integration.
See Also
--------
mvstdnormcdf : location and scale standardized multivariate normal cdf
"""
upper = np.array(upper)
if lower is None:
lower = -np.ones(upper.shape) * np.inf
else:
lower = np.array(lower)
cov = np.array(cov)
stdev = np.sqrt(np.diag(cov)) # standard deviation vector
# do I need to make sure stdev is float and not int?
# is this correct to normalize to corr?
lower = (lower - mu) / stdev
upper = (upper - mu) / stdev
divrow = np.atleast_2d(stdev)
corr = cov / divrow / divrow.T
# v/np.sqrt(np.atleast_2d(np.diag(covv)))/np.sqrt(np.atleast_2d(np.diag(covv))).T
return mvstdnormcdf(lower, upper, corr, **kwds)

mvnormcdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/extras.py | BSD-3-Clause
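A sanity check of the bivariate case against a closed form: for a standardized bivariate normal, P(X <= 0, Y <= 0) = 1/4 + arcsin(rho)/(2*pi). The sketch below uses scipy's own multivariate normal cdf rather than the wrapper above:

```python
import numpy as np
from scipy import stats

rho = 0.5
exact = 0.25 + np.arcsin(rho) / (2 * np.pi)    # = 1/3 for rho = 0.5
mvn = stats.multivariate_normal(mean=[0.0, 0.0], cov=[[1.0, rho], [rho, 1.0]])
assert np.isclose(mvn.cdf([0.0, 0.0]), exact, atol=1e-5)
```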
def mean_residual_life(x, frac=None, alpha=0.05):
'''empirical mean residual life or expected shortfall
Parameters
----------
x : 1-dimensional array_like
frac : list[float], optional
All entries must be between 0 and 1
alpha : float, default 0.05
FIXME: not actually used.
TODO:
check formula for std of mean
does not include case for all observations
last observations std is zero
vectorize loop using cumsum
frac does not work yet
'''
axis = 0 # searchsorted is 1d only
x = np.asarray(x)
nobs = x.shape[axis]
xsorted = np.sort(x, axis=axis)
if frac is None:
xthreshold = xsorted
else:
xthreshold = xsorted[np.floor(nobs * frac).astype(int)]
# use searchsorted instead of simple index in case of ties
xlargerindex = np.searchsorted(xsorted, xthreshold, side='right')
# TODO:replace loop with cumsum ?
result = []
for i in range(len(xthreshold)-1):
k_ind = xlargerindex[i]
rmean = x[k_ind:].mean()
# this does not work for last observations, nans
rstd = x[k_ind:].std()
rmstd = rstd/np.sqrt(nobs-k_ind) # std error of mean, check formula
result.append((k_ind, xthreshold[i], rmean, rmstd))
res = np.array(result)
crit = 1.96 # TODO: without loading stats, crit = -stats.t.ppf(0.05)
confint = res[:, 1:2] + crit * res[:, -1:] * np.array([[-1, 1]])
return np.column_stack((res, confint))

mean_residual_life | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/try_pot.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/try_pot.py | BSD-3-Clause
def _fitstart(self, x):
'''example method, method of moment estimator as starting values
Parameters
----------
x : ndarray
data for which the parameters are estimated
Returns
-------
est : tuple
preliminary estimates used as starting value for fitting, not
necessarily a consistent estimator
Notes
-----
This needs to be written and attached to each individual distribution
This example was written for the gamma distribution, but not verified
with literature
'''
loc = np.min([x.min(),0])
a = 4/stats.skew(x)**2
scale = np.std(x) / np.sqrt(a)
return (a, loc, scale)

_fitstart | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause
def _fitstart_beta(self, x, fixed=None):
'''method of moment estimator as starting values for beta distribution
Parameters
----------
x : ndarray
data for which the parameters are estimated
fixed : None or array_like
sequence of numbers and np.nan to indicate fixed parameters and parameters
to estimate
Returns
-------
est : tuple
preliminary estimates used as starting value for fitting, not
necessarily a consistent estimator
Notes
-----
This needs to be written and attached to each individual distribution
References
----------
for method of moment estimator for known loc and scale
https://en.wikipedia.org/wiki/Beta_distribution#Parameter_estimation
http://www.itl.nist.gov/div898/handbook/eda/section3/eda366h.htm
NIST reference also includes reference to MLE in
Johnson, Kotz, and Balakrishan, Volume II, pages 221-235
'''
#todo: separate out this part to be used for other compact support distributions
# e.g. rdist, vonmises, and truncnorm
# but this might not work because it might still be distribution specific
a, b = x.min(), x.max()
eps = (b - a) * 0.01  # (a - b) in the original was negative, flipping loc and scale
if fixed is None:
#this part not checked with books
loc = a - eps
scale = (b + eps) - loc
else:
if np.isnan(fixed[-2]):
#estimate loc
loc = a - eps
else:
loc = fixed[-2]
if np.isnan(fixed[-1]):
#estimate scale
scale = (b + eps) - loc
else:
scale = fixed[-1]
#method of moment for known loc scale:
scale = float(scale)
xtrans = (x - loc)/scale
xm = xtrans.mean()
xv = xtrans.var()
tmp = (xm*(1-xm)/xv - 1)
p = xm * tmp
q = (1 - xm) * tmp
return (p, q, loc, scale) #check return type and should fixed be returned ?

_fitstart_beta | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause
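The known-loc/scale branch is the textbook method of moments for the beta distribution; a standalone check on simulated data (sample size and parameters made up):

```python
import numpy as np
from scipy import stats

x = stats.beta.rvs(2.0, 5.0, size=5000, random_state=1234)
xm, xv = x.mean(), x.var()
tmp = xm * (1 - xm) / xv - 1      # equals p + q in expectation
p, q = xm * tmp, (1 - xm) * tmp   # should land near (2, 5)
```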
def _fitstart_poisson(self, x, fixed=None):
'''maximum likelihood estimator as starting values for Poisson distribution
Parameters
----------
x : ndarray
data for which the parameters are estimated
fixed : None or array_like
sequence of numbers and np.nan to indicate fixed parameters and parameters
to estimate
Returns
-------
est : tuple
preliminary estimates used as starting value for fitting, not
necessarily a consistent estimator
Notes
-----
This needs to be written and attached to each individual distribution
References
----------
MLE :
https://en.wikipedia.org/wiki/Poisson_distribution#Maximum_likelihood
'''
#todo: separate out this part to be used for other compact support distributions
# e.g. rdist, vonmises, and truncnorm
# but this might not work because it might still be distribution specific
a = x.min()
eps = 0 # is this robust ?
if fixed is None:
#this part not checked with books
loc = a - eps
else:
if np.isnan(fixed[-1]):
#estimate loc
loc = a - eps
else:
loc = fixed[-1]
#MLE for standard (unshifted, if loc=0) Poisson distribution
xtrans = (x - loc)
lambd = xtrans.mean()
#second derivative d loglike/ dlambd Not used
#dlldlambd = 1/lambd # check
return (lambd, loc) #check return type and should fixed be returned ?

_fitstart_poisson | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause
def fit_fr(self, data, *args, **kwds):
'''estimate distribution parameters by MLE taking some parameters as fixed
Parameters
----------
data : ndarray, 1d
data for which the distribution parameters are estimated,
args : list ? check
starting values for optimization
kwds :
- 'frozen' : array_like
values for frozen distribution parameters and, for elements with
np.nan, the corresponding parameter will be estimated
Returns
-------
argest : ndarray
estimated parameters
Examples
--------
generate random sample
>>> np.random.seed(12345)
>>> x = stats.gamma.rvs(2.5, loc=0, scale=1.2, size=200)
estimate all parameters
>>> stats.gamma.fit(x)
array([ 2.0243194 , 0.20395655, 1.44411371])
>>> stats.gamma.fit_fr(x, frozen=[np.nan, np.nan, np.nan])
array([ 2.0243194 , 0.20395655, 1.44411371])
keep loc fixed, estimate shape and scale parameters
>>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, np.nan])
array([ 2.45603985, 1.27333105])
keep loc and scale fixed, estimate shape parameter
>>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
array([ 3.00048828])
>>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.2])
array([ 2.57792969])
estimate only scale parameter for fixed shape and loc
>>> stats.gamma.fit_fr(x, frozen=[2.5, 0.0, np.nan])
array([ 1.25087891])
Notes
-----
self is an instance of a distribution class. This can be attached to
scipy.stats.distributions.rv_continuous
*Todo*
* check if docstring is correct
* more input checking, args is list ? might also apply to current fit method
'''
loc0, scale0 = kwds.get('loc', 0.0), kwds.get('scale', 1.0)
Narg = len(args)
if Narg == 0 and hasattr(self, '_fitstart'):
x0 = self._fitstart(data)
elif Narg > self.numargs:
raise ValueError("Too many input arguments.")
else:
args += (1.0,)*(self.numargs-Narg)
# location and scale are at the end
x0 = args + (loc0, scale0)
if 'frozen' in kwds:
frmask = np.array(kwds['frozen'])
if len(frmask) != self.numargs+2:
raise ValueError("Incorrect number of frozen arguments.")
else:
# keep starting values for not frozen parameters
for n in range(len(frmask)):
# Troubleshooting ex_generic_mle_tdist
if isinstance(frmask[n], np.ndarray) and frmask[n].size == 1:
frmask[n] = frmask[n].item()
# If there were array elements, then frmask will be object-dtype,
# in which case np.isnan will raise TypeError
frmask = frmask.astype(np.float64)
x0 = np.array(x0)[np.isnan(frmask)]
else:
frmask = None
#print(x0
#print(frmask
return optimize.fmin(self.nnlf_fr, x0,
args=(np.ravel(data), frmask), disp=0)

fit_fr | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause
def expect(self, fn=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False):
'''calculate expected value of a function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
fn : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set to the support
of the distribution
conditional : bool (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Returns
-------
expected value : float
Notes
-----
    This function has not been checked for its behavior when the integral is
not finite. The integration behavior is inherited from scipy.integrate.quad.
'''
if fn is None:
def fun(x, *args):
return x*self.pdf(x, loc=loc, scale=scale, *args)
else:
def fun(x, *args):
return fn(x)*self.pdf(x, loc=loc, scale=scale, *args)
if lb is None:
lb = loc + self.a * scale #(self.a - loc)/(1.0*scale)
if ub is None:
ub = loc + self.b * scale #(self.b - loc)/(1.0*scale)
if conditional:
invfac = (self.sf(lb, loc=loc, scale=scale, *args)
- self.sf(ub, loc=loc, scale=scale, *args))
else:
invfac = 1.0
return integrate.quad(fun, lb, ub,
args=args)[0]/invfac | calculate expected value of a function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
fn : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set to the support
of the distribution
conditional : bool (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Returns
-------
expected value : float
Notes
-----
This function has not been checked for its behavior when the integral is
not finite. The integration behavior is inherited from scipy.integrate.quad. | expect | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause |
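A minimal usage sketch for expect, hedged: it assumes the function above is in scope and is called as a plain function, passing the scipy.stats distribution for the self argument (the patch code attaches it as a method).

from scipy import stats

# first two moments of the standard normal via numerical integration
m1 = expect(stats.norm)                      # E[X], approximately 0.0
m2 = expect(stats.norm, fn=lambda x: x**2)   # E[X**2], approximately 1.0
print(m1, m2)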
def expect_v2(self, fn=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False):
'''calculate expected value of a function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
fn : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set using
quantiles of the distribution, see Notes
conditional : bool (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Returns
-------
expected value : float
Notes
-----
    This function has not been checked for its behavior when the integral is
not finite. The integration behavior is inherited from scipy.integrate.quad.
The default limits are lb = self.ppf(1e-9, *args), ub = self.ppf(1-1e-9, *args)
For some heavy tailed distributions, 'alpha', 'cauchy', 'halfcauchy',
'levy', 'levy_l', and for 'ncf', the default limits are not set correctly
even when the expectation of the function is finite. In this case, the
integration limits, lb and ub, should be chosen by the user. For example,
for the ncf distribution, ub=1000 works in the examples.
There are also problems with numerical integration in some other cases,
for example if the distribution is very concentrated and the default limits
are too large.
'''
#changes: 20100809
#correction and refactoring how loc and scale are handled
#uses now _pdf
#needs more testing for distribution with bound support, e.g. genpareto
if fn is None:
def fun(x, *args):
return (loc + x*scale)*self._pdf(x, *args)
else:
def fun(x, *args):
return fn(loc + x*scale)*self._pdf(x, *args)
if lb is None:
#lb = self.a
try:
lb = self.ppf(1e-9, *args) #1e-14 quad fails for pareto
except ValueError:
lb = self.a
else:
lb = max(self.a, (lb - loc)/(1.0*scale)) #transform to standardized
if ub is None:
#ub = self.b
try:
ub = self.ppf(1-1e-9, *args)
except ValueError:
ub = self.b
else:
ub = min(self.b, (ub - loc)/(1.0*scale))
if conditional:
invfac = self._sf(lb,*args) - self._sf(ub,*args)
else:
invfac = 1.0
return integrate.quad(fun, lb, ub,
args=args, limit=500)[0]/invfac | calculate expected value of a function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
fn : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set using
quantiles of the distribution, see Notes
conditional : bool (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Returns
-------
expected value : float
Notes
-----
This function has not been checked for its behavior when the integral is
not finite. The integration behavior is inherited from scipy.integrate.quad.
The default limits are lb = self.ppf(1e-9, *args), ub = self.ppf(1-1e-9, *args)
For some heavy tailed distributions, 'alpha', 'cauchy', 'halfcauchy',
'levy', 'levy_l', and for 'ncf', the default limits are not set correctly
even when the expectation of the function is finite. In this case, the
integration limits, lb and ub, should be chosen by the user. For example,
for the ncf distribution, ub=1000 works in the examples.
There are also problems with numerical integration in some other cases,
for example if the distribution is very concentrated and the default limits
are too large. | expect_v2 | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause |
def distfitbootstrap(sample, distr, nrepl=100):
'''run bootstrap for estimation of distribution parameters
hard coded: only one shape parameter is allowed and estimated,
loc=0 and scale=1 are fixed in the estimation
Parameters
----------
sample : ndarray
original sample data for bootstrap
distr : distribution instance with fit_fr method
nrepl : int
number of bootstrap replications
Returns
-------
res : array (nrepl,)
parameter estimates for all bootstrap replications
'''
nobs = len(sample)
res = np.zeros(nrepl)
for ii in range(nrepl):
rvsind = np.random.randint(nobs, size=nobs)
x = sample[rvsind]
res[ii] = distr.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
return res | run bootstrap for estimation of distribution parameters
hard coded: only one shape parameter is allowed and estimated,
loc=0 and scale=1 are fixed in the estimation
Parameters
----------
sample : ndarray
original sample data for bootstrap
distr : distribution instance with fit_fr method
nrepl : int
number of bootstrap replications
Returns
-------
res : array (nrepl,)
parameter estimates for all bootstrap replications | distfitbootstrap | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause |
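A hedged usage sketch for distfitbootstrap, assuming fit_fr (defined above) has been attached to the scipy distributions as this module does; only the gamma shape parameter is bootstrapped, loc=0 and scale=1 are held fixed.

import numpy as np
from scipy import stats

np.random.seed(12345)
sample = stats.gamma.rvs(2.5, loc=0., scale=1., size=200)
# bootstrap distribution of the shape estimate, centered near 2.5
bres = distfitbootstrap(sample, stats.gamma, nrepl=50)
print(bres.mean(), bres.std())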
def distfitmc(sample, distr, nrepl=100, distkwds={}):
'''run Monte Carlo for estimation of distribution parameters
hard coded: only one shape parameter is allowed and estimated,
loc=0 and scale=1 are fixed in the estimation
Parameters
----------
sample : ndarray
original sample data, in Monte Carlo only used to get nobs,
distr : distribution instance with fit_fr method
nrepl : int
number of Monte Carlo replications
Returns
-------
res : array (nrepl,)
parameter estimates for all Monte Carlo replications
'''
arg = distkwds.pop('arg')
nobs = len(sample)
res = np.zeros(nrepl)
for ii in range(nrepl):
x = distr.rvs(arg, size=nobs, **distkwds)
res[ii] = distr.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
return res | run Monte Carlo for estimation of distribution parameters
hard coded: only one shape parameter is allowed and estimated,
loc=0 and scale=1 are fixed in the estimation
Parameters
----------
sample : ndarray
original sample data, in Monte Carlo only used to get nobs,
distr : distribution instance with fit_fr method
nrepl : int
number of Monte Carlo replications
Returns
-------
res : array (nrepl,)
parameter estimates for all Monte Carlo replications | distfitmc | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause |
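Usage mirrors the bootstrap version, except that new samples are drawn from the distribution itself; note that distkwds has to contain the shape parameter(s) under the key 'arg'. An illustrative sketch under the same fit_fr assumption as above:

import numpy as np
from scipy import stats

np.random.seed(12345)
sample = stats.gamma.rvs(2.5, loc=0., scale=1., size=200)  # only len() is used
res = distfitmc(sample, stats.gamma, nrepl=50,
                distkwds=dict(arg=2.5, loc=0., scale=1.))
print(res.mean(), res.std())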
def printresults(sample, arg, bres, kind='bootstrap'):
    '''calculate and print Bootstrap or Monte Carlo results
Parameters
----------
sample : ndarray
original sample data
arg : float (for general case will be array)
bres : ndarray
parameter estimates from Bootstrap or Monte Carlo run
kind : {'bootstrap', 'montecarlo'}
        output is printed for Bootstrap (default) or Monte Carlo
Returns
-------
None, currently only printing
Notes
-----
    still a bit of a mess because it is used for both Bootstrap and Monte Carlo
made correction:
reference point for bootstrap is estimated parameter
not clear:
I'm not doing any ddof adjustment in estimation of variance, do we
need ddof>0 ?
todo: return results and string instead of printing
'''
    nobs = len(sample)
    nrepl = bres.shape[0]
    print('true parameter value')
    print(arg)
    print('MLE estimate of parameters using sample (nobs=%d)'% (nobs))
    argest = distr.fit_fr(sample, frozen=[np.nan, 0.0, 1.0])  #distr is taken from module scope
print(argest)
if kind == 'bootstrap':
#bootstrap compares to estimate from sample
arg = argest
print('%s distribution of parameter estimate (nrepl=%d)'% (kind, nrepl))
    print(f'mean = {bres.mean(0):f}, bias={float(np.squeeze(bres.mean(0) - arg)):f}')
print('median', np.median(bres, axis=0))
print('var and std', bres.var(0), np.sqrt(bres.var(0)))
bmse = ((bres - arg)**2).mean(0)
print('mse, rmse', bmse, np.sqrt(bmse))
bressorted = np.sort(bres)
print('%s confidence interval (90%% coverage)' % kind)
    print(bressorted[int(np.floor(nrepl*0.05))], bressorted[int(np.floor(nrepl*0.95))])
print('%s confidence interval (90%% coverage) normal approximation' % kind)
print(stats.norm.ppf(0.05, loc=bres.mean(), scale=bres.std()),)
print(stats.norm.isf(0.05, loc=bres.mean(), scale=bres.std()))
print('Kolmogorov-Smirnov test for normality of %s distribution' % kind)
print(' - estimated parameters, p-values not really correct')
    print(stats.kstest(bres, 'norm', (bres.mean(), bres.std()))) | calculate and print Bootstrap or Monte Carlo results
Parameters
----------
sample : ndarray
original sample data
arg : float (for general case will be array)
bres : ndarray
parameter estimates from Bootstrap or Monte Carlo run
kind : {'bootstrap', 'montecarlo'}
    output is printed for Bootstrap (default) or Monte Carlo
Returns
-------
None, currently only printing
Notes
-----
still a bit of a mess because it is used for both Bootstrap and Monte Carlo
made correction:
reference point for bootstrap is estimated parameter
not clear:
I'm not doing any ddof adjustment in estimation of variance, do we
need ddof>0 ?
todo: return results and string instead of printing | printresults | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/sppatch.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/sppatch.py | BSD-3-Clause |
def paramstopot(thresh, shape, scale):
'''transform shape scale for peak over threshold
y = x-u|x>u ~ GPD(k, sigma-k*u) if x ~ GPD(k, sigma)
notation of de Zea Bermudez, Kotz
k, sigma is shape, scale
'''
return shape, scale - shape * thresh | transform shape scale for peak over threshold
y = x-u|x>u ~ GPD(k, sigma-k*u) if x ~ GPD(k, sigma)
notation of de Zea Bermudez, Kotz
k, sigma is shape, scale | paramstopot | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/genpareto.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/genpareto.py | BSD-3-Clause |
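A quick numeric check of the transformation: with shape k = -0.5, scale sigma = 1.0 and threshold u = 0.2, the excess distribution keeps the shape and gets scale sigma - k*u = 1.1.

k, sigma_u = paramstopot(0.2, -0.5, 1.0)
print(k, sigma_u)   # -0.5 1.1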
def meanexcess(thresh, shape, scale):
'''mean excess function of genpareto
    warns if the inequality conditions in de Zea Bermudez, Kotz are violated
'''
warnif(shape > -1, 'shape > -1')
warnif(thresh >= 0, 'thresh >= 0') # make it weak inequality
warnif((scale - shape * thresh) > 0, '(scale - shape*thresh) > 0')
return (scale - shape * thresh) / (1 + shape) | mean excess function of genpareto
warns if the inequality conditions in de Zea Bermudez, Kotz are violated | meanexcess | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/genpareto.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/genpareto.py | BSD-3-Clause |
def gammamomentcond(distfn, params, mom2, quantile=None):
    '''estimate distribution parameters based on the method of moments (mean,
variance) for distributions with 1 shape parameter and fixed loc=0.
Returns
-------
cond : function
Notes
-----
first test version, quantile argument not used
'''
def cond(params):
alpha, scale = params
mom2s = distfn.stats(alpha, 0.,scale)
#quantil
return np.array(mom2)-mom2s
    return cond | estimate distribution parameters based on the method of moments (mean,
variance) for distributions with 1 shape parameter and fixed loc=0.
Returns
-------
cond : function
Notes
-----
first test version, quantile argument not used | gammamomentcond | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
def gammamomentcond2(distfn, params, mom2, quantile=None):
    '''estimate distribution parameters based on the method of moments (mean,
variance) for distributions with 1 shape parameter and fixed loc=0.
Returns
-------
difference : ndarray
difference between theoretical and empirical moments
Notes
-----
first test version, quantile argument not used
The only difference to previous function is return type.
'''
alpha, scale = params
mom2s = distfn.stats(alpha, 0.,scale)
    return np.array(mom2)-mom2s | estimate distribution parameters based on the method of moments (mean,
variance) for distributions with 1 shape parameter and fixed loc=0.
Returns
-------
difference : ndarray
difference between theoretical and empirical moments
Notes
-----
first test version, quantile argument not used
The only difference to previous function is return type. | gammamomentcond2 | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
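Because this version returns the moment differences directly, it can be handed to a root finder; an illustrative sketch (the gamma distribution with shape a and scale s has mean a*s and variance a*s**2):

import numpy as np
from scipy import stats, optimize

np.random.seed(12345)
x = stats.gamma.rvs(2.5, loc=0., scale=1.2, size=1000)
mom2 = (x.mean(), x.var())
# solve for (shape, scale) that match the sample mean and variance
est = optimize.fsolve(lambda p: gammamomentcond2(stats.gamma, p, mom2),
                      [1., 1.])
print(est)   # close to [2.5, 1.2]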
def momentcondunbound(distfn, params, mom2, quantile=None):
'''moment conditions for estimating distribution parameters using method
of moments, uses mean, variance and one quantile for distributions
with 1 shape parameter.
Returns
-------
difference : ndarray
difference between theoretical and empirical moments and quantiles
'''
shape, loc, scale = params
mom2diff = np.array(distfn.stats(shape, loc,scale)) - mom2
if quantile is not None:
pq, xq = quantile
#ppfdiff = distfn.ppf(pq, alpha)
cdfdiff = distfn.cdf(xq, shape, loc, scale) - pq
return np.concatenate([mom2diff, cdfdiff[:1]])
return mom2diff | moment conditions for estimating distribution parameters using method
of moments, uses mean, variance and one quantile for distributions
with 1 shape parameter.
Returns
-------
difference : ndarray
difference between theoretical and empirical moments and quantiles | momentcondunbound | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
def momentcondunboundls(distfn, params, mom2, quantile=None, shape=None):
'''moment conditions for estimating loc and scale of a distribution
with method of moments using either 2 quantiles or 2 moments (not both).
Returns
-------
difference : ndarray
difference between theoretical and empirical moments or quantiles
'''
loc, scale = params
mom2diff = np.array(distfn.stats(shape, loc, scale)) - mom2
if quantile is not None:
pq, xq = quantile
#ppfdiff = distfn.ppf(pq, alpha)
cdfdiff = distfn.cdf(xq, shape, loc, scale) - pq
#return np.concatenate([mom2diff, cdfdiff[:1]])
return cdfdiff
return mom2diff | moment conditions for estimating loc and scale of a distribution
with method of moments using either 2 quantiles or 2 moments (not both).
Returns
-------
difference : ndarray
difference between theoretical and empirical moments or quantiles | momentcondunboundls | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
def momentcondquant(distfn, params, mom2, quantile=None, shape=None):
'''moment conditions for estimating distribution parameters by matching
quantiles, defines as many moment conditions as quantiles.
Returns
-------
difference : ndarray
difference between theoretical and empirical quantiles
Notes
-----
This can be used for method of moments or for generalized method of
moments.
'''
    #this check looks redundant/unused now
if len(params) == 2:
loc, scale = params
elif len(params) == 3:
shape, loc, scale = params
else:
#raise NotImplementedError
pass #see whether this might work, seems to work for beta with 2 shape args
#mom2diff = np.array(distfn.stats(*params)) - mom2
#if not quantile is None:
pq, xq = quantile
#ppfdiff = distfn.ppf(pq, alpha)
cdfdiff = distfn.cdf(xq, *params) - pq
#return np.concatenate([mom2diff, cdfdiff[:1]])
return cdfdiff | moment conditions for estimating distribution parameters by matching
quantiles, defines as many moment conditions as quantiles.
Returns
-------
difference : ndarray
difference between theoretical and empirical quantiles
Notes
-----
This can be used for method of moments or for generalized method of
moments. | momentcondquant | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
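Since the function returns one condition per quantile, nonlinear least squares gives a quantile-matching estimator; an illustrative sketch for loc and scale of a normal sample:

import numpy as np
from scipy import stats, optimize

np.random.seed(12345)
x = stats.norm.rvs(loc=0.5, scale=2., size=2000)
pq = np.linspace(0.05, 0.95, 10)
xq = np.percentile(x, 100 * pq)   # empirical quantiles
# third argument (mom2) is unused in the quantile branch
est, ier = optimize.leastsq(
    lambda p: momentcondquant(stats.norm, p, None, quantile=(pq, xq)),
    [0., 1.])
print(est)   # roughly [0.5, 2.]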
def nloglike(params):
'''negative loglikelihood function of binned data
corresponds to multinomial
'''
prob = np.diff(distfn.cdf(binedges, *params))
return -(lnnobsfact + np.sum(freq*np.log(prob)- special.gammaln(freq+1))) | negative loglikelihood function of binned data
corresponds to multinomial | fitbinned.nloglike | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
def fitbinned(distfn, freq, binedges, start, fixed=None):
'''estimate parameters of distribution function for binned data using MLE
Parameters
----------
distfn : distribution instance
needs to have cdf method, as in scipy.stats
freq : ndarray, 1d
frequency count, e.g. obtained by histogram
binedges : ndarray, 1d
binedges including lower and upper bound
start : tuple or array_like ?
starting values, needs to have correct length
Returns
-------
paramest : ndarray
estimated parameters
Notes
-----
todo: add fixed parameter option
added factorial
'''
if fixed is not None:
raise NotImplementedError
nobs = np.sum(freq)
lnnobsfact = special.gammaln(nobs+1)
def nloglike(params):
'''negative loglikelihood function of binned data
corresponds to multinomial
'''
prob = np.diff(distfn.cdf(binedges, *params))
return -(lnnobsfact + np.sum(freq*np.log(prob)- special.gammaln(freq+1)))
return optimize.fmin(nloglike, start) | estimate parameters of distribution function for binned data using MLE
Parameters
----------
distfn : distribution instance
needs to have cdf method, as in scipy.stats
freq : ndarray, 1d
frequency count, e.g. obtained by histogram
binedges : ndarray, 1d
binedges including lower and upper bound
start : tuple or array_like ?
starting values, needs to have correct length
Returns
-------
paramest : ndarray
estimated parameters
Notes
-----
todo: add fixed parameter option
added factorial | fitbinned | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
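A hedged sketch: bin a normal sample with np.histogram and recover loc and scale from the counts (the normal cdf keeps all bin probabilities strictly positive, so the log-likelihood stays finite):

import numpy as np
from scipy import stats

np.random.seed(12345)
x = stats.norm.rvs(loc=5., scale=2., size=2000)
freq, binedges = np.histogram(x, bins=20)
est = fitbinned(stats.norm, freq, binedges, (4., 1.))
print(est)   # close to [5., 2.]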
def gmmobjective(params):
    '''GMM objective function for binned data
    quadratic form in the moment conditions
'''
prob = np.diff(distfn.cdf(binedges, *params))
momcond = freqnormed - prob
    return np.dot(momcond*weights, momcond) | GMM objective function for binned data
quadratic form in the moment conditions | fitbinnedgmm.gmmobjective | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
def fitbinnedgmm(distfn, freq, binedges, start, fixed=None, weightsoptimal=True):
'''estimate parameters of distribution function for binned data using GMM
Parameters
----------
distfn : distribution instance
needs to have cdf method, as in scipy.stats
freq : ndarray, 1d
frequency count, e.g. obtained by histogram
binedges : ndarray, 1d
binedges including lower and upper bound
start : tuple or array_like ?
starting values, needs to have correct length
fixed : None
not used yet
weightsoptimal : bool
If true, then the optimal weighting matrix for GMM is used. If false,
then the identity matrix is used
Returns
-------
paramest : ndarray
estimated parameters
Notes
-----
todo: add fixed parameter option
added factorial
'''
if fixed is not None:
raise NotImplementedError
nobs = np.sum(freq)
if weightsoptimal:
weights = freq/float(nobs)
else:
weights = np.ones(len(freq))
freqnormed = freq/float(nobs)
# skip turning weights into matrix diag(freq/float(nobs))
def gmmobjective(params):
        '''GMM objective function for binned data
        quadratic form in the moment conditions
'''
prob = np.diff(distfn.cdf(binedges, *params))
momcond = freqnormed - prob
return np.dot(momcond*weights, momcond)
return optimize.fmin(gmmobjective, start) | estimate parameters of distribution function for binned data using GMM
Parameters
----------
distfn : distribution instance
needs to have cdf method, as in scipy.stats
freq : ndarray, 1d
frequency count, e.g. obtained by histogram
binedges : ndarray, 1d
binedges including lower and upper bound
start : tuple or array_like ?
starting values, needs to have correct length
fixed : None
not used yet
weightsoptimal : bool
If true, then the optimal weighting matrix for GMM is used. If false,
then the identity matrix is used
Returns
-------
paramest : ndarray
estimated parameters
Notes
-----
todo: add fixed parameter option
added factorial | fitbinnedgmm | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
def logmps(params, xsorted, dist):
'''calculate negative log of Product-of-Spacings
Parameters
----------
params : array_like, tuple ?
        parameters of the distribution function
xsorted : array_like
data that is already sorted
dist : instance of a distribution class
only cdf method is used
Returns
-------
mps : float
negative log of Product-of-Spacings
Notes
-----
    MPS definition from JKB page 233
'''
xcdf = np.r_[0., dist.cdf(xsorted, *params), 1.]
D = np.diff(xcdf)
return -np.log(D).mean() | calculate negative log of Product-of-Spacings
Parameters
----------
params : array_like, tuple ?
    parameters of the distribution function
xsorted : array_like
data that is already sorted
dist : instance of a distribution class
only cdf method is used
Returns
-------
mps : float
negative log of Product-of-Spacings
Notes
-----
MPS definition from JKB page 233 | logmps | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
def getstartparams(dist, data):
'''get starting values for estimation of distribution parameters
Parameters
----------
dist : distribution instance
the distribution instance needs to have either a method fitstart
or an attribute numargs
data : ndarray
data for which preliminary estimator or starting value for
parameter estimation is desired
Returns
-------
x0 : ndarray
preliminary estimate or starting value for the parameters of
the distribution given the data, including loc and scale
'''
if hasattr(dist, 'fitstart'):
#x0 = getattr(dist, 'fitstart')(data)
x0 = dist.fitstart(data)
else:
if np.isfinite(dist.a):
x0 = np.r_[[1.]*dist.numargs, (data.min()-1), 1.]
else:
x0 = np.r_[[1.]*dist.numargs, (data.mean()-1), 1.]
return x0 | get starting values for estimation of distribution parameters
Parameters
----------
dist : distribution instance
the distribution instance needs to have either a method fitstart
or an attribute numargs
data : ndarray
data for which preliminary estimator or starting value for
parameter estimation is desired
Returns
-------
x0 : ndarray
preliminary estimate or starting value for the parameters of
the distribution given the data, including loc and scale | getstartparams | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
def fit_mps(dist, data, x0=None):
'''Estimate distribution parameters with Maximum Product-of-Spacings
Parameters
----------
    dist : instance of a distribution class
        only the cdf method is used
    data : array_like
        sample data, does not need to be sorted
    x0 : None or array_like
        starting values for the parameters; if None, they are obtained
        with getstartparams
Returns
-------
x : ndarray
estimates for the parameters of the distribution given the data,
including loc and scale
'''
xsorted = np.sort(data)
if x0 is None:
x0 = getstartparams(dist, xsorted)
args = (xsorted, dist)
print(x0)
#print(args)
return optimize.fmin(logmps, x0, args=args) | Estimate distribution parameters with Maximum Product-of-Spacings
Parameters
----------
dist : instance of a distribution class
    only the cdf method is used
data : array_like
    sample data, does not need to be sorted
x0 : None or array_like
    starting values for the parameters; if None, they are obtained
    with getstartparams
Returns
-------
x : ndarray
estimates for the parameters of the distribution given the data,
including loc and scale | fit_mps | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/estimators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/estimators.py | BSD-3-Clause |
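A minimal sketch of the product-of-spacings fit for a normal sample; since the normal has no shape parameter, getstartparams supplies [data.mean()-1, 1.] as the start vector:

import numpy as np
from scipy import stats

np.random.seed(12345)
x = stats.norm.rvs(loc=5., scale=2., size=500)
est = fit_mps(stats.norm, x)
print(est)   # close to [5., 2.]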
def ks_2samp(data1, data2):
"""
    Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
a, b : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
D : float
KS statistic
p-value : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
This is the two-sided test, one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> import numpy as np
>>> from scipy.stats import ks_2samp
>>> #fix random seed to get the same result
>>> np.random.seed(12345678)
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
different distribution
we can reject the null hypothesis since the pvalue is below 1%
>>> rvs1 = stats.norm.rvs(size=n1,loc=0.,scale=1)
>>> rvs2 = stats.norm.rvs(size=n2,loc=0.5,scale=1.5)
>>> ks_2samp(rvs1,rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
slightly different distribution
we cannot reject the null hypothesis at a 10% or lower alpha since
the pvalue at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2,loc=0.01,scale=1.0)
>>> ks_2samp(rvs1,rvs3)
(0.10333333333333333, 0.14498781825751686)
identical distribution
we cannot reject the null hypothesis since the pvalue is high, 41%
>>> rvs4 = stats.norm.rvs(size=n2,loc=0.0,scale=1.0)
>>> ks_2samp(rvs1,rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
data1, data2 = lmap(np.asarray, (data1, data2))
    n1 = len(data1)
    n2 = len(data2)
data1 = np.sort(data1)
data2 = np.sort(data2)
data_all = np.concatenate([data1,data2])
#reminder: searchsorted inserts 2nd into 1st array
cdf1 = np.searchsorted(data1,data_all,side='right')/(1.0*n1)
cdf2 = (np.searchsorted(data2,data_all,side='right'))/(1.0*n2)
d = np.max(np.absolute(cdf1-cdf2))
#Note: d absolute not signed distance
en = np.sqrt(n1*n2/float(n1+n2))
try:
prob = ksprob((en+0.12+0.11/en)*d)
except Exception:
prob = 1.0
    return d, prob | Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
a, b : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
D : float
KS statistic
p-value : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
This is the two-sided test, one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> import numpy as np
>>> from scipy.stats import ks_2samp
>>> #fix random seed to get the same result
>>> np.random.seed(12345678)
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
different distribution
we can reject the null hypothesis since the pvalue is below 1%
>>> rvs1 = stats.norm.rvs(size=n1,loc=0.,scale=1)
>>> rvs2 = stats.norm.rvs(size=n2,loc=0.5,scale=1.5)
>>> ks_2samp(rvs1,rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
slightly different distribution
we cannot reject the null hypothesis at a 10% or lower alpha since
the pvalue at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2,loc=0.01,scale=1.0)
>>> ks_2samp(rvs1,rvs3)
(0.10333333333333333, 0.14498781825751686)
identical distribution
we cannot reject the null hypothesis since the pvalue is high, 41%
>>> rvs4 = stats.norm.rvs(size=n2,loc=0.0,scale=1.0)
>>> ks_2samp(rvs1,rvs4)
(0.07999999999999996, 0.41126949729859719) | ks_2samp | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
def kstest(rvs, cdf, args=(), N=20, alternative = 'two_sided', mode='approx',**kwds):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two_sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str or array or callable
string: name of a distribution in scipy.stats
array: 1-D observations of random variables
callable: function to generate random variables, requires keyword
argument `size`
cdf : str or callable
string: name of a distribution in scipy.stats, if rvs is a string then
cdf can evaluate to `False` or be the same as rvs
callable: function to evaluate cdf
args : tuple, sequence
distribution parameters, used if rvs or cdf are strings
N : int
sample size if rvs is string or callable
alternative : 'two_sided' (default), 'less' or 'greater'
defines the alternative hypothesis (see explanation)
mode : 'approx' (default) or 'asymp'
defines the distribution used for calculating p-value
'approx' : use approximation to exact distribution of test statistic
'asymp' : use asymptotic distribution of test statistic
Returns
-------
D : float
KS test statistic, either D, D+ or D-
p-value : float
one-tailed or two-tailed p-value
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, G(x)<=F(x), resp. G(x)>=F(x).
Examples
--------
>>> from scipy import stats
>>> import numpy as np
>>> from scipy.stats import kstest
>>> x = np.linspace(-15,15,9)
>>> kstest(x,'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> kstest('norm','',N=100)
(0.058352892479417884, 0.88531190944151261)
is equivalent to this
>>> np.random.seed(987654321)
>>> kstest(stats.norm.rvs(size=100),'norm')
(0.058352892479417884, 0.88531190944151261)
Test against one-sided alternative hypothesis:
>>> np.random.seed(987654321)
Shift distribution to larger values, so that cdf_dgp(x)< norm.cdf(x):
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Do not reject equal distribution against alternative hypothesis: greater
>>> kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
Testing t distributed random variables against normal distribution:
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the kstest does not reject the hypothesis that the sample
came from the normal distribution
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at a alpha=10% level
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, str):
#cdf = getattr(stats, rvs).cdf
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError('if rvs is string, cdf has to be the same distribution')
if isinstance(cdf, str):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size':N}
vals = np.sort(rvs(*args,**kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
if alternative in ['two_sided', 'greater']:
Dplus = (np.arange(1.0, N+1)/N - cdfvals).max()
if alternative == 'greater':
return Dplus, distributions.ksone.sf(Dplus,N)
if alternative in ['two_sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return Dmin, distributions.ksone.sf(Dmin,N)
if alternative == 'two_sided':
D = np.max([Dplus,Dmin])
if mode == 'asymp':
return D, distributions.kstwobign.sf(D*np.sqrt(N))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D*np.sqrt(N))
            if N > 2666 or pval_two > 0.80 - N*0.3/1000.0:
return D, distributions.kstwobign.sf(D*np.sqrt(N))
else:
return D, distributions.ksone.sf(D,N)*2 | Perform the Kolmogorov-Smirnov test for goodness of fit
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two_sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str or array or callable
string: name of a distribution in scipy.stats
array: 1-D observations of random variables
callable: function to generate random variables, requires keyword
argument `size`
cdf : str or callable
string: name of a distribution in scipy.stats, if rvs is a string then
cdf can evaluate to `False` or be the same as rvs
callable: function to evaluate cdf
args : tuple, sequence
distribution parameters, used if rvs or cdf are strings
N : int
sample size if rvs is string or callable
alternative : 'two_sided' (default), 'less' or 'greater'
defines the alternative hypothesis (see explanation)
mode : 'approx' (default) or 'asymp'
defines the distribution used for calculating p-value
'approx' : use approximation to exact distribution of test statistic
'asymp' : use asymptotic distribution of test statistic
Returns
-------
D : float
KS test statistic, either D, D+ or D-
p-value : float
one-tailed or two-tailed p-value
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, G(x)<=F(x), resp. G(x)>=F(x).
Examples
--------
>>> from scipy import stats
>>> import numpy as np
>>> from scipy.stats import kstest
>>> x = np.linspace(-15,15,9)
>>> kstest(x,'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> kstest('norm','',N=100)
(0.058352892479417884, 0.88531190944151261)
is equivalent to this
>>> np.random.seed(987654321)
>>> kstest(stats.norm.rvs(size=100),'norm')
(0.058352892479417884, 0.88531190944151261)
Test against one-sided alternative hypothesis:
>>> np.random.seed(987654321)
Shift distribution to larger values, so that cdf_dgp(x)< norm.cdf(x):
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Do not reject equal distribution against alternative hypothesis: greater
>>> kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
Testing t distributed random variables against normal distribution:
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the kstest does not reject the hypothesis that the sample
came from the normal distribution
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at a alpha=10% level
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224) | kstest | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
def v(self):
'''Kuiper'''
return self.d_plus + self.d_minus | Kuiper | v | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
def wsqu(self):
'''Cramer von Mises'''
nobs = self.nobs
cdfvals = self.cdfvals
#use literal formula, TODO: simplify with arange(,,2)
wsqu = ((cdfvals - (2. * np.arange(1., nobs+1) - 1)/nobs/2.)**2).sum() \
+ 1./nobs/12.
return wsqu | Cramer von Mises | wsqu | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
def asqu(self):
'''Stephens 1974, does not have p-value formula for A^2'''
nobs = self.nobs
cdfvals = self.cdfvals
asqu = -((2. * np.arange(1., nobs+1) - 1) *
(np.log(cdfvals) + np.log(1-cdfvals[::-1]) )).sum()/nobs - nobs
return asqu | Stephens 1974, does not have p-value formula for A^2 | asqu | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
def asquare(cdfvals, axis=0):
'''vectorized Anderson Darling A^2, Stephens 1974'''
ndim = len(cdfvals.shape)
nobs = cdfvals.shape[axis]
slice_reverse = [slice(None)] * ndim #might make copy if not specific axis???
islice = [None] * ndim
islice[axis] = slice(None)
slice_reverse[axis] = slice(None, None, -1)
asqu = -((2. * np.arange(1., nobs+1)[tuple(islice)] - 1) *
(np.log(cdfvals) + np.log(1-cdfvals[tuple(slice_reverse)]))/nobs).sum(axis) \
- nobs
return asqu | vectorized Anderson Darling A^2, Stephens 1974 | asquare | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
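A quick self-check of the vectorized statistic: for sorted uniform cdf values, i.e. data drawn from the hypothesized distribution with known parameters, A^2 has mean 1 under the null hypothesis.

import numpy as np

np.random.seed(12345)
cdfvals = np.sort(np.random.uniform(size=(1000, 200)), axis=1)
a2 = asquare(cdfvals, axis=1)    # one A^2 statistic per row
print(a2.shape, a2.mean())       # (1000,), mean near 1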
def bootstrap(distr, args=(), nobs=200, nrep=100, value=None, batch_size=None):
'''Monte Carlo (or parametric bootstrap) p-values for gof
currently hardcoded for A^2 only
assumes vectorized fit_vec method,
builds and analyses (nobs, nrep) sample in one step
rename function to less generic
this works also with nrep=1
'''
#signature similar to kstest ?
#delegate to fn ?
#rvs_kwds = {'size':(nobs, nrep)}
#rvs_kwds.update(kwds)
#it will be better to build a separate batch function that calls bootstrap
#keep batch if value is true, but batch iterate from outside if stat is returned
if batch_size is not None:
if value is None:
raise ValueError('using batching requires a value')
n_batch = int(np.ceil(nrep/float(batch_size)))
count = 0
for irep in range(n_batch):
rvs = distr.rvs(args, **{'size':(batch_size, nobs)})
params = distr.fit_vec(rvs, axis=1)
params = lmap(lambda x: np.expand_dims(x, 1), params)
cdfvals = np.sort(distr.cdf(rvs, params), axis=1)
stat = asquare(cdfvals, axis=1)
count += (stat >= value).sum()
return count / float(n_batch * batch_size)
else:
#rvs = distr.rvs(args, **kwds) #extension to distribution kwds ?
rvs = distr.rvs(args, **{'size':(nrep, nobs)})
params = distr.fit_vec(rvs, axis=1)
params = lmap(lambda x: np.expand_dims(x, 1), params)
cdfvals = np.sort(distr.cdf(rvs, params), axis=1)
stat = asquare(cdfvals, axis=1)
if value is None: #return all bootstrap results
stat_sorted = np.sort(stat)
return stat_sorted
else: #calculate and return specific p-value
return (stat >= value).mean() | Monte Carlo (or parametric bootstrap) p-values for gof
currently hardcoded for A^2 only
assumes vectorized fit_vec method,
builds and analyses (nobs, nrep) sample in one step
rename function to less generic
this works also with nrep=1 | bootstrap | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
def bootstrap2(value, distr, args=(), nobs=200, nrep=100):
'''Monte Carlo (or parametric bootstrap) p-values for gof
currently hardcoded for A^2 only
non vectorized, loops over all parametric bootstrap replications and calculates
    and returns a specific p-value,
rename function to less generic
'''
#signature similar to kstest ?
#delegate to fn ?
#rvs_kwds = {'size':(nobs, nrep)}
#rvs_kwds.update(kwds)
count = 0
for irep in range(nrep):
#rvs = distr.rvs(args, **kwds) #extension to distribution kwds ?
rvs = distr.rvs(args, **{'size':nobs})
params = distr.fit_vec(rvs)
cdfvals = np.sort(distr.cdf(rvs, params))
stat = asquare(cdfvals, axis=0)
count += (stat >= value)
return count * 1. / nrep | Monte Carlo (or parametric bootstrap) p-values for gof
currently hardcoded for A^2 only
non vectorized, loops over all parametric bootstrap replications and calculates
and returns a specific p-value,
rename function to less generic | bootstrap2 | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/gof_new.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/gof_new.py | BSD-3-Clause |
def mutualinfo_kde(y, x, normed=True):
'''mutual information of two random variables estimated with kde
'''
nobs = len(x)
if not len(y) == nobs:
raise ValueError('both data arrays need to have the same size')
x = np.asarray(x, float)
y = np.asarray(y, float)
yx = np.vstack((y,x))
kde_x = gaussian_kde(x)(x)
kde_y = gaussian_kde(y)(y)
kde_yx = gaussian_kde(yx)(yx)
mi_obs = np.log(kde_yx) - np.log(kde_x) - np.log(kde_y)
mi = mi_obs.sum() / nobs
if normed:
mi_normed = np.sqrt(1. - np.exp(-2 * mi))
return mi_normed
else:
return mi | mutual information of two random variables estimated with kde | mutualinfo_kde | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_measures.py | BSD-3-Clause |
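For jointly normal variables the normed measure sqrt(1 - exp(-2*mi)) estimates the absolute correlation, which gives an easy check; an illustrative sketch:

import numpy as np

np.random.seed(12345)
x = np.random.randn(500)
y = 0.8 * x + 0.6 * np.random.randn(500)   # corr(x, y) = 0.8 by construction
print(mutualinfo_kde(y, x))                # roughly 0.8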
def mutualinfo_kde_2sample(y, x, normed=True):
'''mutual information of two random variables estimated with kde
'''
nobs = len(x)
x = np.asarray(x, float)
y = np.asarray(y, float)
#yx = np.vstack((y,x))
kde_x = gaussian_kde(x.T)(x.T)
kde_y = gaussian_kde(y.T)(x.T)
#kde_yx = gaussian_kde(yx)(yx)
mi_obs = np.log(kde_x) - np.log(kde_y)
if len(mi_obs) != nobs:
raise ValueError("Wrong number of observations")
mi = mi_obs.mean()
if normed:
mi_normed = np.sqrt(1. - np.exp(-2 * mi))
return mi_normed
else:
return mi | mutual information of two random variables estimated with kde | mutualinfo_kde_2sample | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_measures.py | BSD-3-Clause |
def mutualinfo_binned(y, x, bins, normed=True):
    '''mutual information of two random variables estimated from binned data
Notes
-----
bins='auto' selects the number of bins so that approximately 5 observations
are expected to be in each bin under the assumption of independence. This
follows roughly the description in Kahn et al. 2007
'''
nobs = len(x)
if not len(y) == nobs:
raise ValueError('both data arrays need to have the same size')
x = np.asarray(x, float)
y = np.asarray(y, float)
#yx = np.vstack((y,x))
## fyx, binsy, binsx = np.histogram2d(y, x, bins=bins)
## fx, binsx_ = np.histogram(x, bins=binsx)
## fy, binsy_ = np.histogram(y, bins=binsy)
if bins == 'auto':
ys = np.sort(y)
xs = np.sort(x)
#quantiles = np.array([0,0.25, 0.4, 0.6, 0.75, 1])
qbin_sqr = np.sqrt(5./nobs)
        quantiles = np.linspace(0, 1, int(np.round(1. / qbin_sqr)))
quantile_index = ((nobs-1)*quantiles).astype(int)
#move edges so that they do not coincide with an observation
shift = 1e-6 + np.ones(quantiles.shape)
shift[0] -= 2*1e-6
binsy = ys[quantile_index] + shift
binsx = xs[quantile_index] + shift
elif np.size(bins) == 1:
binsy = bins
binsx = bins
elif (len(bins) == 2):
binsy, binsx = bins
## if np.size(bins[0]) == 1:
## binsx = bins[0]
## if np.size(bins[1]) == 1:
## binsx = bins[1]
fx, binsx = np.histogram(x, bins=binsx)
fy, binsy = np.histogram(y, bins=binsy)
fyx, binsy, binsx = np.histogram2d(y, x, bins=(binsy, binsx))
pyx = fyx * 1. / nobs
px = fx * 1. / nobs
py = fy * 1. / nobs
mi_obs = pyx * (np.log(pyx+1e-10) - np.log(py)[:,None] - np.log(px))
mi = mi_obs.sum()
if normed:
mi_normed = np.sqrt(1. - np.exp(-2 * mi))
return mi_normed, (pyx, py, px, binsy, binsx), mi_obs
else:
        return mi | mutual information of two random variables estimated from binned data
Notes
-----
bins='auto' selects the number of bins so that approximately 5 observations
are expected to be in each bin under the assumption of independence. This
follows roughly the description in Kahn et al. 2007 | mutualinfo_binned | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_measures.py | BSD-3-Clause |
def __init__(self, mixing_dist, base_dist, bd_args_func, bd_kwds_func,
cutoff=1e-3):
'''create a mixture distribution
Parameters
----------
mixing_dist : discrete frozen distribution
mixing distribution
base_dist : continuous distribution
parametrized distributions in the mixture
bd_args_func : callable
function that builds the tuple of args for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty tuple or
a tuple of arrays.
bd_kwds_func : callable
function that builds the dictionary of kwds for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty dictionary or
a dictionary with arrays as values.
cutoff : float
If the mixing distribution has infinite support, then the
distribution is truncated with approximately (subject to integer
conversion) the cutoff probability in the missing tail. Random
draws that are outside the truncated range are clipped, that is
assigned to the highest or lowest value in the truncated support.
'''
self.mixing_dist = mixing_dist
self.base_dist = base_dist
#self.bd_args = bd_args
if not np.isneginf(mixing_dist.dist.a):
lower = mixing_dist.dist.a
else:
        lower = mixing_dist.ppf(cutoff)
if not np.isposinf(mixing_dist.dist.b):
upper = mixing_dist.dist.b
else:
        upper = mixing_dist.isf(cutoff)
self.ma = lower
self.mb = upper
mixing_support = np.arange(lower, upper+1)
self.mixing_probs = mixing_dist.pmf(mixing_support)
self.bd_args = bd_args_func(mixing_support)
self.bd_kwds = bd_kwds_func(mixing_support) | create a mixture distribution
Parameters
----------
mixing_dist : discrete frozen distribution
mixing distribution
base_dist : continuous distribution
parametrized distributions in the mixture
bd_args_func : callable
function that builds the tuple of args for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty tuple or
a tuple of arrays.
bd_kwds_func : callable
function that builds the dictionary of kwds for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty dictionary or
a dictionary with arrays as values.
cutoff : float
If the mixing distribution has infinite support, then the
distribution is truncated with approximately (subject to integer
conversion) the cutoff probability in the missing tail. Random
draws that are outside the truncated range are clipped, that is
assigned to the highest or lowest value in the truncated support. | __init__ | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/otherdist.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/otherdist.py | BSD-3-Clause |
def _get_clip_lower(self, kwds):
'''helper method to get clip_lower from kwds or attribute
'''
if 'clip_lower' not in kwds:
clip_lower = self.clip_lower
else:
clip_lower = kwds.pop('clip_lower')
return clip_lower, kwds | helper method to get clip_lower from kwds or attribute | _get_clip_lower | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/otherdist.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/otherdist.py | BSD-3-Clause |
def chi2_pdf(self, x, df):
'''pdf of chi-square distribution'''
#from scipy.stats.distributions
Px = x**(df/2.0-1)*np.exp(-x/2.0)
Px /= special.gamma(df/2.0)* 2**(df/2.0)
return Px | pdf of chi-square distribution | chi2_pdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/multivariate.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/multivariate.py | BSD-3-Clause |
def mvstdtprob(a, b, R, df, ieps=1e-5, quadkwds=None, mvstkwds=None):
"""
Probability of rectangular area of standard t distribution
assumes mean is zero and R is correlation matrix
Notes
-----
This function does not calculate the estimate of the combined error
between the underlying multivariate normal probability calculations
and the integration.
"""
kwds = dict(args=(a, b, R, df), epsabs=1e-4, epsrel=1e-2, limit=150)
if quadkwds is not None:
kwds.update(quadkwds)
lower, upper = chi.ppf([ieps, 1 - ieps], df)
res, err = integrate.quad(funbgh2, lower, upper, **kwds)
prob = res * bghfactor(df)
return prob | Probability of rectangular area of standard t distribution
assumes mean is zero and R is correlation matrix
Notes
-----
This function does not calculate the estimate of the combined error
between the underlying multivariate normal probability calculations
and the integration. | mvstdtprob | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/multivariate.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/multivariate.py | BSD-3-Clause |
def multivariate_t_rvs(m, S, df=np.inf, n=1):
'''generate random variables of multivariate t distribution
Parameters
----------
m : array_like
mean of random variable, length determines dimension of random variable
S : array_like
square array of covariance matrix
df : int or float
degrees of freedom
n : int
        number of observations; the returned random array will be (n, len(m))
Returns
-------
rvs : ndarray, (n, len(m))
each row is an independent draw of a multivariate t distributed
random variable
'''
m = np.asarray(m)
d = len(m)
if df == np.inf:
x = np.ones(n)
else:
x = np.random.chisquare(df, n)/df
z = np.random.multivariate_normal(np.zeros(d),S,(n,))
return m + z/np.sqrt(x)[:,None] # same output format as random.multivariate_normal | generate random variables of multivariate t distribution
Parameters
----------
m : array_like
mean of random variable, length determines dimension of random variable
S : array_like
square array of covariance matrix
df : int or float
degrees of freedom
n : int
    number of observations; the returned random array will be (n, len(m))
Returns
-------
rvs : ndarray, (n, len(m))
each row is an independent draw of a multivariate t distributed
random variable | multivariate_t_rvs | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/multivariate.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/multivariate.py | BSD-3-Clause |
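A small usage sketch; for df > 2 the covariance of the multivariate t is S * df / (df - 2), so with df=4 the sample covariance should be near 2*S:

import numpy as np

np.random.seed(12345)
S = np.array([[1., 0.5], [0.5, 2.]])
rvs = multivariate_t_rvs([0., 2.], S, df=4, n=1000)
print(rvs.shape)        # (1000, 2)
print(np.cov(rvs.T))    # roughly 2 * S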
def prob_bv_rectangle(lower, upper, cdf):
'''helper function for probability of a rectangle in a bivariate distribution
Parameters
----------
lower : array_like
tuple of lower integration bounds
upper : array_like
tuple of upper integration bounds
cdf : callable
cdf(x,y), cumulative distribution function of bivariate distribution
how does this generalize to more than 2 variates ?
'''
probuu = cdf(*upper)
probul = cdf(upper[0], lower[1])
problu = cdf(lower[0], upper[1])
probll = cdf(*lower)
return probuu - probul - problu + probll | helper function for probability of a rectangle in a bivariate distribution
Parameters
----------
lower : array_like
tuple of lower integration bounds
upper : array_like
tuple of upper integration bounds
cdf : callable
cdf(x,y), cumulative distribution function of bivariate distribution
How does this generalize to more than 2 variates? | prob_bv_rectangle | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/quantize.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/quantize.py | BSD-3-Clause
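A minimal worked example with independent uniform margins, where the rectangle probability is just the rectangle's area:

#with independent Uniform(0, 1) margins the cdf is F(x, y) = x*y, so the
#inclusion-exclusion sum reduces to the area of the rectangle
def unif_cdf(x, y):
    return x * y

p = prob_bv_rectangle((0.2, 0.3), (0.7, 0.9), unif_cdf)
assert abs(p - 0.5 * 0.6) < 1e-12    #(0.7-0.2) * (0.9-0.3) = 0.3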
def prob_mv_grid(bins, cdf, axis=-1):
'''helper function for probability of a rectangle grid in a multivariate distribution
    How does this generalize to more than 2 variates?
bins : tuple
tuple of bin edges, currently it is assumed that they broadcast
correctly
'''
    if not isinstance(bins, np.ndarray):
        bins = [np.asarray(b) for b in bins]
    n_dim = len(bins)
    bins_ = []
    #broadcast if bin edges are 1d
    if all(np.ndim(b) == 1 for b in bins):
        for d in range(n_dim):
            sl = [None] * n_dim
            sl[d] = slice(None)
            #index with a tuple; indexing an array with a list is an error
            #in modern numpy
            bins_.append(bins[d][tuple(sl)])
    else: #assume it is already correctly broadcasted
        n_dim = bins.shape[0]
        bins_ = bins
cdf_values = cdf(bins_)
probs = cdf_values.copy()
for d in range(n_dim):
probs = np.diff(probs, axis=d)
return probs | helper function for probability of a rectangle grid in a multivariate distribution
How does this generalize to more than 2 variates?
bins : tuple
tuple of bin edges, currently it is assumed that they broadcast
correctly | prob_mv_grid | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/quantize.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/quantize.py | BSD-3-Clause |
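A usage sketch for the 1d bin-edge branch, assuming the corrected code above; note that the cdf callable receives the list of broadcast bin-edge arrays as its single argument:

import numpy as np

def unif_cdf_grid(b):
    #cdf of two independent Uniform(0, 1) variables; b is the list of
    #broadcastable bin-edge arrays that prob_mv_grid passes in
    return b[0] * b[1]

edges = np.linspace(0, 1, 6)               #5 equal bins per dimension
probs = prob_mv_grid((edges, edges), unif_cdf_grid)
assert probs.shape == (5, 5)
assert np.allclose(probs, 1.0 / 25)        #uniform grid -> equal cell mass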
def prob_quantize_cdf(binsx, binsy, cdf):
'''quantize a continuous distribution given by a cdf
Parameters
----------
    binsx : array_like, 1d
        bin edges for the first variable
    binsy : array_like, 1d
        bin edges for the second variable
'''
binsx = np.asarray(binsx)
binsy = np.asarray(binsy)
nx = len(binsx) - 1
ny = len(binsy) - 1
probs = np.nan * np.ones((nx, ny)) #np.empty(nx,ny)
cdf_values = cdf(binsx[:,None], binsy)
def cdf_func(x, y):
return cdf_values[x, y]
for xind in range(1, nx+1):
for yind in range(1, ny+1):
upper = (xind, yind)
lower = (xind-1, yind-1)
#print upper,lower,
probs[xind-1,yind-1] = prob_bv_rectangle(lower, upper, cdf_func)
assert not np.isnan(probs).any()
return probs | quantize a continuous distribution given by a cdf
Parameters
----------
binsx : array_like, 1d
    bin edges for the first variable
binsy : array_like, 1d
    bin edges for the second variable | prob_quantize_cdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/quantize.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/quantize.py | BSD-3-Clause
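A self-contained check with independent uniform margins; the cell probabilities must sum to one when the bins cover the full support:

import numpy as np

def unif_cdf(x, y):
    return x * y                     #independent Uniform(0, 1) margins

probs = prob_quantize_cdf(np.linspace(0, 1, 5), np.linspace(0, 1, 11), unif_cdf)
assert probs.shape == (4, 10)
assert np.isclose(probs.sum(), 1.0)  #the bins cover the full support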
def prob_quantize_cdf_old(binsx, binsy, cdf):
'''quantize a continuous distribution given by a cdf
old version without precomputing cdf values
Parameters
----------
    binsx : array_like, 1d
        bin edges for the first variable
    binsy : array_like, 1d
        bin edges for the second variable
'''
binsx = np.asarray(binsx)
binsy = np.asarray(binsy)
nx = len(binsx) - 1
ny = len(binsy) - 1
probs = np.nan * np.ones((nx, ny)) #np.empty(nx,ny)
for xind in range(1, nx+1):
for yind in range(1, ny+1):
upper = (binsx[xind], binsy[yind])
lower = (binsx[xind-1], binsy[yind-1])
#print upper,lower,
probs[xind-1,yind-1] = prob_bv_rectangle(lower, upper, cdf)
assert not np.isnan(probs).any()
return probs | quantize a continuous distribution given by a cdf
old version without precomputing cdf values
Parameters
----------
binsx : array_like, 1d
    bin edges for the first variable
binsy : array_like, 1d
    bin edges for the second variable | prob_quantize_cdf_old | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/quantize.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/quantize.py | BSD-3-Clause
def expect_mc(dist, func=lambda x: 1, size=50000):
'''calculate expected value of function by Monte Carlo integration
Parameters
----------
dist : distribution instance
needs to have rvs defined as a method for drawing random numbers
func : callable
function for which expectation is calculated, this function needs to
be vectorized, integration is over axis=0
size : int
        number of random samples to use in the Monte Carlo integration
Notes
-----
this does not batch
Returns
-------
expected value : ndarray
        return of function func integrated over axis=0 by Monte Carlo; this will
have the same shape as the return of func without axis=0
Examples
--------
integrate probability that both observations are negative
>>> mvn = mve.MVNormal([0,0],2.)
>>> mve.expect_mc(mvn, lambda x: (x<np.array([0,0])).all(-1), size=100000)
0.25306000000000001
get tail probabilities of marginal distribution (should be 0.1)
>>> c = stats.norm.isf(0.05, scale=np.sqrt(2.))
>>> expect_mc(mvn, lambda x: (np.abs(x)>np.array([c, c])), size=100000)
array([ 0.09969, 0.0986 ])
or calling the method
>>> mvn.expect_mc(lambda x: (np.abs(x)>np.array([c, c])), size=100000)
array([ 0.09937, 0.10075])
'''
def fun(x):
return func(x) # * dist.pdf(x)
rvs = dist.rvs(size=size)
return fun(rvs).mean(0) | calculate expected value of function by Monte Carlo integration
Parameters
----------
dist : distribution instance
needs to have rvs defined as a method for drawing random numbers
func : callable
function for which expectation is calculated, this function needs to
be vectorized, integration is over axis=0
size : int
    number of random samples to use in the Monte Carlo integration
Notes
-----
this does not batch
Returns
-------
expected value : ndarray
    return of function func integrated over axis=0 by Monte Carlo; this will
have the same shape as the return of func without axis=0
Examples
--------
integrate probability that both observations are negative
>>> mvn = mve.MVNormal([0,0],2.)
>>> mve.expect_mc(mvn, lambda x: (x<np.array([0,0])).all(-1), size=100000)
0.25306000000000001
get tail probabilities of marginal distribution (should be 0.1)
>>> c = stats.norm.isf(0.05, scale=np.sqrt(2.))
>>> expect_mc(mvn, lambda x: (np.abs(x)>np.array([c, c])), size=100000)
array([ 0.09969, 0.0986 ])
or calling the method
>>> mvn.expect_mc(lambda x: (np.abs(x)>np.array([c, c])), size=100000)
array([ 0.09937, 0.10075]) | expect_mc | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def expect_mc_bounds(dist, func=lambda x: 1, size=50000, lower=None, upper=None,
conditional=False, overfact=1.2):
'''calculate expected value of function by Monte Carlo integration
Parameters
----------
dist : distribution instance
needs to have rvs defined as a method for drawing random numbers
func : callable
function for which expectation is calculated, this function needs to
be vectorized, integration is over axis=0
size : int
minimum number of random samples to use in the Monte Carlo integration,
the actual number used can be larger because of oversampling.
lower : None or array_like
lower integration bounds, if None, then it is set to -inf
upper : None or array_like
upper integration bounds, if None, then it is set to +inf
conditional : bool
        If True, then the expectation is conditional on being within the
        [lower, upper] bounds, otherwise it is unconditional
    overfact : float
        oversampling factor; the actual number of random variables drawn in
        each attempt is overfact * remaining draws. Extra draws are also
used in the integration.
Notes
-----
this does not batch
Returns
-------
expected value : ndarray
        return of function func integrated over axis=0 by Monte Carlo; this will
have the same shape as the return of func without axis=0
Examples
--------
>>> mvn = mve.MVNormal([0,0],2.)
>>> mve.expect_mc_bounds(mvn, lambda x: np.ones(x.shape[0]),
lower=[-10,-10],upper=[0,0])
0.24990416666666668
    get the first 4 marginal moments with one integration
>>> mvn = mve.MVNormal([0,0],1.)
>>> mve.expect_mc_bounds(mvn, lambda x: np.dstack([x, x**2, x**3, x**4]),
lower=[-np.inf,-np.inf], upper=[np.inf,np.inf])
array([[ 2.88629497e-03, 9.96706297e-01, -2.51005344e-03,
2.95240921e+00],
[ -5.48020088e-03, 9.96004409e-01, -2.23803072e-02,
2.96289203e+00]])
>>> from scipy import stats
>>> [stats.norm.moment(i) for i in [1,2,3,4]]
[0.0, 1.0, 0.0, 3.0]
'''
#call rvs once to find length of random vector
rvsdim = dist.rvs(size=1).shape[-1]
if lower is None:
lower = -np.inf * np.ones(rvsdim)
else:
lower = np.asarray(lower)
if upper is None:
upper = np.inf * np.ones(rvsdim)
else:
upper = np.asarray(upper)
def fun(x):
return func(x) # * dist.pdf(x)
rvsli = []
    used = 0
    total = 0
    while True:
        remain = size - used  #draws still needed
        rvs = dist.rvs(size=int(remain * overfact))
        #count the draws actually made, not size * overfact
        total += rvs.shape[0]
        rvsok = rvs[((rvs >= lower) & (rvs <= upper)).all(-1)]
        #atleast_2d guards against shape problems if only 1 random vector is left
        rvsok = np.atleast_2d(rvsok)
        used += rvsok.shape[0]
        rvsli.append(rvsok)   #extra draws beyond `size` are kept and used
        if used >= size:
            break
    rvs = np.vstack(rvsli)
    assert used == rvs.shape[0]  #safety check
mean_conditional = fun(rvs).mean(0)
if conditional:
return mean_conditional
else:
return mean_conditional * (used * 1. / total) | calculate expected value of function by Monte Carlo integration
Parameters
----------
dist : distribution instance
needs to have rvs defined as a method for drawing random numbers
func : callable
function for which expectation is calculated, this function needs to
be vectorized, integration is over axis=0
size : int
minimum number of random samples to use in the Monte Carlo integration,
the actual number used can be larger because of oversampling.
lower : None or array_like
lower integration bounds, if None, then it is set to -inf
upper : None or array_like
upper integration bounds, if None, then it is set to +inf
conditional : bool
    If True, then the expectation is conditional on being within the
    [lower, upper] bounds, otherwise it is unconditional
overfact : float
    oversampling factor; the actual number of random variables drawn in
    each attempt is overfact * remaining draws. Extra draws are also
used in the integration.
Notes
-----
this does not batch
Returns
-------
expected value : ndarray
    return of function func integrated over axis=0 by Monte Carlo; this will
have the same shape as the return of func without axis=0
Examples
--------
>>> mvn = mve.MVNormal([0,0],2.)
>>> mve.expect_mc_bounds(mvn, lambda x: np.ones(x.shape[0]),
lower=[-10,-10],upper=[0,0])
0.24990416666666668
get the first 4 marginal moments with one integration
>>> mvn = mve.MVNormal([0,0],1.)
>>> mve.expect_mc_bounds(mvn, lambda x: np.dstack([x, x**2, x**3, x**4]),
lower=[-np.inf,-np.inf], upper=[np.inf,np.inf])
array([[ 2.88629497e-03, 9.96706297e-01, -2.51005344e-03,
2.95240921e+00],
[ -5.48020088e-03, 9.96004409e-01, -2.23803072e-02,
2.96289203e+00]])
>>> from scipy import stats
>>> [stats.norm.moment(i) for i in [1,2,3,4]]
[0.0, 1.0, 0.0, 3.0] | expect_mc_bounds | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def bivariate_normal(x, mu, cov):
"""
Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld.
"""
X, Y = np.transpose(x)
mux, muy = mu
sigmax, sigmaxy, tmp, sigmay = np.ravel(cov)
sigmax, sigmay = np.sqrt(sigmax), np.sqrt(sigmay)
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp( -z/(2*(1-rho**2))) / denom | Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld. | bivariate_normal | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
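A cross-check against scipy's multivariate normal density (sketch; assumes scipy is available):

import numpy as np
from scipy import stats

mu = np.array([1.0, -1.0])
cov = np.array([[2.0, 0.6],
                [0.6, 1.0]])
pts = np.array([[0.0, 0.0], [1.0, -1.0], [2.5, 0.5]])
#each row of pts is one evaluation point
assert np.allclose(bivariate_normal(pts, mu, cov),
                   stats.multivariate_normal(mu, cov).pdf(pts))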
def kl(self, other):
'''Kullback-Leibler divergence between this and another distribution
int f(x) (log f(x) - log g(x)) dx
where f is the pdf of self, and g is the pdf of other
uses double integration with scipy.integrate.dblquad
limits currently hardcoded
'''
def fun(x):
return self.logpdf(x) - other.logpdf(x)
return self.expect(fun) | Kullback-Leibler divergence between this and another distribution
int f(x) (log f(x) - log g(x)) dx
where f is the pdf of self, and g is the pdf of other
uses double integration with scipy.integrate.dblquad
limits currently hardcoded | kl | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
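For two multivariate normals the divergence has a closed form, which gives an independent cross-check for the numerical integration in kl (a self-contained sketch):

import numpy as np

def kl_mvnormal(m0, S0, m1, S1):
    #closed-form KL(N(m0, S0) || N(m1, S1))
    k = len(m0)
    S1inv = np.linalg.inv(S1)
    d = np.asarray(m1) - np.asarray(m0)
    return 0.5 * (np.trace(S1inv @ S0) + d @ S1inv @ d - k
                  + np.log(np.linalg.det(S1) / np.linalg.det(S0)))

#0.5 * (1 + 0.5 - 2 + log 4), about 0.443
print(kl_mvnormal([0., 0.], np.eye(2), [1., 0.], 2 * np.eye(2)))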
def __init__(self, mean, sigma, *args, **kwds):
'''initialize instance
Parameters
----------
mean : array_like
parameter mu (might be renamed), for symmetric distributions this
is the mean
sigma : array_like, 2d
dispersion matrix, covariance matrix in normal distribution, but
only proportional to covariance matrix in t distribution
args : list
distribution specific arguments, e.g. df for t distribution
kwds : dict
currently not used
'''
self.extra_args = []
self.mean = np.asarray(mean)
self.sigma = sigma = np.asarray(sigma)
sigma = np.squeeze(sigma)
self.nvars = nvars = len(mean)
#self.covchol = np.linalg.cholesky(sigma)
#in the following sigma is original, self.sigma is full matrix
if sigma.shape == ():
#iid
self.sigma = np.eye(nvars) * sigma
self.sigmainv = np.eye(nvars) / sigma
self.cholsigmainv = np.eye(nvars) / np.sqrt(sigma)
elif (sigma.ndim == 1) and (len(sigma) == nvars):
#independent heteroskedastic
self.sigma = np.diag(sigma)
self.sigmainv = np.diag(1. / sigma)
self.cholsigmainv = np.diag( 1. / np.sqrt(sigma))
elif sigma.shape == (nvars, nvars): #python tuple comparison
#general
self.sigmainv = np.linalg.pinv(sigma)
self.cholsigmainv = np.linalg.cholesky(self.sigmainv).T
else:
raise ValueError('sigma has invalid shape')
#store logdetsigma for logpdf
self.logdetsigma = np.log(np.linalg.det(self.sigma)) | initialize instance
Parameters
----------
mean : array_like
parameter mu (might be renamed), for symmetric distributions this
is the mean
sigma : array_like, 2d
dispersion matrix, covariance matrix in normal distribution, but
only proportional to covariance matrix in t distribution
args : list
distribution specific arguments, e.g. df for t distribution
kwds : dict
currently not used | __init__ | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
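A sketch of the three accepted sigma shapes, assuming the concrete MVNormal subclass defined in this module; all three should build the same full dispersion matrix:

import numpy as np
from statsmodels.sandbox.distributions.mv_normal import MVNormal

m = [0.0, 0.0]
iid = MVNormal(m, 2.0)                  #scalar sigma -> 2. * eye(2)
het = MVNormal(m, [2.0, 2.0])           #1d sigma -> diag([2., 2.])
full = MVNormal(m, 2.0 * np.eye(2))     #full 2x2 dispersion matrix
assert np.allclose(iid.sigma, het.sigma)
assert np.allclose(het.sigma, full.sigma)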
def rvs(self, size=1):
'''random variable
Parameters
----------
size : int or tuple
the number and shape of random variables to draw.
Returns
-------
rvs : ndarray
the returned random variables with shape given by size and the
dimension of the multivariate random vector as additional last
dimension
'''
raise NotImplementedError | random variable
Parameters
----------
size : int or tuple
the number and shape of random variables to draw.
Returns
-------
rvs : ndarray
the returned random variables with shape given by size and the
dimension of the multivariate random vector as additional last
dimension | rvs | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def logpdf(self, x):
'''logarithm of probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
logpdf : float or array
probability density value of each random vector
        this should be made to work with 2d x, with a multivariate normal
        vector in each row and iid across rows; it does not work now
        because of the dot product in whiten
'''
raise NotImplementedError | logarithm of probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
logpdf : float or array
probability density value of each random vector
this should be made to work with 2d x, with a multivariate normal
vector in each row and iid across rows; it does not work now
because of the dot product in whiten | logpdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
def cdf(self, x, **kwds):
'''cumulative distribution function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
kwds : dict
contains options for the numerical calculation of the cdf
Returns
-------
cdf : float or array
        cumulative probability value of each random vector
'''
raise NotImplementedError | cumulative distribution function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
kwds : dict
contains options for the numerical calculation of the cdf
Returns
-------
cdf : float or array
cumulative probability value of each random vector | cdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
def affine_transformed(self, shift, scale_matrix):
        '''affine transformation; defined in subclasses because of
        distribution-specific restrictions'''
#implemented in subclass at least for now
        raise NotImplementedError | affine transformation; defined in subclasses because of
distribution-specific restrictions | affine_transformed | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
def whiten(self, x):
"""
whiten the data by linear transformation
Parameters
----------
x : array_like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
np.dot(x, self.cholsigmainv.T)
Notes
-----
        This only does rescaling; it does not subtract the mean. Use
        standardize for that instead.
See Also
--------
standardize : subtract mean and rescale to standardized random variable.
"""
x = np.asarray(x)
return np.dot(x, self.cholsigmainv.T) | whiten the data by linear transformation
Parameters
----------
x : array_like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
np.dot(x, self.cholsigmainv.T)
Notes
-----
This only does rescaling; it does not subtract the mean. Use
standardize for that instead.
See Also
--------
standardize : subtract mean and rescale to standardized random variable. | whiten | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
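A whitening sanity check (sketch; assumes the MVNormal subclass from this module): standardized draws should have roughly zero mean and identity covariance.

import numpy as np
from statsmodels.sandbox.distributions.mv_normal import MVNormal

np.random.seed(0)
mvn = MVNormal([1.0, 2.0], np.array([[2.0, 0.8],
                                     [0.8, 1.0]]))
z = mvn.standardize(mvn.rvs(size=100000))   #subtract mean, then whiten
assert np.allclose(z.mean(0), 0.0, atol=0.02)
assert np.allclose(np.cov(z, rowvar=False), np.eye(2), atol=0.02)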
def pdf(self, x):
'''probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
pdf : float or array
probability density value of each random vector
'''
return np.exp(self.logpdf(x)) | probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
pdf : float or array
probability density value of each random vector | pdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def standardize(self, x):
'''standardize the random variable, i.e. subtract mean and whiten
Parameters
----------
x : array_like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
np.dot(x - self.mean, self.cholsigmainv.T)
See Also
--------
whiten : rescale random variable, standardize without subtracting mean.
'''
return self.whiten(x - self.mean) | standardize the random variable, i.e. subtract mean and whiten
Parameters
----------
x : array_like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
np.dot(x - self.mean, self.cholsigmainv.T)
See Also
--------
whiten : rescale random variable, standardize without subtracting mean. | standardize | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def standardized(self):
'''return new standardized MVNormal instance
'''
return self.affine_transformed(-self.mean, self.cholsigmainv) | return new standardized MVNormal instance | standardized | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def normalize(self, x):
'''normalize the random variable, i.e. subtract mean and rescale
The distribution will have zero mean and sigma equal to correlation
Parameters
----------
x : array_like, 1d or 2d
        Data to be normalized, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
(x - self.mean)/std_sigma
See Also
--------
whiten : rescale random variable, standardize without subtracting mean.
'''
std_ = np.atleast_2d(self.std_sigma)
return (x - self.mean)/std_ #/std_.T | normalize the random variable, i.e. subtract mean and rescale
The distribution will have zero mean and sigma equal to correlation
Parameters
----------
x : array_like, 1d or 2d
    Data to be normalized, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
(x - self.mean)/std_sigma
See Also
--------
whiten : rescale random variable, standardize without subtracting mean. | normalize | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def normalized(self, demeaned=True):
'''return a normalized distribution where sigma=corr
if demeaned is True, then mean will be set to zero
'''
if demeaned:
mean_new = np.zeros_like(self.mean)
else:
mean_new = self.mean / self.std_sigma
sigma_new = self.corr
args = [getattr(self, ea) for ea in self.extra_args]
return self.__class__(mean_new, sigma_new, *args) | return a normalized distribution where sigma=corr
if demeaned is True, then mean will be set to zero | normalized | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
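A quick check of the normalized distribution (sketch; assumes the MVNormal subclass from this module):

import numpy as np
from statsmodels.sandbox.distributions.mv_normal import MVNormal

mvn = MVNormal([1.0, -1.0], np.array([[4.0, 0.6],
                                      [0.6, 1.0]]))
nrm = mvn.normalized()
assert np.allclose(nrm.mean, 0.0)            #demeaned by default
assert np.allclose(np.diag(nrm.sigma), 1.0)  #sigma is now a correlation matrix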
def normalized2(self, demeaned=True):
'''return a normalized distribution where sigma=corr
second implementation for testing affine transformation
'''
if demeaned:
shift = -self.mean
else:
shift = self.mean * (1. / self.std_sigma - 1.)
return self.affine_transformed(shift, np.diag(1. / self.std_sigma)) | return a normalized distribution where sigma=corr
second implementation for testing affine transformation | normalized2 | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def std(self):
'''standard deviation, square root of diagonal elements of cov
'''
return np.sqrt(np.diag(self.cov)) | standard deviation, square root of diagonal elements of cov | std | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def std_sigma(self):
'''standard deviation, square root of diagonal elements of sigma
'''
return np.sqrt(np.diag(self.sigma)) | standard deviation, square root of diagonal elements of sigma | std_sigma | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
def corr(self):
'''correlation matrix'''
return self.cov / np.outer(self.std, self.std) | correlation matrix | corr | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
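A small numeric illustration of how corr is derived from cov (self-contained):

import numpy as np

cov = np.array([[4.0, 1.2],
                [1.2, 1.0]])
std = np.sqrt(np.diag(cov))              #[2.0, 1.0]
corr = cov / np.outer(std, std)
assert np.allclose(np.diag(corr), 1.0)
assert np.isclose(corr[0, 1], 0.6)       #1.2 / (2.0 * 1.0)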
def marginal(self, indices):
'''return marginal distribution for variables given by indices
this should be correct for normal and t distribution
Parameters
----------
indices : array_like, int
list of indices of variables in the marginal distribution
Returns
-------
mvdist : instance
new instance of the same multivariate distribution class that
contains the marginal distribution of the variables given in
indices
'''
indices = np.asarray(indices)
mean_new = self.mean[indices]
sigma_new = self.sigma[indices[:,None], indices]
args = [getattr(self, ea) for ea in self.extra_args]
return self.__class__(mean_new, sigma_new, *args) | return marginal distribution for variables given by indices
this should be correct for normal and t distribution
Parameters
----------
indices : array_like, int
list of indices of variables in the marginal distribution
Returns
-------
mvdist : instance
new instance of the same multivariate distribution class that
contains the marginal distribution of the variables given in
indices | marginal | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
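A usage sketch for extracting a marginal, assuming the MVNormal subclass from this module:

import numpy as np
from statsmodels.sandbox.distributions.mv_normal import MVNormal

mvn3 = MVNormal([0.0, 1.0, 2.0],
                np.array([[1.0, 0.5, 0.2],
                          [0.5, 2.0, 0.3],
                          [0.2, 0.3, 3.0]]))
marg = mvn3.marginal([0, 2])   #keep the first and third variables
assert np.allclose(marg.mean, [0.0, 2.0])
assert np.allclose(marg.sigma, [[1.0, 0.2],
                                [0.2, 3.0]])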