code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def loglikeobs(self, params, pen_weight=None, **kwds):
"""
Log-likelihood of model observations at params
"""
if pen_weight is None:
pen_weight = self.pen_weight
llf = super().loglikeobs(params, **kwds)
nobs_llf = float(llf.shape[0])
if pen_weight != 0:
scale = self._handle_scale(params, **kwds)
llf -= 1/scale * pen_weight / nobs_llf * self.penal.func(params)
return llf | Log-likelihood of model observations at params | loglikeobs | python | statsmodels/statsmodels | statsmodels/base/_penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalized.py | BSD-3-Clause |
def score_numdiff(self, params, pen_weight=None, method='fd', **kwds):
"""score based on finite difference derivative
"""
if pen_weight is None:
pen_weight = self.pen_weight
def loglike(p):
return self.loglike(p, pen_weight=pen_weight, **kwds)
if method == 'cs':
return approx_fprime_cs(params, loglike)
elif method == 'fd':
return approx_fprime(params, loglike, centered=True)
else:
raise ValueError('method not recognized, should be "fd" or "cs"') | score based on finite difference derivative | score_numdiff | python | statsmodels/statsmodels | statsmodels/base/_penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalized.py | BSD-3-Clause |
def score(self, params, pen_weight=None, **kwds):
"""
Gradient of model at params
"""
if pen_weight is None:
pen_weight = self.pen_weight
sc = super().score(params, **kwds)
if pen_weight != 0:
scale = self._handle_scale(params, **kwds)
sc -= 1/scale * pen_weight * self.penal.deriv(params)
return sc | Gradient of model at params | score | python | statsmodels/statsmodels | statsmodels/base/_penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalized.py | BSD-3-Clause |
def score_obs(self, params, pen_weight=None, **kwds):
"""
Gradient of model observations at params
"""
if pen_weight is None:
pen_weight = self.pen_weight
sc = super().score_obs(params, **kwds)
nobs_sc = float(sc.shape[0])
if pen_weight != 0:
scale = self._handle_scale(params, **kwds)
sc -= 1/scale * pen_weight / nobs_sc * self.penal.deriv(params)
return sc | Gradient of model observations at params | score_obs | python | statsmodels/statsmodels | statsmodels/base/_penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalized.py | BSD-3-Clause |
def hessian_numdiff(self, params, pen_weight=None, **kwds):
"""hessian based on finite difference derivative
"""
if pen_weight is None:
pen_weight = self.pen_weight
def loglike(p):
return self.loglike(p, pen_weight=pen_weight, **kwds)
from statsmodels.tools.numdiff import approx_hess
return approx_hess(params, loglike) | hessian based on finite difference derivative | hessian_numdiff | python | statsmodels/statsmodels | statsmodels/base/_penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalized.py | BSD-3-Clause |
def hessian(self, params, pen_weight=None, **kwds):
"""
Hessian of model at params
"""
if pen_weight is None:
pen_weight = self.pen_weight
hess = super().hessian(params, **kwds)
if pen_weight != 0:
scale = self._handle_scale(params, **kwds)
h = self.penal.deriv2(params)
if h.ndim == 1:
hess -= 1/scale * np.diag(pen_weight * h)
else:
hess -= 1/scale * pen_weight * h
return hess | Hessian of model at params | hessian | python | statsmodels/statsmodels | statsmodels/base/_penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalized.py | BSD-3-Clause |
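The penalized loglikeobs, score, and hessian above all adjust the unpenalized quantities by the same term, `1/scale * pen_weight * penal(params)` (divided by nobs for the per-observation versions). A minimal numpy sketch of that relationship, using a hypothetical quadratic penalty rather than a statsmodels Penalty class, and checking the analytic score of the penalized objective against a finite difference:
```python
import numpy as np

# hypothetical quadratic penalty, mirroring the penal.func / penal.deriv interface
def pen_func(b):
    return np.sum(b ** 2)

def pen_deriv(b):
    return 2 * b

# toy log-likelihood: normal location model for a small sample
y = np.array([0.5, 1.2, -0.3, 0.9])

def llf(b):
    return -0.5 * np.sum((y - b[0]) ** 2)

def penalized_llf(b, pen_weight, scale=1.0):
    # same structure as loglike above: llf(b) - 1/scale * pen_weight * penal.func(b)
    return llf(b) - 1 / scale * pen_weight * pen_func(b)

b = np.array([0.4])
w = 2.0
score_analytic = np.sum(y - b[0]) - w * pen_deriv(b)[0]
eps = 1e-6
score_numeric = (penalized_llf(b + eps, w) - penalized_llf(b - eps, w)) / (2 * eps)
assert np.isclose(score_analytic, score_numeric, atol=1e-5)
```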
def fit(self, method=None, trim=None, **kwds):
"""minimize negative penalized log-likelihood
Parameters
----------
method : None or str
Method specifies the scipy optimizer as in nonlinear MLE models.
trim : {bool, float}
Default is False or None, which uses no trimming.
If trim is True or a float, then small parameters are set to zero.
If True, then a default threshold is used. If trim is a float, then
it will be used as threshold.
The default threshold is currently 1e-4, but it will change in
the future and become penalty-function dependent.
kwds : extra keyword arguments
These keyword arguments are treated in the same way as in the
fit method of the underlying model class.
Specifically, additional optimizer keywords and cov_type related
keywords can be added.
"""
# If method is None, then we choose a default method ourselves
# TODO: temporary hack, need extra fit kwds
# we need to rule out fit methods in a model that will not work with
# penalization
from statsmodels.gam.generalized_additive_model import GLMGam
from statsmodels.genmod.generalized_linear_model import GLM
# Only for fit methods supporting max_start_irls
if isinstance(self, (GLM, GLMGam)):
kwds.update({'max_start_irls': 0})
# currently we use `bfgs` by default
if method is None:
method = 'bfgs'
if trim is None:
trim = False
res = super().fit(method=method, **kwds)
if trim is False:
# note boolean check for "is False", not "False_like"
return res
else:
if trim is True:
trim = 1e-4 # trim threshold
# TODO: make it penal function dependent
# temporary standin, only checked for Poisson and GLM,
# and is computationally inefficient
drop_index = np.nonzero(np.abs(res.params) < trim)[0]
keep_index = np.nonzero(np.abs(res.params) > trim)[0]
if drop_index.any():
# TODO: do we need to add results attributes?
res_aux = self._fit_zeros(keep_index, **kwds)
return res_aux
else:
return res | minimize negative penalized log-likelihood
Parameters
----------
method : None or str
Method specifies the scipy optimizer as in nonlinear MLE models.
trim : {bool, float}
Default is False or None, which uses no trimming.
If trim is True or a float, then small parameters are set to zero.
If True, then a default threshold is used. If trim is a float, then
it will be used as threshold.
The default threshold is currently 1e-4, but it will change in
the future and become penalty-function dependent.
kwds : extra keyword arguments
These keyword arguments are treated in the same way as in the
fit method of the underlying model class.
Specifically, additional optimizer keywords and cov_type related
keywords can be added. | fit | python | statsmodels/statsmodels | statsmodels/base/_penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalized.py | BSD-3-Clause |
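A small sketch of the trimming step described in `fit` above, in plain numpy; `_fit_zeros` is internal, so re-estimation is only indicated by a comment here:
```python
import numpy as np

params = np.array([1.2, 3e-5, -0.8, 2e-6, 0.05])
trim = 1e-4  # default threshold used when trim=True

drop_index = np.nonzero(np.abs(params) < trim)[0]  # array([1, 3])
keep_index = np.nonzero(np.abs(params) > trim)[0]  # array([0, 2, 4])

if drop_index.size > 0:
    # fit() would re-estimate with the dropped coefficients fixed at zero,
    # via self._fit_zeros(keep_index, **kwds); here we only zero them out
    params_trimmed = np.zeros_like(params)
    params_trimmed[keep_index] = params[keep_index]
```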
def from_formula_parser(cls, lc):
"""class method to create instance from patsy instance
Parameters
----------
lc : instance
instance of patsy LinearConstraint, or other instances that have
attributes ``lc.coefs, lc.constants, lc.variable_names``
Returns
-------
instance of this class
"""
try:
return cls(lc.constraint_matrix, lc.constraint_values, lc.variable_names)
except AttributeError:
return cls(lc.coefs, lc.constants, lc.variable_names) | class method to create instance from patsy instance
Parameters
----------
lc : instance
instance of patsy LinearConstraint, or other instances that have
attributes ``lc.coefs, lc.constants, lc.variable_names``
Returns
-------
instance of this class | from_formula_parser | python | statsmodels/statsmodels | statsmodels/base/_constraints.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_constraints.py | BSD-3-Clause |
def expand(self, params_reduced):
"""transform from the reduced to the full parameter space
Parameters
----------
params_reduced : array_like
parameters in the transformed space
Returns
-------
params : array_like
parameters in the original space
Notes
-----
If the restriction is not homogeneous, i.e. q is not equal to zero,
then this is an affine transform.
"""
params_reduced = np.asarray(params_reduced)
return self.transf_mat.dot(params_reduced.T).T + self.constant | transform from the reduced to the full parameter space
Parameters
----------
params_reduced : array_like
parameters in the transformed space
Returns
-------
params : array_like
parameters in the original space
Notes
-----
If the restriction is not homogeneous, i.e. q is not equal to zero,
then this is an affine transform. | expand | python | statsmodels/statsmodels | statsmodels/base/_constraints.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_constraints.py | BSD-3-Clause |
def reduce(self, params):
"""transform from the full to the reduced parameter space
Parameters
----------
params : array_like
parameters or data in the original space
Returns
-------
params_reduced : array_like
parameters in the transformed space
This transform can be applied to the original parameters as well
as to the data. If params is 2-d, then each row is transformed.
"""
params = np.asarray(params)
return params.dot(self.transf_mat) | transform from the full to the reduced parameter space
Parameters
----------
params : array_like
parameters or data in the original space
Returns
-------
params_reduced : array_like
parameters in the transformed space
This transform can be applied to the original parameters as well
as to the data. If params is 2-d, then each row is transformed. | reduce | python | statsmodels/statsmodels | statsmodels/base/_constraints.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_constraints.py | BSD-3-Clause |
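The expand/reduce pair above is an affine map between the constrained subspace and the full parameter space. A self-contained sketch under the assumption that `transf_mat` is an orthonormal basis of the null space of R and `constant` is a particular solution of R b = q (this mirrors, but does not use, the TransformRestriction internals):
```python
import numpy as np
from scipy.linalg import null_space

# constraints R b = q: b0 - b1 = 0 and b2 = 1
R = np.array([[1., -1., 0.],
              [0.,  0., 1.]])
q = np.array([0., 1.])

transf_mat = null_space(R)                       # (3, 1) basis of {b : R b = 0}
constant = np.linalg.lstsq(R, q, rcond=None)[0]  # particular solution of R b = q

def expand(params_reduced):
    return transf_mat.dot(np.atleast_1d(params_reduced)) + constant

def reduce_(params):
    return np.asarray(params).dot(transf_mat)

b_full = expand([2.0])
assert np.allclose(R.dot(b_full), q)   # any reduced point maps to a feasible full point
assert np.allclose(reduce_(b_full), [2.0])
```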
def transform_params_constraint(params, Sinv, R, q):
"""find the parameters that statisfy linear constraint from unconstrained
The linear constraint R params = q is imposed.
Parameters
----------
params : array_like
unconstrained parameters
Sinv : ndarray, 2d, symmetric
covariance matrix of the parameter estimate
R : ndarray, 2d
constraint matrix
q : ndarray, 1d
values of the constraint
Returns
-------
params_constraint : ndarray
parameters of the same length as params satisfying the constraint
Notes
-----
This is the exact formula for OLS and other linear models. It will be
a local approximation for nonlinear models.
TODO: Is Sinv always the covariance matrix?
In the linear case it can be (X'X)^{-1} or sigmahat^2 (X'X)^{-1}.
My guess is that this is the point in the subspace that satisfies
the constraint that has minimum Mahalanobis distance. Proof ?
"""
rsr = R.dot(Sinv).dot(R.T)
reduction = Sinv.dot(R.T).dot(np.linalg.solve(rsr, R.dot(params) - q))
return params - reduction | find the parameters that satisfy linear constraint from unconstrained
The linear constraint R params = q is imposed.
Parameters
----------
params : array_like
unconstrained parameters
Sinv : ndarray, 2d, symmetric
covariance matrix of the parameter estimate
R : ndarray, 2d
constraint matrix
q : ndarray, 1d
values of the constraint
Returns
-------
params_constraint : ndarray
parameters of the same length as params satisfying the constraint
Notes
-----
This is the exact formula for OLS and other linear models. It will be
a local approximation for nonlinear models.
TODO: Is Sinv always the covariance matrix?
In the linear case it can be (X'X)^{-1} or sigmahat^2 (X'X)^{-1}.
My guess is that this is the point in the subspace that satisfies
the constraint that has minimum Mahalanobis distance. Proof ? | transform_params_constraint | python | statsmodels/statsmodels | statsmodels/base/_constraints.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_constraints.py | BSD-3-Clause |
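A small numeric check of the formula above; `Sinv` is any positive-definite weighting matrix and the numbers are made up, but the projected point satisfies R params = q exactly:
```python
import numpy as np

params = np.array([1.0, 2.0, -0.5])     # unconstrained estimate
Sinv = np.diag([0.5, 1.0, 2.0])         # stand-in covariance of the estimate
R = np.array([[1., 1., 0.]])            # constraint: b0 + b1 = 1
q = np.array([1.])

rsr = R.dot(Sinv).dot(R.T)
reduction = Sinv.dot(R.T).dot(np.linalg.solve(rsr, R.dot(params) - q))
params_constraint = params - reduction

assert np.allclose(R.dot(params_constraint), q)
```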
def fit_constrained(model, constraint_matrix, constraint_values,
start_params=None, fit_kwds=None):
# note: self is model instance
"""fit model subject to linear equality constraints
The constraints are of the form `R params = q`
where R is the constraint_matrix and q is the vector of constraint_values.
The estimation creates a new model with transformed design matrix,
exog, and converts the results back to the original parameterization.
Parameters
----------
model: model instance
An instance of a model, see limitations in Notes section
constraint_matrix : array_like, 2D
This is R in the linear equality constraint `R params = q`.
The number of columns needs to be the same as the number of columns
in exog.
constraint_values :
This is `q` in the linear equality constraint `R params = q`
If it is a tuple, then the constraint needs to be given by two
arrays (constraint_matrix, constraint_value), i.e. (R, q).
Otherwise, the constraints can be given as strings or list of
strings.
see t_test for details
start_params : None or array_like
starting values for the optimization. `start_params` needs to be
given in the original parameter space and are internally
transformed.
**fit_kwds : keyword arguments
fit_kwds are used in the optimization of the transformed model.
Returns
-------
params : ndarray ?
estimated parameters (in the original parameterization)
cov_params : ndarray
covariance matrix of the parameter estimates. This is a reverse
transformation of the covariance matrix of the transformed model given
by `cov_params()`
Note: `fit_kwds` can affect the choice of covariance, e.g. by
specifying `cov_type`, which will be reflected in the returned
covariance.
res_constr : results instance
This is the results instance for the created transformed model.
Notes
-----
Limitations:
Models where the number of parameters is different from the number of
columns of exog are not yet supported.
Requires a model that implements an offset option.
"""
self = model # internal alias, used for methods
if fit_kwds is None:
fit_kwds = {}
R, q = constraint_matrix, constraint_values
endog, exog = self.endog, self.exog
transf = TransformRestriction(R, q)
exogp_st = transf.reduce(exog)
offset = exog.dot(transf.constant.squeeze())
if hasattr(self, 'offset'):
offset += self.offset
if start_params is not None:
start_params = transf.reduce(start_params)
# need copy, because we do not want to change it, we do not need deepcopy
import copy
init_kwds = copy.copy(self._get_init_kwds())
# TODO: refactor to combine with above or offset_all
if 'offset' in init_kwds:
del init_kwds['offset']
# using offset as keywords is not supported in all modules
mod_constr = self.__class__(endog, exogp_st, offset=offset, **init_kwds)
res_constr = mod_constr.fit(start_params=start_params, **fit_kwds)
params_orig = transf.expand(res_constr.params).squeeze()
cov_params = transf.transf_mat.dot(res_constr.cov_params()).dot(transf.transf_mat.T)
return params_orig, cov_params, res_constr | fit model subject to linear equality constraints
The constraints are of the form `R params = q`
where R is the constraint_matrix and q is the vector of constraint_values.
The estimation creates a new model with transformed design matrix,
exog, and converts the results back to the original parameterization.
Parameters
----------
model: model instance
An instance of a model, see limitations in Notes section
constraint_matrix : array_like, 2D
This is R in the linear equality constraint `R params = q`.
The number of columns needs to be the same as the number of columns
in exog.
constraint_values :
This is `q` in the linear equality constraint `R params = q`
If it is a tuple, then the constraint needs to be given by two
arrays (constraint_matrix, constraint_value), i.e. (R, q).
Otherwise, the constraints can be given as strings or list of
strings.
see t_test for details
start_params : None or array_like
starting values for the optimization. `start_params` needs to be
given in the original parameter space and are internally
transformed.
**fit_kwds : keyword arguments
fit_kwds are used in the optimization of the transformed model.
Returns
-------
params : ndarray ?
estimated parameters (in the original parameterization)
cov_params : ndarray
covariance matrix of the parameter estimates. This is a reverse
transformation of the covariance matrix of the transformed model given
by `cov_params()`
Note: `fit_kwds` can affect the choice of covariance, e.g. by
specifying `cov_type`, which will be reflected in the returned
covariance.
res_constr : results instance
This is the results instance for the created transformed model.
Notes
-----
Limitations:
Models where the number of parameters is different from the number of
columns of exog are not yet supported.
Requires a model that implements an offset option. | fit_constrained | python | statsmodels/statsmodels | statsmodels/base/_constraints.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_constraints.py | BSD-3-Clause |
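A hedged usage sketch for the standalone function, with simulated data and a Poisson model (which supports the required offset); the constraint forces the two slope coefficients to be equal:
```python
import numpy as np
import statsmodels.api as sm
from statsmodels.base._constraints import fit_constrained

rng = np.random.default_rng(0)
exog = sm.add_constant(rng.normal(size=(500, 2)))
beta_true = np.array([0.5, 0.3, 0.3])
endog = rng.poisson(np.exp(exog.dot(beta_true)))

model = sm.Poisson(endog, exog)
R = np.array([[0., 1., -1.]])   # constraint: b1 - b2 = 0
q = np.array([0.])
params, cov_params, res_constr = fit_constrained(model, R, q)
```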
def fit_constrained_wrap(model, constraints, start_params=None, **fit_kwds):
"""fit_constraint that returns a results instance
This is a development version for fit_constrained methods or
fit_constrained as standalone function.
It will not work correctly for all models because creating a new
results instance is not standardized for use outside the `fit` methods,
and might need adjustments for this.
This is the prototype for the fit_constrained method that has been added
to Poisson and GLM.
"""
self = model # alias for use as method
# constraints = (R, q)
# TODO: temporary trailing underscore to not overwrite the monkey
# patched version
# TODO: decide whether to move the imports
from statsmodels.formula._manager import FormulaManager
# we need this import if we copy it to a different module
# from statsmodels.base._constraints import fit_constrained
# same pattern as in base.LikelihoodModel.t_test
mgr = FormulaManager()
lc = mgr.get_linear_constraints(constraints, self.exog_names)
R, q = lc.constraint_matrix, lc.constraint_values
# TODO: add start_params option, need access to transformation
# fit_constrained needs to do the transformation
params, cov, res_constr = fit_constrained(self, R, q,
start_params=start_params,
fit_kwds=fit_kwds)
# create dummy results Instance, TODO: wire up properly
res = self.fit(start_params=params, maxiter=0,
warn_convergence=False) # we get a wrapper back
res._results.params = params
res._results.cov_params_default = cov
cov_type = fit_kwds.get('cov_type', 'nonrobust')
if cov_type == 'nonrobust':
res._results.normalized_cov_params = cov / res_constr.scale
else:
res._results.normalized_cov_params = None
k_constr = len(q)
res._results.df_resid += k_constr
res._results.df_model -= k_constr
res._results.constraints = LinearConstraints.from_formula_parser(lc)
res._results.k_constr = k_constr
res._results.results_constrained = res_constr
return res | fit_constraint that returns a results instance
This is a development version for fit_constrained methods or
fit_constrained as standalone function.
It will not work correctly for all models because creating a new
results instance is not standardized for use outside the `fit` methods,
and might need adjustments for this.
This is the prototype for the fit_constrained method that has been added
to Poisson and GLM. | fit_constrained_wrap | python | statsmodels/statsmodels | statsmodels/base/_constraints.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_constraints.py | BSD-3-Clause |
def fit_l1_slsqp(
f, score, start_params, args, kwargs, disp=False, maxiter=1000,
callback=None, retall=False, full_output=False, hess=None):
"""
Solve the l1 regularized problem using scipy.optimize.fmin_slsqp().
Specifically: We convert the convex but non-smooth problem
.. math:: \\min_\\beta f(\\beta) + \\sum_k\\alpha_k |\\beta_k|
via the transformation to the smooth, convex, constrained problem in twice
as many variables (adding the "added variables" :math:`u_k`)
.. math:: \\min_{\\beta,u} f(\\beta) + \\sum_k\\alpha_k u_k,
subject to
.. math:: -u_k \\leq \\beta_k \\leq u_k.
Parameters
----------
All the usual parameters from LikelihoodModel.fit
alpha : non-negative scalar or numpy array (same size as parameters)
The weight multiplying the l1 penalty term
trim_mode : 'auto', 'size', or 'off'
If not 'off', trim (set to zero) parameters that would have been zero
if the solver reached the theoretical minimum.
If 'auto', trim params using the Theory above.
If 'size', trim params if they have very small absolute value
size_trim_tol : float or 'auto' (default = 'auto')
For use when trim_mode == 'size'
auto_trim_tol : float
For use when trim_mode == 'auto'.
qc_tol : float
Print warning and do not allow auto trim when (ii) in "Theory" (above)
is violated by this much.
qc_verbose : bool
If true, print out a full QC report upon failure
acc : float (default 1e-10)
Requested accuracy as used by slsqp
"""
start_params = np.array(start_params).ravel('F')
### Extract values
# k_params is total number of covariates,
# possibly including a leading constant.
k_params = len(start_params)
# The start point
x0 = np.append(start_params, np.fabs(start_params))
# alpha is the regularization parameter
alpha = np.array(kwargs['alpha_rescaled']).ravel('F')
# Make sure it's a vector
alpha = alpha * np.ones(k_params)
assert alpha.min() >= 0
# Convert display parameters to scipy.optimize form
disp_slsqp = _get_disp_slsqp(disp, retall)
# Set/retrieve the desired accuracy
acc = kwargs.setdefault('acc', 1e-10)
### Wrap up for use in fmin_slsqp
def func(x_full):
return _objective_func(f, x_full, k_params, alpha, *args)
def f_ieqcons_wrap(x_full):
return _f_ieqcons(x_full, k_params)
def fprime_wrap(x_full):
return _fprime(score, x_full, k_params, alpha)
def fprime_ieqcons_wrap(x_full):
return _fprime_ieqcons(x_full, k_params)
### Call the solver
results = fmin_slsqp(
func, x0, f_ieqcons=f_ieqcons_wrap, fprime=fprime_wrap, acc=acc,
iter=maxiter, disp=disp_slsqp, full_output=full_output,
fprime_ieqcons=fprime_ieqcons_wrap)
params = np.asarray(results[0][:k_params])
### Post-process
# QC
qc_tol = kwargs['qc_tol']
qc_verbose = kwargs['qc_verbose']
passed = l1_solvers_common.qc_results(
params, alpha, score, qc_tol, qc_verbose)
# Possibly trim
trim_mode = kwargs['trim_mode']
size_trim_tol = kwargs['size_trim_tol']
auto_trim_tol = kwargs['auto_trim_tol']
params, trimmed = l1_solvers_common.do_trim_params(
params, k_params, alpha, score, passed, trim_mode, size_trim_tol,
auto_trim_tol)
### Pack up return values for statsmodels optimizers
# TODO These retvals are returned as mle_retvals...but the fit was not ML.
# This could be confusing someday.
if full_output:
x_full, fx, its, imode, smode = results
fopt = func(np.asarray(x_full))
converged = (imode == 0)
warnflag = str(imode) + ' ' + smode
iterations = its
gopt = float('nan') # Objective is non-differentiable
hopt = float('nan')
retvals = {
'fopt': fopt, 'converged': converged, 'iterations': iterations,
'gopt': gopt, 'hopt': hopt, 'trimmed': trimmed,
'warnflag': warnflag}
### Return
if full_output:
return params, retvals
else:
return params | Solve the l1 regularized problem using scipy.optimize.fmin_slsqp().
Specifically: We convert the convex but non-smooth problem
.. math:: \\min_\\beta f(\\beta) + \\sum_k\\alpha_k |\\beta_k|
via the transformation to the smooth, convex, constrained problem in twice
as many variables (adding the "added variables" :math:`u_k`)
.. math:: \\min_{\\beta,u} f(\\beta) + \\sum_k\\alpha_k u_k,
subject to
.. math:: -u_k \\leq \\beta_k \\leq u_k.
Parameters
----------
All the usual parameters from LikelihoodModel.fit
alpha : non-negative scalar or numpy array (same size as parameters)
The weight multiplying the l1 penalty term
trim_mode : 'auto', 'size', or 'off'
If not 'off', trim (set to zero) parameters that would have been zero
if the solver reached the theoretical minimum.
If 'auto', trim params using the Theory above.
If 'size', trim params if they have very small absolute value
size_trim_tol : float or 'auto' (default = 'auto')
For use when trim_mode == 'size'
auto_trim_tol : float
For use when trim_mode == 'auto'.
qc_tol : float
Print warning and do not allow auto trim when (ii) in "Theory" (above)
is violated by this much.
qc_verbose : bool
If true, print out a full QC report upon failure
acc : float (default 1e-10)
Requested accuracy as used by slsqp | fit_l1_slsqp | python | statsmodels/statsmodels | statsmodels/base/l1_slsqp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_slsqp.py | BSD-3-Clause |
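The variable-doubling reformulation above can be illustrated directly with scipy's fmin_slsqp on a toy least-squares loss with an l1 penalty; this is a self-contained sketch of the same construction, not a call into the statsmodels wrapper:
```python
import numpy as np
from scipy.optimize import fmin_slsqp

X = np.array([[1., 0.], [1., 1.], [1., 2.], [1., 3.]])
y = np.array([0.1, 1.1, 1.9, 3.2])
alpha = np.array([0.0, 5.0])   # penalize only the slope
k = X.shape[1]

def f(beta):                   # smooth part of the objective
    return 0.5 * np.sum((y - X.dot(beta)) ** 2)

def objective(z):              # z = (beta, u): minimize f(beta) + sum(alpha * u)
    return f(z[:k]) + np.sum(alpha * z[k:])

def ieqcons(z):                # -u_k <= beta_k <= u_k written as 2*k rows >= 0
    beta, u = z[:k], z[k:]
    return np.append(beta + u, u - beta)

z_opt = fmin_slsqp(objective, np.zeros(2 * k), f_ieqcons=ieqcons,
                   iter=200, disp=False)
beta_l1 = z_opt[:k]            # slope shrunk toward zero relative to plain OLS
```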
def _objective_func(f, x_full, k_params, alpha, *args):
"""
The regularized objective function
"""
x_params = x_full[:k_params]
x_added = x_full[k_params:]
## Return
return f(x_params, *args) + (alpha * x_added).sum() | The regularized objective function | _objective_func | python | statsmodels/statsmodels | statsmodels/base/l1_slsqp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_slsqp.py | BSD-3-Clause |
def _fprime(score, x_full, k_params, alpha):
"""
The regularized derivative
"""
x_params = x_full[:k_params]
# The derivative just appends a vector of constants
return np.append(score(x_params), alpha) | The regularized derivative | _fprime | python | statsmodels/statsmodels | statsmodels/base/l1_slsqp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_slsqp.py | BSD-3-Clause |
def _f_ieqcons(x_full, k_params):
"""
The inequality constraints.
"""
x_params = x_full[:k_params]
x_added = x_full[k_params:]
# All entries in this vector must be \geq 0 in a feasible solution
return np.append(x_params + x_added, x_added - x_params) | The inequality constraints. | _f_ieqcons | python | statsmodels/statsmodels | statsmodels/base/l1_slsqp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_slsqp.py | BSD-3-Clause |
def _fprime_ieqcons(x_full, k_params):
"""
Derivative of the inequality constraints
"""
I = np.eye(k_params) # noqa:E741
A = np.concatenate((I, I), axis=1)
B = np.concatenate((-I, I), axis=1)
C = np.concatenate((A, B), axis=0)
## Return
return C | Derivative of the inequality constraints | _fprime_ieqcons | python | statsmodels/statsmodels | statsmodels/base/l1_slsqp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_slsqp.py | BSD-3-Clause |
def save(self, fname, remove_data=False):
"""
Save a pickle of this instance.
Parameters
----------
fname : {str, handle}
Either a filename or a valid file handle.
remove_data : bool
If False (default), then the instance is pickled without changes.
If True, then all arrays with length nobs are set to None before
pickling. See the remove_data method.
In some cases not all arrays will be set to None.
"""
from statsmodels.iolib.smpickle import save_pickle
if remove_data:
self.remove_data()
save_pickle(self, fname) | Save a pickle of this instance.
Parameters
----------
fname : {str, handle}
Either a filename or a valid file handle.
remove_data : bool
If False (default), then the instance is pickled without changes.
If True, then all arrays with length nobs are set to None before
pickling. See the remove_data method.
In some cases not all arrays will be set to None. | save | python | statsmodels/statsmodels | statsmodels/base/wrapper.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/wrapper.py | BSD-3-Clause |
def load(cls, fname):
"""
Load a pickled results instance
.. warning::
Loading pickled models is not secure against erroneous or
maliciously constructed data. Never unpickle data received from
an untrusted or unauthenticated source.
Parameters
----------
fname : {str, handle}
A string filename or a file handle.
Returns
-------
Results
The unpickled results instance.
"""
from statsmodels.iolib.smpickle import load_pickle
return load_pickle(fname) | Load a pickled results instance
.. warning::
Loading pickled models is not secure against erroneous or
maliciously constructed data. Never unpickle data received from
an untrusted or unauthenticated source.
Parameters
----------
fname : {str, handle}
A string filename or a file handle.
Returns
-------
Results
The unpickled results instance. | load | python | statsmodels/statsmodels | statsmodels/base/wrapper.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/wrapper.py | BSD-3-Clause |
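Typical round trip for the save/load helpers above, here with an OLS results instance (any statsmodels results wrapper behaves the same way):
```python
import numpy as np
import statsmodels.api as sm

x = sm.add_constant(np.arange(10.0))
y = 2.0 + 0.5 * np.arange(10.0) + np.random.normal(size=10)
res = sm.OLS(y, x).fit()

res.save("ols_results.pkl", remove_data=True)  # nobs-length arrays set to None
res2 = sm.load("ols_results.pkl")              # only unpickle files you trust
print(res2.params)
```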
def fit_l1_cvxopt_cp(
f, score, start_params, args, kwargs, disp=False, maxiter=100,
callback=None, retall=False, full_output=False, hess=None):
"""
Solve the l1 regularized problem using cvxopt.solvers.cp
Specifically: We convert the convex but non-smooth problem
.. math:: \\min_\\beta f(\\beta) + \\sum_k\\alpha_k |\\beta_k|
via the transformation to the smooth, convex, constrained problem in twice
as many variables (adding the "added variables" :math:`u_k`)
.. math:: \\min_{\\beta,u} f(\\beta) + \\sum_k\\alpha_k u_k,
subject to
.. math:: -u_k \\leq \\beta_k \\leq u_k.
Parameters
----------
All the usual parameters from LikelihoodModel.fit
alpha : non-negative scalar or numpy array (same size as parameters)
The weight multiplying the l1 penalty term
trim_mode : 'auto', 'size', or 'off'
If not 'off', trim (set to zero) parameters that would have been zero
if the solver reached the theoretical minimum.
If 'auto', trim params using the Theory above.
If 'size', trim params if they have very small absolute value
size_trim_tol : float or 'auto' (default = 'auto')
For use when trim_mode == 'size'
auto_trim_tol : float
For use when trim_mode == 'auto'.
qc_tol : float
Print warning and do not allow auto trim when (ii) in "Theory" (above)
is violated by this much.
qc_verbose : bool
If true, print out a full QC report upon failure
abstol : float
absolute accuracy (default: 1e-7).
reltol : float
relative accuracy (default: 1e-6).
feastol : float
tolerance for feasibility conditions (default: 1e-7).
refinement : int
number of iterative refinement steps when solving KKT equations
(default: 1).
"""
from cvxopt import matrix, solvers
start_params = np.array(start_params).ravel('F')
## Extract arguments
# k_params is total number of covariates, possibly including a leading constant.
k_params = len(start_params)
# The start point
x0 = np.append(start_params, np.fabs(start_params))
x0 = matrix(x0, (2 * k_params, 1))
# The regularization parameter
alpha = np.array(kwargs['alpha_rescaled']).ravel('F')
# Make sure it's a vector
alpha = alpha * np.ones(k_params)
assert alpha.min() >= 0
## Wrap up functions for cvxopt
def f_0(x):
return _objective_func(f, x, k_params, alpha, *args)
def Df(x):
return _fprime(score, x, k_params, alpha)
G = _get_G(k_params) # Inequality constraint matrix, Gx \leq h
h = matrix(0.0, (2 * k_params, 1)) # RHS in inequality constraint
def H(x, z):
return _hessian_wrapper(hess, x, z, k_params)
## Define the optimization function
def F(x=None, z=None):
if x is None:
return 0, x0
elif z is None:
return f_0(x), Df(x)
else:
return f_0(x), Df(x), H(x, z)
## Convert optimization settings to cvxopt form
solvers.options['show_progress'] = disp
solvers.options['maxiters'] = maxiter
if 'abstol' in kwargs:
solvers.options['abstol'] = kwargs['abstol']
if 'reltol' in kwargs:
solvers.options['reltol'] = kwargs['reltol']
if 'feastol' in kwargs:
solvers.options['feastol'] = kwargs['feastol']
if 'refinement' in kwargs:
solvers.options['refinement'] = kwargs['refinement']
### Call the optimizer
results = solvers.cp(F, G, h)
x = np.asarray(results['x']).ravel()
params = x[:k_params]
### Post-process
# QC
qc_tol = kwargs['qc_tol']
qc_verbose = kwargs['qc_verbose']
passed = l1_solvers_common.qc_results(
params, alpha, score, qc_tol, qc_verbose)
# Possibly trim
trim_mode = kwargs['trim_mode']
size_trim_tol = kwargs['size_trim_tol']
auto_trim_tol = kwargs['auto_trim_tol']
params, trimmed = l1_solvers_common.do_trim_params(
params, k_params, alpha, score, passed, trim_mode, size_trim_tol,
auto_trim_tol)
### Pack up return values for statsmodels
# TODO These retvals are returned as mle_retvals...but the fit was not ML
if full_output:
fopt = f_0(x)
gopt = float('nan') # Objective is non-differentiable
hopt = float('nan')
iterations = float('nan')
converged = (results['status'] == 'optimal')
warnflag = results['status']
retvals = {
'fopt': fopt, 'converged': converged, 'iterations': iterations,
'gopt': gopt, 'hopt': hopt, 'trimmed': trimmed,
'warnflag': warnflag}
else:
x = np.array(results['x']).ravel()
params = x[:k_params]
### Return results
if full_output:
return params, retvals
else:
return params | Solve the l1 regularized problem using cvxopt.solvers.cp
Specifically: We convert the convex but non-smooth problem
.. math:: \\min_\\beta f(\\beta) + \\sum_k\\alpha_k |\\beta_k|
via the transformation to the smooth, convex, constrained problem in twice
as many variables (adding the "added variables" :math:`u_k`)
.. math:: \\min_{\\beta,u} f(\\beta) + \\sum_k\\alpha_k u_k,
subject to
.. math:: -u_k \\leq \\beta_k \\leq u_k.
Parameters
----------
All the usual parameters from LikelihoodModel.fit
alpha : non-negative scalar or numpy array (same size as parameters)
The weight multiplying the l1 penalty term
trim_mode : 'auto', 'size', or 'off'
If not 'off', trim (set to zero) parameters that would have been zero
if the solver reached the theoretical minimum.
If 'auto', trim params using the Theory above.
If 'size', trim params if they have very small absolute value
size_trim_tol : float or 'auto' (default = 'auto')
For use when trim_mode == 'size'
auto_trim_tol : float
For use when trim_mode == 'auto'.
qc_tol : float
Print warning and do not allow auto trim when (ii) in "Theory" (above)
is violated by this much.
qc_verbose : bool
If true, print out a full QC report upon failure
abstol : float
absolute accuracy (default: 1e-7).
reltol : float
relative accuracy (default: 1e-6).
feastol : float
tolerance for feasibility conditions (default: 1e-7).
refinement : int
number of iterative refinement steps when solving KKT equations
(default: 1). | fit_l1_cvxopt_cp | python | statsmodels/statsmodels | statsmodels/base/l1_cvxopt.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_cvxopt.py | BSD-3-Clause |
def _objective_func(f, x, k_params, alpha, *args):
"""
The regularized objective function.
"""
from cvxopt import matrix
x_arr = np.asarray(x)
params = x_arr[:k_params].ravel()
u = x_arr[k_params:]
# Call the numpy version
objective_func_arr = f(params, *args) + (alpha * u).sum()
# Return
return matrix(objective_func_arr) | The regularized objective function. | _objective_func | python | statsmodels/statsmodels | statsmodels/base/l1_cvxopt.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_cvxopt.py | BSD-3-Clause |
def _fprime(score, x, k_params, alpha):
"""
The regularized derivative.
"""
from cvxopt import matrix
x_arr = np.asarray(x)
params = x_arr[:k_params].ravel()
# Call the numpy version
# The derivative just appends a vector of constants
fprime_arr = np.append(score(params), alpha)
# Return
return matrix(fprime_arr, (1, 2 * k_params)) | The regularized derivative. | _fprime | python | statsmodels/statsmodels | statsmodels/base/l1_cvxopt.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_cvxopt.py | BSD-3-Clause |
def _get_G(k_params):
"""
The linear inequality constraint matrix.
"""
from cvxopt import matrix
I = np.eye(k_params) # noqa:E741
A = np.concatenate((-I, -I), axis=1)
B = np.concatenate((I, -I), axis=1)
C = np.concatenate((A, B), axis=0)
# Return
return matrix(C) | The linear inequality constraint matrix. | _get_G | python | statsmodels/statsmodels | statsmodels/base/l1_cvxopt.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_cvxopt.py | BSD-3-Clause |
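For k_params = 2, the block structure built in _get_G encodes -u_k <= beta_k <= u_k as G x <= 0; a plain-numpy sketch of the same matrix (without the cvxopt wrapper):
```python
import numpy as np

k_params = 2
I = np.eye(k_params)
G = np.concatenate(
    (np.concatenate((-I, -I), axis=1),   # rows: -beta_k - u_k <= 0
     np.concatenate((I, -I), axis=1)),   # rows:  beta_k - u_k <= 0
    axis=0,
)
beta = np.array([0.5, -0.3])
u = np.array([0.6, 0.4])
assert np.all(G.dot(np.concatenate((beta, u))) <= 0)   # feasible point
```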
def _hessian_wrapper(hess, x, z, k_params):
"""
Wraps the hessian up in the form for cvxopt.
cvxopt wants the hessian of the objective function and the constraints.
Since our constraints are linear, this part is all zeros.
"""
from cvxopt import matrix
x_arr = np.asarray(x)
params = x_arr[:k_params].ravel()
zh_x = np.asarray(z[0]) * hess(params)
zero_mat = np.zeros(zh_x.shape)
A = np.concatenate((zh_x, zero_mat), axis=1)
B = np.concatenate((zero_mat, zero_mat), axis=1)
zh_x_ext = np.concatenate((A, B), axis=0)
return matrix(zh_x_ext, (2 * k_params, 2 * k_params)) | Wraps the hessian up in the form for cvxopt.
cvxopt wants the hessian of the objective function and the constraints.
Since our constraints are linear, this part is all zeros. | _hessian_wrapper | python | statsmodels/statsmodels | statsmodels/base/l1_cvxopt.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_cvxopt.py | BSD-3-Clause |
def _get_penal(self, weights=None):
"""create new Penalty instance
"""
return SCADSmoothed(0.1, c0=0.0001, weights=weights) | create new Penalty instance | _get_penal | python | statsmodels/statsmodels | statsmodels/base/_screening.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_screening.py | BSD-3-Clause |
def ranking_measure(self, res_pen, exog, keep=None):
"""compute measure for ranking exog candidates for inclusion
"""
endog = self.endog
if self.ranking_project:
assert res_pen.model.exog.shape[1] == len(keep)
ex_incl = res_pen.model.exog[:, keep]
exog = exog - ex_incl.dot(np.linalg.pinv(ex_incl).dot(exog))
if self.ranking_attr == 'predicted_poisson':
# I keep this for more experiments
# TODO: does it really help to change/trim params
# we are not reestimating with trimmed model
p = res_pen.params.copy()
if keep is not None:
p[~keep] = 0
predicted = res_pen.model.predict(p)
# this is currently hardcoded for Poisson
resid_factor = (endog - predicted) / np.sqrt(predicted)
elif self.ranking_attr[:6] == 'model.':
# use model method, this is intended for score_factor
attr = self.ranking_attr.split('.')[1]
resid_factor = getattr(res_pen.model, attr)(res_pen.params)
if resid_factor.ndim == 2:
# for score_factor when extra params are in model
resid_factor = resid_factor[:, 0]
mom_cond = np.abs(resid_factor.dot(exog))**2
else:
# use results attribute
resid_factor = getattr(res_pen, self.ranking_attr)
mom_cond = np.abs(resid_factor.dot(exog))**2
return mom_cond | compute measure for ranking exog candidates for inclusion | ranking_measure | python | statsmodels/statsmodels | statsmodels/base/_screening.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_screening.py | BSD-3-Clause |
def screen_exog(self, exog, endog=None, maxiter=100, method='bfgs',
disp=False, fit_kwds=None):
"""screen and select variables (columns) in exog
Parameters
----------
exog : ndarray
candidate explanatory variables that are screened for inclusion in
the model
endog : ndarray (optional)
use a new endog in the screening model.
This is not tested yet, and might not work correctly
maxiter : int
number of screening iterations
method : str
optimization method to use in fit; needs to be one of the gradient
optimizers
disp : bool
display option for fit during optimization
Returns
-------
res_screen : instance of ScreeningResults
The attribute `results_final` contains the results instance
with the final model selection.
`idx_nonzero` contains the index of the selected exog in the full
exog, i.e. the combined exog that are always kept plus exog_candidates.
see ScreeningResults for a full description
"""
model_class = self.model_class
if endog is None:
# allow a different endog than used in model
endog = self.endog
x0 = self.exog_keep
k_keep = self.k_keep
x1 = exog
k_current = x0.shape[1]
# TODO: remove the need for x, use x1 separately from x0
# needs change to idx to be based on x1 (candidate variables)
x = np.column_stack((x0, x1))
nobs, k_vars = x.shape
fkwds = fit_kwds if fit_kwds is not None else {}
fit_kwds = {'maxiter': 200, 'disp': False}
fit_kwds.update(fkwds)
history = defaultdict(list)
idx_nonzero = np.arange(k_keep, dtype=int)
keep = np.ones(k_keep, np.bool_)
idx_excl = np.arange(k_keep, k_vars)
mod_pen = model_class(endog, x0, **self.init_kwds)
# do not penalize initial estimate
mod_pen.pen_weight = 0
res_pen = mod_pen.fit(**fit_kwds)
start_params = res_pen.params
converged = False
idx_old = []
for it in range(maxiter):
# candidates for inclusion in next iteration
x1 = x[:, idx_excl]
mom_cond = self.ranking_measure(res_pen, x1, keep=keep)
assert len(mom_cond) == len(idx_excl)
mcs = np.sort(mom_cond)[::-1]
idx_thr = min((self.k_max_add, k_current + self.k_add, len(mcs)))
threshold = mcs[idx_thr]
# indices of exog in current expansion model
idx = np.concatenate((idx_nonzero, idx_excl[mom_cond > threshold]))
start_params2 = np.zeros(len(idx))
start_params2[:len(start_params)] = start_params
if self.use_weights:
weights = np.ones(len(idx))
weights[:k_keep] = 0
# modify Penalty instance attached to self
# dangerous if res_pen is reused
self.penal.weights = weights
mod_pen = model_class(endog, x[:, idx], penal=self.penal,
pen_weight=self.pen_weight,
**self.init_kwds)
res_pen = mod_pen.fit(method=method,
start_params=start_params2,
warn_convergence=False, skip_hessian=True,
**fit_kwds)
keep = np.abs(res_pen.params) > self.threshold_trim
# use largest params to keep
if keep.sum() > self.k_max_included:
# TODO we can use now np.partition with partial sort
thresh_params = np.sort(np.abs(res_pen.params))[
-self.k_max_included]
keep2 = np.abs(res_pen.params) > thresh_params
keep = np.logical_and(keep, keep2)
# Note: idx and keep are for current expansion model
# idx_nonzero has indices of selected variables in full exog
keep[:k_keep] = True # always keep exog_keep
idx_nonzero = idx[keep]
if disp:
print(keep)
print(idx_nonzero)
# x0 is exog of currently selected model, not used in iteration
# x0 = x[:, idx_nonzero]
k_current = len(idx_nonzero)
start_params = res_pen.params[keep]
# use mask to get excluded indices
mask_excl = np.ones(k_vars, dtype=bool)
mask_excl[idx_nonzero] = False
idx_excl = np.nonzero(mask_excl)[0]
history['idx_nonzero'].append(idx_nonzero)
history['keep'].append(keep)
history['params_keep'].append(start_params)
history['idx_added'].append(idx)
if (len(idx_nonzero) == len(idx_old) and
(idx_nonzero == idx_old).all()):
converged = True
break
idx_old = idx_nonzero
# final estimate
# check that we still have exog_keep
assert np.all(idx_nonzero[:k_keep] == np.arange(k_keep))
if self.use_weights:
weights = np.ones(len(idx_nonzero))
weights[:k_keep] = 0
# create new Penalty instance to avoid sharing the attached penal
penal = self._get_penal(weights=weights)
else:
penal = self.penal
mod_final = model_class(endog, x[:, idx_nonzero],
penal=penal,
pen_weight=self.pen_weight,
**self.init_kwds)
res_final = mod_final.fit(method=method,
start_params=start_params,
warn_convergence=False,
**fit_kwds)
# set exog_names for final model
xnames = ['var%4d' % ii for ii in idx_nonzero]
res_final.model.exog_names[k_keep:] = xnames[k_keep:]
res = ScreeningResults(self,
results_pen = res_pen,
results_final = res_final,
idx_nonzero = idx_nonzero,
idx_exog = idx_nonzero[k_keep:] - k_keep,
idx_excl = idx_excl,
history = history,
converged = converged,
iterations = it + 1 # it is 0-based
)
return res | screen and select variables (columns) in exog
Parameters
----------
exog : ndarray
candidate explanatory variables that are screened for inclusion in
the model
endog : ndarray (optional)
use a new endog in the screening model.
This is not tested yet, and might not work correctly
maxiter : int
number of screening iterations
method : str
optimization method to use in fit; needs to be one of the gradient
optimizers
disp : bool
display option for fit during optimization
Returns
-------
res_screen : instance of ScreeningResults
The attribute `results_final` contains the results instance
with the final model selection.
`idx_nonzero` contains the index of the selected exog in the full
exog, i.e. the combined exog that are always kept plus exog_candidates.
see ScreeningResults for a full description | screen_exog | python | statsmodels/statsmodels | statsmodels/base/_screening.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_screening.py | BSD-3-Clause |
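A hedged usage sketch, assuming the enclosing class is VariableScreening in statsmodels.base._screening and that the model combines PenalizedMixin with Poisson (both are internal APIs and may change):
```python
import numpy as np
from statsmodels.base._penalized import PenalizedMixin
from statsmodels.base._screening import VariableScreening
from statsmodels.discrete.discrete_model import Poisson

class PoissonPenalized(PenalizedMixin, Poisson):
    pass

rng = np.random.default_rng(123)
nobs, k_candidates = 500, 50
exog_candidates = rng.normal(size=(nobs, k_candidates))
linpred = 0.5 + exog_candidates[:, :3].dot([0.5, -0.5, 0.3])
endog = rng.poisson(np.exp(linpred))

# exog of the base model is the set of always-kept columns (here: a constant)
mod = PoissonPenalized(endog, np.ones((nobs, 1)))
screener = VariableScreening(mod)
res_screen = screener.screen_exog(exog_candidates)
print(res_screen.idx_nonzero)   # indices of kept columns, always-kept ones first
```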
def screen_exog_iterator(self, exog_iterator):
"""
batched version of screen exog
This screens variables in a two step process:
In the first step screen_exog is used on each element of the
exog_iterator, and the batch winners are collected.
In the second step all batch winners are combined into a new array
of exog candidates and `screen_exog` is used to select a final
model.
Parameters
----------
exog_iterator : iterator over ndarrays
Returns
-------
res_screen_final : instance of ScreeningResults
This is the instance returned by the second round call to
`screen_exog`. Additional attributes are added to provide
more information about the batched selection process.
The index of final nonzero variables is
`idx_nonzero_batches` which is a 2-dimensional array with batch
index in the first column and variable index within batch in the
second column. They can be used jointly as index for the data
in the exog_iterator.
see ScreeningResults for a full description
"""
k_keep = self.k_keep
# res_batches = []
res_idx = []
exog_winner = []
exog_idx = []
for ex in exog_iterator:
res_screen = self.screen_exog(ex, maxiter=20)
# avoid storing res_screen, only for debugging
# res_batches.append(res_screen)
res_idx.append(res_screen.idx_nonzero)
exog_winner.append(ex[:, res_screen.idx_nonzero[k_keep:] - k_keep])
exog_idx.append(res_screen.idx_nonzero[k_keep:] - k_keep)
exog_winner = np.column_stack(exog_winner)
res_screen_final = self.screen_exog(exog_winner, maxiter=20)
exog_winner_names = ['var%d_%d' % (bidx, idx)
for bidx, batch in enumerate(exog_idx)
for idx in batch]
idx_full = [(bidx, idx)
for bidx, batch in enumerate(exog_idx)
for idx in batch]
ex_final_idx = res_screen_final.idx_nonzero[k_keep:] - k_keep
final_names = np.array(exog_winner_names)[ex_final_idx]
res_screen_final.idx_nonzero_batches = np.array(idx_full)[ex_final_idx]
res_screen_final.exog_final_names = final_names
history = {'idx_nonzero': res_idx,
'idx_exog': exog_idx}
res_screen_final.history_batches = history
return res_screen_final | batched version of screen exog
This screens variables in a two step process:
In the first step screen_exog is used on each element of the
exog_iterator, and the batch winners are collected.
In the second step all batch winners are combined into a new array
of exog candidates and `screen_exog` is used to select a final
model.
Parameters
----------
exog_iterator : iterator over ndarrays
Returns
-------
res_screen_final : instance of ScreeningResults
This is the instance returned by the second round call to
`screen_exog`. Additional attributes are added to provide
more information about the batched selection process.
The index of final nonzero variables is
`idx_nonzero_batches` which is a 2-dimensional array with batch
index in the first column and variable index within batch in the
second column. They can be used jointly as index for the data
in the exog_iterator.
see ScreeningResults for a full description | screen_exog_iterator | python | statsmodels/statsmodels | statsmodels/base/_screening.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_screening.py | BSD-3-Clause |
def _asarray_2d_null_rows(x):
"""
Makes sure input is an array and is 2d. Makes sure output is 2d. True
indicates a null in the rows of 2d x.
"""
# Have to have the asarrays because isnull does not account for array_like
# input
x = np.asarray(x)
if x.ndim == 1:
x = x[:, None]
return np.any(isnull(x), axis=1)[:, None] | Makes sure input is an array and is 2d. Makes sure output is 2d. True
indicates a null in the rows of 2d x. | _asarray_2d_null_rows | python | statsmodels/statsmodels | statsmodels/base/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/data.py | BSD-3-Clause |
def _nan_rows(*arrs):
"""
Returns a boolean array which is True where any of the rows in any
of the _2d_ arrays in arrs are NaNs. Inputs can be any mixture of Series,
DataFrames or array_like.
"""
if len(arrs) == 1:
arrs += ([[False]],)
def _nan_row_maybe_two_inputs(x, y):
# check for dtype bc dataframe has dtypes
x_is_boolean_array = hasattr(x, "dtype") and x.dtype == bool and x
return np.logical_or(
_asarray_2d_null_rows(x), (x_is_boolean_array | _asarray_2d_null_rows(y))
)
return reduce(_nan_row_maybe_two_inputs, arrs).squeeze() | Returns a boolean array which is True where any of the rows in any
of the _2d_ arrays in arrs are NaNs. Inputs can be any mixture of Series,
DataFrames or array_like. | _nan_rows | python | statsmodels/statsmodels | statsmodels/base/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/data.py | BSD-3-Clause |
def handle_missing(cls, endog, exog, missing, **kwargs):
"""
This returns a dictionary with keys endog, exog and the keys of
kwargs. It preserves Nones.
"""
none_array_names = []
# patsy's already dropped NaNs in y/X
missing_idx = kwargs.pop("missing_idx", None)
if missing_idx is not None:
# y, X already handled by patsy. add back in later.
combined = ()
combined_names = []
if exog is None:
none_array_names += ["exog"]
elif exog is not None:
combined = (endog, exog)
combined_names = ["endog", "exog"]
else:
combined = (endog,)
combined_names = ["endog"]
none_array_names += ["exog"]
# deal with other arrays
combined_2d = ()
combined_2d_names = []
if len(kwargs):
for key, value_array in kwargs.items():
if value_array is None or np.ndim(value_array) == 0:
none_array_names += [key]
continue
# grab 1d arrays
if value_array.ndim == 1:
combined += (np.asarray(value_array),)
combined_names += [key]
elif value_array.squeeze().ndim == 1:
combined += (np.asarray(value_array),)
combined_names += [key]
# grab 2d arrays that are _assumed_ to be symmetric
elif value_array.ndim == 2:
combined_2d += (np.asarray(value_array),)
combined_2d_names += [key]
else:
raise ValueError(
"Arrays with more than 2 dimensions " "are not yet handled"
)
if missing_idx is not None:
nan_mask = missing_idx
updated_row_mask = None
if combined: # there were extra arrays not handled by patsy
combined_nans = _nan_rows(*combined)
if combined_nans.shape[0] != nan_mask.shape[0]:
raise ValueError(
"Shape mismatch between endog/exog "
"and extra arrays given to model."
)
# for going back and updated endog/exog
updated_row_mask = combined_nans[~nan_mask]
nan_mask |= combined_nans # for updating extra arrays only
if combined_2d:
combined_2d_nans = _nan_rows(combined_2d)
if combined_2d_nans.shape[0] != nan_mask.shape[0]:
raise ValueError(
"Shape mismatch between endog/exog "
"and extra 2d arrays given to model."
)
if updated_row_mask is not None:
updated_row_mask |= combined_2d_nans[~nan_mask]
else:
updated_row_mask = combined_2d_nans[~nan_mask]
nan_mask |= combined_2d_nans
else:
nan_mask = _nan_rows(*combined)
if combined_2d:
nan_mask = _nan_rows(*(nan_mask[:, None],) + combined_2d)
if not np.any(nan_mask): # no missing do not do anything
combined = dict(zip(combined_names, combined))
if combined_2d:
combined.update(dict(zip(combined_2d_names, combined_2d)))
if none_array_names:
combined.update({k: kwargs.get(k, None) for k in none_array_names})
if missing_idx is not None:
combined.update({"endog": endog})
if exog is not None:
combined.update({"exog": exog})
return combined, []
elif missing == "raise":
raise MissingDataError("NaNs were encountered in the data")
elif missing == "drop":
nan_mask = ~nan_mask
def drop_nans(x):
return cls._drop_nans(x, nan_mask)
def drop_nans_2d(x):
return cls._drop_nans_2d(x, nan_mask)
combined = dict(zip(combined_names, lmap(drop_nans, combined)))
if missing_idx is not None:
if updated_row_mask is not None:
updated_row_mask = ~updated_row_mask
# update endog/exog with this new information
endog = cls._drop_nans(endog, updated_row_mask)
if exog is not None:
exog = cls._drop_nans(exog, updated_row_mask)
combined.update({"endog": endog})
if exog is not None:
combined.update({"exog": exog})
if combined_2d:
combined.update(
dict(zip(combined_2d_names, lmap(drop_nans_2d, combined_2d)))
)
if none_array_names:
combined.update({k: kwargs.get(k, None) for k in none_array_names})
return combined, np.where(~nan_mask)[0].tolist()
else:
raise ValueError("missing option %s not understood" % missing) | This returns a dictionary with keys endog, exog and the keys of
kwargs. It preserves Nones. | handle_missing | python | statsmodels/statsmodels | statsmodels/base/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/data.py | BSD-3-Clause |
def cov_names(self):
"""
Labels for covariance matrices
In multidimensional models, each dimension of a covariance matrix
differs from the number of param_names.
If not set, returns param_names
"""
# for handling names of covariance names in multidimensional models
if self._cov_names is not None:
return self._cov_names
return self.param_names | Labels for covariance matrices
In multidimensional models, each dimension of a covariance matrix
differs from the number of param_names.
If not set, returns param_names | cov_names | python | statsmodels/statsmodels | statsmodels/base/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/data.py | BSD-3-Clause |
def handle_data_class_factory(endog, exog):
"""
Return the data-handling class appropriate for the given endog/exog inputs.
"""
if data_util._is_using_ndarray_type(endog, exog):
klass = ModelData
elif data_util._is_using_pandas(endog, exog):
klass = PandasData
elif data_util._is_using_patsy(endog, exog):
klass = PatsyData
elif data_util._is_using_formulaic(endog, exog):
klass = FormulaicData
# keep this check last
elif data_util._is_using_ndarray(endog, exog):
klass = ModelData
else:
raise ValueError(
"unrecognized data structures: %s / %s" % (type(endog), type(exog))
)
return klass | Return the data-handling class appropriate for the given endog/exog inputs. | handle_data_class_factory | python | statsmodels/statsmodels | statsmodels/base/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/data.py | BSD-3-Clause |
def transform_boxcox(self, x, lmbda=None, method='guerrero', **kwargs):
"""
Performs a Box-Cox transformation on the data array x. If lmbda is None,
the indicated method is used to estimate a suitable lambda parameter.
Parameters
----------
x : array_like
lmbda : float
The lambda parameter for the Box-Cox transform. If None, a value
will be estimated by means of the specified method.
method : {'guerrero', 'loglik'}
The method to estimate the lambda parameter. Will only be used if
lmbda is None, and defaults to 'guerrero', detailed in Guerrero
(1993). 'loglik' maximizes the profile likelihood.
**kwargs
Options for the specified method.
* For 'guerrero', this entails window_length, the grouping
parameter, scale, the dispersion measure, and options, to be
passed to the optimizer.
* For 'loglik': options, to be passed to the optimizer.
Returns
-------
y : array_like
The transformed series.
lmbda : float
The lmbda parameter used to transform the series.
References
----------
Guerrero, Victor M. 1993. "Time-series analysis supported by power
transformations". `Journal of Forecasting`. 12 (1): 37-48.
Guerrero, Victor M. and Perera, Rafael. 2004. "Variance Stabilizing
Power Transformation for Time Series," `Journal of Modern Applied
Statistical Methods`. 3 (2): 357-369.
Box, G. E. P., and D. R. Cox. 1964. "An Analysis of Transformations".
`Journal of the Royal Statistical Society`. 26 (2): 211-252.
"""
x = np.asarray(x)
if np.any(x <= 0):
raise ValueError("Non-positive x.")
if lmbda is None:
lmbda = self._est_lambda(x,
method=method,
**kwargs)
# if less than 0.01, treat lambda as zero.
if np.isclose(lmbda, 0.):
y = np.log(x)
else:
y = (np.power(x, lmbda) - 1.) / lmbda
return y, lmbda | Performs a Box-Cox transformation on the data array x. If lmbda is None,
the indicated method is used to estimate a suitable lambda parameter.
Parameters
----------
x : array_like
lmbda : float
The lambda parameter for the Box-Cox transform. If None, a value
will be estimated by means of the specified method.
method : {'guerrero', 'loglik'}
The method to estimate the lambda parameter. Will only be used if
lmbda is None, and defaults to 'guerrero', detailed in Guerrero
(1993). 'loglik' maximizes the profile likelihood.
**kwargs
Options for the specified method.
* For 'guerrero', this entails window_length, the grouping
parameter, scale, the dispersion measure, and options, to be
passed to the optimizer.
* For 'loglik': options, to be passed to the optimizer.
Returns
-------
y : array_like
The transformed series.
lmbda : float
The lmbda parameter used to transform the series.
References
----------
Guerrero, Victor M. 1993. "Time-series analysis supported by power
transformations". `Journal of Forecasting`. 12 (1): 37-48.
Guerrero, Victor M. and Perera, Rafael. 2004. "Variance Stabilizing
Power Transformation for Time Series," `Journal of Modern Applied
Statistical Methods`. 3 (2): 357-369.
Box, G. E. P., and D. R. Cox. 1964. "An Analysis of Transformations".
`Journal of the Royal Statistical Society`. 26 (2): 211-252. | transform_boxcox | python | statsmodels/statsmodels | statsmodels/base/transform.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/transform.py | BSD-3-Clause |
def untransform_boxcox(self, x, lmbda, method='naive'):
"""
Back-transforms the Box-Cox transformed data array, by means of the
indicated method. The provided argument lmbda should be the lambda
parameter that was used to initially transform the data.
Parameters
----------
x : array_like
The transformed series.
lmbda : float
The lambda parameter that was used to transform the series.
method : {'naive'}
Indicates the method to be used in the untransformation. Defaults
to 'naive', which reverses the transformation.
NOTE: 'naive' is implemented natively, while other methods may be
available in subclasses!
Returns
-------
y : array_like
The untransformed series.
"""
method = method.lower()
x = np.asarray(x)
if method == 'naive':
if np.isclose(lmbda, 0.):
y = np.exp(x)
else:
y = np.power(lmbda * x + 1, 1. / lmbda)
else:
raise ValueError(f"Method '{method}' not understood.")
return y | Back-transforms the Box-Cox transformed data array, by means of the
indicated method. The provided argument lmbda should be the lambda
parameter that was used to initially transform the data.
Parameters
----------
x : array_like
The transformed series.
lmbda : float
The lambda parameter that was used to transform the series.
method : {'naive'}
Indicates the method to be used in the untransformation. Defaults
to 'naive', which reverses the transformation.
NOTE: 'naive' is implemented natively, while other methods may be
available in subclasses!
Returns
-------
y : array_like
The untransformed series. | untransform_boxcox | python | statsmodels/statsmodels | statsmodels/base/transform.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/transform.py | BSD-3-Clause |
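Because the 'naive' back-transform simply inverts the forward map, a quick round-trip check illustrates the pair of formulas in untransform_boxcox. The helper below is a hedged sketch with made-up names, not the statsmodels API:

```python
import numpy as np

def boxcox_untransform(y, lmbda):
    # Naive inverse: exp(y) for lambda ~ 0, else (lambda*y + 1)**(1/lambda).
    y = np.asarray(y, dtype=float)
    if np.isclose(lmbda, 0.0):
        return np.exp(y)
    return np.power(lmbda * y + 1.0, 1.0 / lmbda)

x = np.array([1.5, 2.0, 3.5])
lmbda = 0.5
y = (np.power(x, lmbda) - 1.0) / lmbda             # forward transform
print(np.allclose(x, boxcox_untransform(y, lmbda)))  # True
```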
def _est_lambda(self, x, bounds=(-1, 2), method='guerrero', **kwargs):
"""
Computes an estimate for the lambda parameter in the Box-Cox
transformation using method.
Parameters
----------
x : array_like
The untransformed data.
bounds : tuple
Numeric 2-tuple that indicates the solution space for the lambda
parameter. Default (-1, 2).
method : {'guerrero', 'loglik'}
The method by which to estimate lambda. Defaults to 'guerrero', but
the profile likelihood ('loglik') is also available.
**kwargs
Options for the specified method.
* For 'guerrero': window_length (int), the seasonality/grouping
parameter. Scale ({'mad', 'sd'}), the dispersion measure. Options
(dict), to be passed to the optimizer.
* For 'loglik': Options (dict), to be passed to the optimizer.
Returns
-------
lmbda : float
The lambda parameter.
"""
method = method.lower()
if len(bounds) != 2:
raise ValueError("Bounds of length {} not understood."
.format(len(bounds)))
elif bounds[0] >= bounds[1]:
raise ValueError("Lower bound exceeds upper bound.")
if method == 'guerrero':
lmbda = self._guerrero_cv(x, bounds=bounds, **kwargs)
elif method == 'loglik':
lmbda = self._loglik_boxcox(x, bounds=bounds, **kwargs)
else:
raise ValueError(f"Method '{method}' not understood.")
return lmbda | Computes an estimate for the lambda parameter in the Box-Cox
transformation using method.
Parameters
----------
x : array_like
The untransformed data.
bounds : tuple
Numeric 2-tuple that indicates the solution space for the lambda
parameter. Default (-1, 2).
method : {'guerrero', 'loglik'}
The method by which to estimate lambda. Defaults to 'guerrero', but
the profile likelihood ('loglik') is also available.
**kwargs
Options for the specified method.
* For 'guerrero': window_length (int), the seasonality/grouping
parameter. Scale ({'mad', 'sd'}), the dispersion measure. Options
(dict), to be passed to the optimizer.
* For 'loglik': Options (dict), to be passed to the optimizer.
Returns
-------
lmbda : float
The lambda parameter. | _est_lambda | python | statsmodels/statsmodels | statsmodels/base/transform.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/transform.py | BSD-3-Clause |
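For the 'loglik' option specifically, SciPy ships its own profile-likelihood estimate of lambda, which can serve as an external cross-check. This is SciPy's estimator, not the statsmodels code path documented above:

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = rng.lognormal(mean=0.0, sigma=0.5, size=200)  # positive, right-skewed data
y, lmbda_hat = stats.boxcox(x)   # lmbda omitted -> profile-likelihood estimate
print(round(lmbda_hat, 3))       # typically close to 0 for lognormal data
```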
def _guerrero_cv(self, x, bounds, window_length=4, scale='sd',
options={'maxiter': 25}):
"""
Computes lambda using guerrero's coefficient of variation. If no
seasonality is present in the data, window_length is set to 4 (as
per Guerrero and Perera, (2004)).
NOTE: Seasonality-specific auxiliaries *should* provide their own
seasonality parameter.
Parameters
----------
x : array_like
bounds : tuple
Numeric 2-tuple that indicates the solution space for the lambda
parameter.
window_length : int
Seasonality/grouping parameter. Default 4, as per Guerrero and
Perera (2004). NOTE: this indicates the length of the individual
groups, not the total number of groups!
scale : {'sd', 'mad'}
The dispersion measure to be used. 'sd' indicates the sample
standard deviation, but the more robust 'mad' is also available.
options : dict
The options (as a dict) to be passed to the optimizer.
"""
nobs = len(x)
groups = int(nobs / window_length)
# remove the first n < window_length observations from consideration.
grouped_data = np.reshape(x[nobs - (groups * window_length): nobs],
(groups, window_length))
mean = np.mean(grouped_data, 1)
scale = scale.lower()
if scale == 'sd':
dispersion = np.std(grouped_data, 1, ddof=1)
elif scale == 'mad':
dispersion = mad(grouped_data, axis=1)
else:
raise ValueError(f"Scale '{scale}' not understood.")
def optim(lmbda):
rat = np.divide(dispersion, np.power(mean, 1 - lmbda)) # eq 6, p 40
return np.std(rat, ddof=1) / np.mean(rat)
res = minimize_scalar(optim,
bounds=bounds,
method='bounded',
options=options)
return res.x | Computes lambda using guerrero's coefficient of variation. If no
seasonality is present in the data, window_length is set to 4 (as
per Guerrero and Perera, (2004)).
NOTE: Seasonality-specific auxiliaries *should* provide their own
seasonality parameter.
Parameters
----------
x : array_like
bounds : tuple
Numeric 2-tuple that indicates the solution space for the lambda
parameter.
window_length : int
Seasonality/grouping parameter. Default 4, as per Guerrero and
Perera (2004). NOTE: this indicates the length of the individual
groups, not the total number of groups!
scale : {'sd', 'mad'}
The dispersion measure to be used. 'sd' indicates the sample
standard deviation, but the more robust 'mad' is also available.
options : dict
The options (as a dict) to be passed to the optimizer. | _guerrero_cv | python | statsmodels/statsmodels | statsmodels/base/transform.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/transform.py | BSD-3-Clause |
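The objective minimized by _guerrero_cv is the coefficient of variation of the per-group ratio dispersion / mean**(1 - lambda) (eq. 6 referenced in the code). The sketch below evaluates that objective on a coarse lambda grid instead of calling the bounded scalar minimizer; the data and helper names are illustrative only:

```python
import numpy as np

rng = np.random.default_rng(1)
x = rng.lognormal(sigma=0.4, size=80)
window_length = 4
groups = len(x) // window_length
# keep the last groups * window_length observations, as in _guerrero_cv
grouped = x[len(x) - groups * window_length:].reshape(groups, window_length)
mean = grouped.mean(axis=1)
sd = grouped.std(axis=1, ddof=1)

def guerrero_cv(lmbda):
    rat = sd / mean ** (1 - lmbda)        # eq. 6 ratio per group
    return rat.std(ddof=1) / rat.mean()   # coefficient of variation

grid = np.linspace(-1, 2, 61)
best = grid[np.argmin([guerrero_cv(lam) for lam in grid])]
print(round(float(best), 2))
```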
def _loglik_boxcox(self, x, bounds, options={'maxiter': 25}):
"""
Taken from the Stata manual on Box-Cox regressions, where this is the
special case of 'lhs only'. As an estimator for the variance, the
maximum-likelihood sample variance (np.var with ddof=0) is used.
Parameters
----------
x : array_like
options : dict
The options (as a dict) to be passed to the optimizer.
"""
sum_x = np.sum(np.log(x))
nobs = len(x)
def optim(lmbda):
y, lmbda = self.transform_boxcox(x, lmbda)
return (1 - lmbda) * sum_x + (nobs / 2.) * np.log(np.var(y))
res = minimize_scalar(optim,
bounds=bounds,
method='bounded',
options=options)
return res.x | Taken from the Stata manual on Box-Cox regressions, where this is the
special case of 'lhs only'. As an estimator for the variance, the
maximum-likelihood sample variance (np.var with ddof=0) is used.
Parameters
----------
x : array_like
options : dict
The options (as a dict) to be passed to the optimizer. | _loglik_boxcox | python | statsmodels/statsmodels | statsmodels/base/transform.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/transform.py | BSD-3-Clause |
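The 'lhs only' profile objective in _loglik_boxcox is (1 - lambda) * sum(log x) + (n / 2) * log(var(y(lambda))), minimized over a bounded interval. A self-contained sketch of the same optimization, with the Box-Cox transform inlined, is:

```python
import numpy as np
from scipy.optimize import minimize_scalar

rng = np.random.default_rng(2)
x = rng.lognormal(sigma=0.6, size=150)
sum_log_x = np.sum(np.log(x))
nobs = len(x)

def neg_profile_loglik(lmbda):
    y = np.log(x) if np.isclose(lmbda, 0.0) else (x ** lmbda - 1.0) / lmbda
    return (1 - lmbda) * sum_log_x + (nobs / 2.0) * np.log(np.var(y))

res = minimize_scalar(neg_profile_loglik, bounds=(-1, 2), method="bounded",
                      options={"maxiter": 25})
print(round(res.x, 3))  # estimated lambda; roughly 0 for lognormal data
```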
def _fit(self, objective, gradient, start_params, fargs, kwargs,
hessian=None, method='newton', maxiter=100, full_output=True,
disp=True, callback=None, retall=False):
"""
Fit function for any model with an objective function.
Parameters
----------
objective : function
Objective function to be minimized.
gradient : function
The gradient of the objective function.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
hessian : str, optional
Method for computing the Hessian matrix, if applicable.
method : str {'newton','nm','bfgs','powell','cg','ncg','basinhopping',
'minimize'}
Method can be 'newton' for Newton-Raphson, 'nm' for Nelder-Mead,
'bfgs' for Broyden-Fletcher-Goldfarb-Shanno, 'powell' for modified
Powell's method, 'cg' for conjugate gradient, 'ncg' for Newton-
conjugate gradient, 'basinhopping' for global basin-hopping
solver, if available, or a generic 'minimize' which is a wrapper for
scipy.optimize.minimize. `method` determines which solver from
scipy.optimize is used. The explicit arguments in `fit` are passed
to the solver, with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports.
maxiter : int
The maximum number of iterations to perform.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool
Set to True to print convergence messages.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
optim_settings : dict
A dictionary that contains the parameters passed to the solver.
Notes
-----
The 'basinhopping' solver ignores `maxiter`, `retall`, `full_output`
explicit arguments.
Optional arguments for the solvers (available in Results.mle_settings)::
'newton'
tol : float
Relative error in params acceptable for convergence.
'nm' -- Nelder Mead
xtol : float
Relative error in params acceptable for convergence
ftol : float
Relative error in loglike(params) acceptable for
convergence
maxfun : int
Maximum number of function evaluations to make.
'bfgs'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.inf is max, -np.inf is min)
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
'lbfgs'
m : int
The maximum number of variable metric corrections used to
define the limited memory matrix. (The limited memory BFGS
method does not store the full hessian but uses this many
terms in an approximation to it.)
pgtol : float
The iteration will stop when
``max{|proj g_i | i = 1, ..., n} <= pgtol`` where pg_i is
the i-th component of the projected gradient.
factr : float
The iteration stops when
``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
where eps is the machine precision, which is automatically
generated by the code. Typical values for factr are: 1e12
for low accuracy; 1e7 for moderate accuracy; 10.0 for
extremely high accuracy. See Notes for relationship to
ftol, which is exposed (instead of factr) by the
scipy.optimize.minimize interface to L-BFGS-B.
maxfun : int
Maximum number of function evaluations to make.
epsilon : float
Step size used when approx_grad is True, for numerically
calculating the gradient
approx_grad : bool
Whether to approximate the gradient numerically (in which
case func returns only the function value).
'cg'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.inf is max, -np.inf is min)
epsilon : float
If fprime is approximated, use this value for the step
size. Can be scalar or vector. Only relevant if
Likelihoodmodel.score is None.
'ncg'
fhess_p : callable f'(x,*args)
Function which computes the Hessian of f times an arbitrary
vector, p. Should only be supplied if
LikelihoodModel.hessian is None.
avextol : float
Stop when the average relative error in the minimizer
falls below this amount.
epsilon : float or ndarray
If fhess is approximated, use this value for the step size.
Only relevant if Likelihoodmodel.hessian is None.
'powell'
xtol : float
Line-search error tolerance
ftol : float
Relative error in loglike(params) acceptable for
convergence.
maxfun : int
Maximum number of function evaluations to make.
start_direc : ndarray
Initial direction set.
'basinhopping'
niter : int
The number of basin hopping iterations.
niter_success : int
Stop the run if the global minimum candidate remains the
same for this number of iterations.
T : float
The "temperature" parameter for the accept or reject
criterion. Higher "temperatures" mean that larger jumps
in function value will be accepted. For best results
`T` should be comparable to the separation (in function
value) between local minima.
stepsize : float
Initial step size for use in the random displacement.
interval : int
The interval for how often to update the `stepsize`.
minimizer : dict
Extra keyword arguments to be passed to the minimizer
`scipy.optimize.minimize()`, for example 'method' - the
minimization method (e.g. 'L-BFGS-B'), or 'tol' - the
tolerance for termination. Other arguments are mapped from
explicit argument of `fit`:
- `args` <- `fargs`
- `jac` <- `score`
- `hess` <- `hess`
'minimize'
min_method : str, optional
Name of minimization method to use.
Any method specific arguments can be passed directly.
For a list of methods and their arguments, see
documentation of `scipy.optimize.minimize`.
If no method is specified, then BFGS is used.
"""
# TODO: generalize the regularization stuff
# Extract kwargs specific to fit_regularized calling fit
extra_fit_funcs = kwargs.get('extra_fit_funcs', dict())
methods = ['newton', 'nm', 'bfgs', 'lbfgs', 'powell', 'cg', 'ncg',
'basinhopping', 'minimize']
methods += extra_fit_funcs.keys()
method = method.lower()
_check_method(method, methods)
fit_funcs = {
'newton': _fit_newton,
'nm': _fit_nm, # Nelder-Mead
'bfgs': _fit_bfgs,
'lbfgs': _fit_lbfgs,
'cg': _fit_cg,
'ncg': _fit_ncg,
'powell': _fit_powell,
'basinhopping': _fit_basinhopping,
'minimize': _fit_minimize # wrapper for scipy.optimize.minimize
}
# NOTE: fit_regularized checks the methods for these but it should be
# moved up probably
if extra_fit_funcs:
fit_funcs.update(extra_fit_funcs)
func = fit_funcs[method]
xopt, retvals = func(objective, gradient, start_params, fargs, kwargs,
disp=disp, maxiter=maxiter, callback=callback,
retall=retall, full_output=full_output,
hess=hessian)
optim_settings = {'optimizer': method, 'start_params': start_params,
'maxiter': maxiter, 'full_output': full_output,
'disp': disp, 'fargs': fargs, 'callback': callback,
'retall': retall, "extra_fit_funcs": extra_fit_funcs}
optim_settings.update(kwargs)
# set as attributes or return?
return xopt, retvals, optim_settings | Fit function for any model with an objective function.
Parameters
----------
objective : function
Objective function to be minimized.
gradient : function
The gradient of the objective function.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
hessian : str, optional
Method for computing the Hessian matrix, if applicable.
method : str {'newton','nm','bfgs','powell','cg','ncg','basinhopping',
'minimize'}
Method can be 'newton' for Newton-Raphson, 'nm' for Nelder-Mead,
'bfgs' for Broyden-Fletcher-Goldfarb-Shanno, 'powell' for modified
Powell's method, 'cg' for conjugate gradient, 'ncg' for Newton-
conjugate gradient, 'basinhopping' for global basin-hopping
solver, if available, or a generic 'minimize' which is a wrapper for
scipy.optimize.minimize. `method` determines which solver from
scipy.optimize is used. The explicit arguments in `fit` are passed
to the solver, with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports.
maxiter : int
The maximum number of iterations to perform.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool
Set to True to print convergence messages.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
optim_settings : dict
A dictionary that contains the parameters passed to the solver.
Notes
-----
The 'basinhopping' solver ignores `maxiter`, `retall`, `full_output`
explicit arguments.
Optional arguments for the solvers (available in Results.mle_settings)::
'newton'
tol : float
Relative error in params acceptable for convergence.
'nm' -- Nelder Mead
xtol : float
Relative error in params acceptable for convergence
ftol : float
Relative error in loglike(params) acceptable for
convergence
maxfun : int
Maximum number of function evaluations to make.
'bfgs'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.inf is max, -np.inf is min)
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
'lbfgs'
m : int
The maximum number of variable metric corrections used to
define the limited memory matrix. (The limited memory BFGS
method does not store the full hessian but uses this many
terms in an approximation to it.)
pgtol : float
The iteration will stop when
``max{|proj g_i | i = 1, ..., n} <= pgtol`` where pg_i is
the i-th component of the projected gradient.
factr : float
The iteration stops when
``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
where eps is the machine precision, which is automatically
generated by the code. Typical values for factr are: 1e12
for low accuracy; 1e7 for moderate accuracy; 10.0 for
extremely high accuracy. See Notes for relationship to
ftol, which is exposed (instead of factr) by the
scipy.optimize.minimize interface to L-BFGS-B.
maxfun : int
Maximum number of function evaluations to make.
epsilon : float
Step size used when approx_grad is True, for numerically
calculating the gradient
approx_grad : bool
Whether to approximate the gradient numerically (in which
case func returns only the function value).
'cg'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.inf is max, -np.inf is min)
epsilon : float
If fprime is approximated, use this value for the step
size. Can be scalar or vector. Only relevant if
Likelihoodmodel.score is None.
'ncg'
fhess_p : callable f'(x,*args)
Function which computes the Hessian of f times an arbitrary
vector, p. Should only be supplied if
LikelihoodModel.hessian is None.
avextol : float
Stop when the average relative error in the minimizer
falls below this amount.
epsilon : float or ndarray
If fhess is approximated, use this value for the step size.
Only relevant if Likelihoodmodel.hessian is None.
'powell'
xtol : float
Line-search error tolerance
ftol : float
Relative error in loglike(params) acceptable for
convergence.
maxfun : int
Maximum number of function evaluations to make.
start_direc : ndarray
Initial direction set.
'basinhopping'
niter : int
The number of basin hopping iterations.
niter_success : int
Stop the run if the global minimum candidate remains the
same for this number of iterations.
T : float
The "temperature" parameter for the accept or reject
criterion. Higher "temperatures" mean that larger jumps
in function value will be accepted. For best results
`T` should be comparable to the separation (in function
value) between local minima.
stepsize : float
Initial step size for use in the random displacement.
interval : int
The interval for how often to update the `stepsize`.
minimizer : dict
Extra keyword arguments to be passed to the minimizer
`scipy.optimize.minimize()`, for example 'method' - the
minimization method (e.g. 'L-BFGS-B'), or 'tol' - the
tolerance for termination. Other arguments are mapped from
explicit argument of `fit`:
- `args` <- `fargs`
- `jac` <- `score`
- `hess` <- `hess`
'minimize'
min_method : str, optional
Name of minimization method to use.
Any method specific arguments can be passed directly.
For a list of methods and their arguments, see
documentation of `scipy.optimize.minimize`.
If no method is specified, then BFGS is used. | _fit | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
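_fit is essentially a dispatcher: it validates the method name, looks the solver up in fit_funcs, and calls every solver with the same signature. The toy sketch below reproduces that table-lookup pattern with two SciPy solvers and a quadratic objective; none of these names are statsmodels internals:

```python
import numpy as np
from scipy import optimize

target = np.array([1.0, -2.0])

def objective(params):
    return np.sum((params - target) ** 2)

def gradient(params):
    return 2.0 * (params - target)

# one wrapper per solver, all sharing a signature, as in _fit's fit_funcs
def fit_bfgs(f, score, start):
    return optimize.fmin_bfgs(f, start, fprime=score, disp=False)

def fit_nm(f, score, start):
    return optimize.fmin(f, start, disp=False)

fit_funcs = {"bfgs": fit_bfgs, "nm": fit_nm}
method = "bfgs"
xopt = fit_funcs[method](objective, gradient, np.zeros(2))
print(np.round(xopt, 4))  # ~[ 1. -2.]
```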
def _fit_constrained(self, params):
"""
TODO: how to add constraints?
Something like
sm.add_constraint(Model, func)
or
model_instance.add_constraint(func)
model_instance.add_constraint("x1 + x2 = 2")
result = model_instance.fit()
"""
raise NotImplementedError | TODO: how to add constraints?
Something like
sm.add_constraint(Model, func)
or
model_instance.add_constraint(func)
model_instance.add_constraint("x1 + x2 = 2")
result = model_instance.fit() | _fit_constrained | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
def _fit_minimize(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
"""
Fit using scipy minimize, where kwarg `min_method` defines the algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
kwargs.setdefault('min_method', 'BFGS')
# prepare options dict for minimize
filter_opts = ['extra_fit_funcs', 'niter', 'min_method', 'tol', 'bounds', 'constraints']
options = {k: v for k, v in kwargs.items() if k not in filter_opts}
options['disp'] = disp
options['maxiter'] = maxiter
# Use Hessian/Jacobian only if they're required by the method
no_hess = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'COBYLA', 'SLSQP']
no_jac = ['Nelder-Mead', 'Powell', 'COBYLA']
if kwargs['min_method'] in no_hess:
hess = None
if kwargs['min_method'] in no_jac:
score = None
# Use bounds/constraints only if they're allowed by the method
has_bounds = ['L-BFGS-B', 'TNC', 'SLSQP', 'trust-constr']
# Added in SP 1.5
if not SP_LT_15:
has_bounds += ['Powell']
# Added in SP 1.7
if not SP_LT_17:
has_bounds += ['Nelder-Mead']
has_constraints = ['COBYLA', 'SLSQP', 'trust-constr']
if 'bounds' in kwargs.keys() and kwargs['min_method'] in has_bounds:
bounds = kwargs['bounds']
else:
bounds = None
if 'constraints' in kwargs.keys() and kwargs['min_method'] in has_constraints:
constraints = kwargs['constraints']
else:
constraints = ()
res = optimize.minimize(f, start_params, args=fargs, method=kwargs['min_method'],
jac=score, hess=hess, bounds=bounds, constraints=constraints,
callback=callback, options=options)
xopt = res.x
retvals = None
if full_output:
nit = getattr(res, 'nit', np.nan) # scipy 0.14 compat
retvals = {'fopt': res.fun, 'iterations': nit,
'fcalls': res.nfev, 'warnflag': res.status,
'converged': res.success}
if retall:
retvals.update({'allvecs': res.values()})
return xopt, retvals | Fit using scipy minimize, where kwarg `min_method` defines the algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_minimize | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
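In practice the 'minimize' path boils down to a single scipy.optimize.minimize call, with the algorithm taken from min_method and bounds/constraints forwarded only when the chosen algorithm supports them. A hedged standalone sketch of that call, on a toy objective:

```python
import numpy as np
from scipy import optimize

def f(params):
    return (params[0] - 3.0) ** 2 + (params[1] + 1.0) ** 2

min_method = "L-BFGS-B"               # a method that supports bounds
bounds = [(0.0, 10.0), (-5.0, 5.0)]
res = optimize.minimize(f, x0=np.zeros(2), method=min_method,
                        bounds=bounds, options={"maxiter": 100})
print(res.success, np.round(res.x, 3))  # True [ 3. -1.]
```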
def _fit_newton(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None, ridge_factor=1e-10):
"""
Fit using Newton-Raphson algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
ridge_factor : float
Regularization factor for Hessian matrix.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
check_kwargs(kwargs, ("tol", "ridge_factor"), "newton")
tol = kwargs.setdefault('tol', 1e-8)
ridge_factor = kwargs.setdefault('ridge_factor', 1e-10)
iterations = 0
oldparams = np.inf
newparams = np.asarray(start_params)
if retall:
history = [oldparams, newparams]
while (iterations < maxiter and np.any(np.abs(newparams -
oldparams) > tol)):
H = np.asarray(hess(newparams))
# regularize Hessian, not clear what ridge factor should be
# keyword option with absolute default 1e-10, see #1847
if not np.all(ridge_factor == 0):
H[np.diag_indices(H.shape[0])] += ridge_factor
oldparams = newparams
newparams = oldparams - np.linalg.solve(H, score(oldparams))
if retall:
history.append(newparams)
if callback is not None:
callback(newparams)
iterations += 1
fval = f(newparams, *fargs) # this is the negative likelihood
if iterations == maxiter:
warnflag = 1
if disp:
print("Warning: Maximum number of iterations has been "
"exceeded.")
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iterations)
else:
warnflag = 0
if disp:
print("Optimization terminated successfully.")
print(" Current function value: %f" % fval)
print(" Iterations %d" % iterations)
if full_output:
(xopt, fopt, niter,
gopt, hopt) = (newparams, f(newparams, *fargs),
iterations, score(newparams),
hess(newparams))
converged = not warnflag
retvals = {'fopt': fopt, 'iterations': niter, 'score': gopt,
'Hessian': hopt, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': history})
else:
xopt = newparams
retvals = None
return xopt, retvals | Fit using Newton-Raphson algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
ridge_factor : float
Regularization factor for Hessian matrix.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_newton | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
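The core of _fit_newton is a ridge-regularized Newton step: add ridge_factor to the Hessian diagonal, then update params -= solve(H, score). The sketch below runs that loop on a small quadratic problem where the Hessian is constant, so it converges almost immediately; it is an illustration, not the statsmodels implementation:

```python
import numpy as np

A = np.array([[3.0, 1.0], [1.0, 2.0]])   # SPD Hessian of 0.5*p'Ap - b'p
b = np.array([1.0, -1.0])

def score(p):
    return A @ p - b

def hess(p):
    return A.copy()

params, old = np.zeros(2), np.full(2, np.inf)
tol, ridge_factor, iterations = 1e-8, 1e-10, 0
while iterations < 100 and np.any(np.abs(params - old) > tol):
    H = hess(params)
    H[np.diag_indices_from(H)] += ridge_factor   # regularize the diagonal
    old, params = params, params - np.linalg.solve(H, score(params))
    iterations += 1
print(np.round(params, 6), iterations)  # ~[ 0.6 -0.8] after ~2 iterations
```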
def _fit_bfgs(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
"""
Fit using Broyden-Fletcher-Goldfarb-Shanno algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
check_kwargs(kwargs, ("gtol", "norm", "epsilon"), "bfgs")
gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)
norm = kwargs.setdefault('norm', np.inf)
epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
retvals = optimize.fmin_bfgs(f, start_params, score, args=fargs,
gtol=gtol, norm=norm, epsilon=epsilon,
maxiter=maxiter, full_output=full_output,
disp=disp, retall=retall, callback=callback)
if full_output:
if not retall:
xopt, fopt, gopt, Hinv, fcalls, gcalls, warnflag = retvals
else:
(xopt, fopt, gopt, Hinv, fcalls,
gcalls, warnflag, allvecs) = retvals
converged = not warnflag
retvals = {'fopt': fopt, 'gopt': gopt, 'Hinv': Hinv,
'fcalls': fcalls, 'gcalls': gcalls, 'warnflag':
warnflag, 'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals | Fit using Broyden-Fletcher-Goldfarb-Shanno algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_bfgs | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
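When full_output is requested, fmin_bfgs returns a tuple whose tail holds the diagnostics that the wrapper packs into retvals. A small sketch of that unpacking on a toy objective (the local names mirror the wrapper's):

```python
import numpy as np
from scipy import optimize

def f(p):
    return np.sum((p - 2.0) ** 2)

def score(p):
    return 2.0 * (p - 2.0)

out = optimize.fmin_bfgs(f, np.zeros(3), fprime=score, gtol=1e-5,
                         norm=np.inf, full_output=True, disp=False)
xopt, fopt, gopt, Hinv, fcalls, gcalls, warnflag = out
retvals = {"fopt": fopt, "gopt": gopt, "Hinv": Hinv, "fcalls": fcalls,
           "gcalls": gcalls, "warnflag": warnflag, "converged": not warnflag}
print(np.round(xopt, 4), retvals["converged"])  # [2. 2. 2.] True
```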
def _fit_lbfgs(f, score, start_params, fargs, kwargs, disp=True, maxiter=100,
callback=None, retall=False, full_output=True, hess=None):
"""
Fit using Limited-memory Broyden-Fletcher-Goldfarb-Shanno algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
Notes
-----
Within the mle part of statsmodels, the log likelihood function and
its gradient with respect to the parameters do not have notationally
consistent sign.
"""
check_kwargs(
kwargs,
("m", "pgtol", "factr", "maxfun", "epsilon", "approx_grad", "bounds", "loglike_and_score", "iprint"),
"lbfgs"
)
# Use unconstrained optimization by default.
bounds = kwargs.setdefault('bounds', [(None, None)] * len(start_params))
kwargs.setdefault('iprint', 0)
# Pass the following keyword argument names through to fmin_l_bfgs_b
# if they are present in kwargs, otherwise use the fmin_l_bfgs_b
# default values.
names = ('m', 'pgtol', 'factr', 'maxfun', 'epsilon', 'approx_grad')
extra_kwargs = {x: kwargs[x] for x in names if x in kwargs}
# Extract values for the options related to the gradient.
approx_grad = kwargs.get('approx_grad', False)
loglike_and_score = kwargs.get('loglike_and_score', None)
epsilon = kwargs.get('epsilon', None)
# The approx_grad flag has superpowers nullifying the score function arg.
if approx_grad:
score = None
# Choose among three options for dealing with the gradient (the gradient
# of a log likelihood function with respect to its parameters
# is more specifically called the score in statistics terminology).
# The first option is to use the finite-differences
# approximation that is built into the fmin_l_bfgs_b optimizer.
# The second option is to use the provided score function.
# The third option is to use the score component of a provided
# function that simultaneously evaluates the log likelihood and score.
if epsilon and not approx_grad:
raise ValueError('a finite-differences epsilon was provided '
'even though we are not using approx_grad')
if approx_grad and loglike_and_score:
raise ValueError('gradient approximation was requested '
'even though an analytic loglike_and_score function '
'was given')
if loglike_and_score:
def func(p, *a):
return tuple(-x for x in loglike_and_score(p, *a))
elif score:
func = f
extra_kwargs['fprime'] = score
elif approx_grad:
func = f
retvals = optimize.fmin_l_bfgs_b(func, start_params, maxiter=maxiter,
callback=callback, args=fargs,
bounds=bounds, disp=disp,
**extra_kwargs)
if full_output:
xopt, fopt, d = retvals
# The warnflag is
# 0 if converged
# 1 if too many function evaluations or too many iterations
# 2 if stopped for another reason, given in d['task']
warnflag = d['warnflag']
converged = (warnflag == 0)
gopt = d['grad']
fcalls = d['funcalls']
iterations = d['nit']
retvals = {'fopt': fopt, 'gopt': gopt, 'fcalls': fcalls,
'warnflag': warnflag, 'converged': converged,
'iterations': iterations}
else:
xopt = retvals[0]
retvals = None
return xopt, retvals | Fit using Limited-memory Broyden-Fletcher-Goldfarb-Shanno algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
Notes
-----
Within the mle part of statsmodels, the log likelihood function and
its gradient with respect to the parameters do not have notationally
consistent sign. | _fit_lbfgs | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
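Two of the gradient options handled above can be exercised directly against scipy.optimize.fmin_l_bfgs_b: pass an analytic fprime, or set approx_grad=True and supply no gradient at all (bounds default to unconstrained, as in the wrapper). A minimal sketch:

```python
import numpy as np
from scipy import optimize

def f(p):
    return (p[0] - 1.0) ** 2 + (p[1] + 2.0) ** 2

def fprime(p):
    return np.array([2.0 * (p[0] - 1.0), 2.0 * (p[1] + 2.0)])

bounds = [(None, None)] * 2  # unconstrained, matching the wrapper's default

# option 1: analytic gradient
x1, f1, d1 = optimize.fmin_l_bfgs_b(f, np.zeros(2), fprime=fprime, bounds=bounds)
# option 2: finite-difference gradient
x2, f2, d2 = optimize.fmin_l_bfgs_b(f, np.zeros(2), approx_grad=True, bounds=bounds)
print(np.round(x1, 4), np.round(x2, 4), d1["warnflag"], d2["warnflag"])  # both 0
```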
def _fit_nm(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
"""
Fit using Nelder-Mead algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
check_kwargs(kwargs, ("xtol", "ftol", "maxfun"), "nm")
xtol = kwargs.setdefault('xtol', 0.0001)
ftol = kwargs.setdefault('ftol', 0.0001)
maxfun = kwargs.setdefault('maxfun', None)
retvals = optimize.fmin(f, start_params, args=fargs, xtol=xtol,
ftol=ftol, maxiter=maxiter, maxfun=maxfun,
full_output=full_output, disp=disp, retall=retall,
callback=callback)
if full_output:
if not retall:
xopt, fopt, niter, fcalls, warnflag = retvals
else:
xopt, fopt, niter, fcalls, warnflag, allvecs = retvals
converged = not warnflag
retvals = {'fopt': fopt, 'iterations': niter,
'fcalls': fcalls, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals | Fit using Nelder-Mead algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_nm | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
def _fit_cg(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
"""
Fit using Conjugate Gradient algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
check_kwargs(kwargs, ("gtol", "norm", "epsilon"), "cg")
gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)
norm = kwargs.setdefault('norm', np.inf)
epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
retvals = optimize.fmin_cg(f, start_params, score, gtol=gtol, norm=norm,
epsilon=epsilon, maxiter=maxiter,
full_output=full_output, disp=disp,
retall=retall, callback=callback)
if full_output:
if not retall:
xopt, fopt, fcalls, gcalls, warnflag = retvals
else:
xopt, fopt, fcalls, gcalls, warnflag, allvecs = retvals
converged = not warnflag
retvals = {'fopt': fopt, 'fcalls': fcalls, 'gcalls': gcalls,
'warnflag': warnflag, 'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals | Fit using Conjugate Gradient algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_cg | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
def _fit_ncg(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
"""
Fit using Newton Conjugate Gradient algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
check_kwargs(kwargs, ("fhess_p", "avextol", "epsilon"), "ncg")
fhess_p = kwargs.setdefault('fhess_p', None)
avextol = kwargs.setdefault('avextol', 1.0000000000000001e-05)
epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
retvals = optimize.fmin_ncg(f, start_params, score, fhess_p=fhess_p,
fhess=hess, args=fargs, avextol=avextol,
epsilon=epsilon, maxiter=maxiter,
full_output=full_output, disp=disp,
retall=retall, callback=callback)
if full_output:
if not retall:
xopt, fopt, fcalls, gcalls, hcalls, warnflag = retvals
else:
xopt, fopt, fcalls, gcalls, hcalls, warnflag, allvecs = \
retvals
converged = not warnflag
retvals = {'fopt': fopt, 'fcalls': fcalls, 'gcalls': gcalls,
'hcalls': hcalls, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals | Fit using Newton Conjugate Gradient algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_ncg | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
def _fit_powell(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
"""
Fit using Powell's conjugate direction algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
check_kwargs(kwargs, ("xtol", "ftol", "maxfun", "start_direc"), "powell")
xtol = kwargs.setdefault('xtol', 0.0001)
ftol = kwargs.setdefault('ftol', 0.0001)
maxfun = kwargs.setdefault('maxfun', None)
start_direc = kwargs.setdefault('start_direc', None)
retvals = optimize.fmin_powell(f, start_params, args=fargs, xtol=xtol,
ftol=ftol, maxiter=maxiter, maxfun=maxfun,
full_output=full_output, disp=disp,
retall=retall, callback=callback,
direc=start_direc)
if full_output:
if not retall:
xopt, fopt, direc, niter, fcalls, warnflag = retvals
else:
xopt, fopt, direc, niter, fcalls, warnflag, allvecs = \
retvals
converged = not warnflag
retvals = {'fopt': fopt, 'direc': direc, 'iterations': niter,
'fcalls': fcalls, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals | Fit using Powell's conjugate direction algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_powell | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
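In practice this wrapper is reached through a model's fit() method rather than called directly. Below is a minimal usage sketch; the Spector data and Logit model are illustrative choices, not part of the source above, and the extra keywords (xtol, ftol) are simply forwarded to _fit_powell through kwargs.

import statsmodels.api as sm

spector = sm.datasets.spector.load_pandas()
exog = sm.add_constant(spector.exog)
model = sm.Logit(spector.endog, exog)

# method='powell' routes the optimization through _fit_powell; xtol/ftol
# land in the kwargs dict validated by check_kwargs above.
res = model.fit(method='powell', maxiter=200, xtol=1e-6, ftol=1e-6, disp=False)
print(res.params)
print(res.mle_retvals)  # fopt, direc, iterations, fcalls, warnflag, converged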
def _fit_basinhopping(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
"""
Fit using Basin-hopping algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
check_kwargs(
kwargs,
("niter", "niter_success", "T", "stepsize", "interval", "minimizer", "seed"),
"basinhopping"
)
kwargs = {k: v for k, v in kwargs.items()}
niter = kwargs.setdefault('niter', 100)
niter_success = kwargs.setdefault('niter_success', None)
T = kwargs.setdefault('T', 1.0)
stepsize = kwargs.setdefault('stepsize', 0.5)
interval = kwargs.setdefault('interval', 50)
seed = kwargs.get("seed")
minimizer_kwargs = kwargs.get('minimizer', {})
minimizer_kwargs['args'] = fargs
minimizer_kwargs['jac'] = score
method = minimizer_kwargs.get('method', None)
if method and method != 'L-BFGS-B': # l_bfgs_b does not take a hessian
minimizer_kwargs['hess'] = hess
retvals = optimize.basinhopping(f, start_params,
minimizer_kwargs=minimizer_kwargs,
niter=niter, niter_success=niter_success,
T=T, stepsize=stepsize, disp=disp,
callback=callback, interval=interval,
seed=seed)
xopt = retvals.x
if full_output:
retvals = {
'fopt': retvals.fun,
'iterations': retvals.nit,
'fcalls': retvals.nfev,
'converged': 'completed successfully' in retvals.message[0]
}
else:
retvals = None
return xopt, retvals | Fit using Basin-hopping algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_basinhopping | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
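As a usage sketch, the basin-hopping keywords (niter, stepsize, seed, and the nested minimizer dict) can be passed straight through fit(); the Poisson model and simulated data below are illustrative assumptions, not taken from the source.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = sm.add_constant(rng.normal(size=(200, 2)))
y = rng.poisson(np.exp(x @ np.array([0.5, 0.2, -0.3])))

mod = sm.Poisson(y, x)
# niter, stepsize, seed and minimizer map onto the kwargs consumed by
# _fit_basinhopping; the inner minimizer here uses L-BFGS-B with the gradient.
res = mod.fit(method='basinhopping', niter=25, stepsize=0.25, seed=123,
              minimizer={'method': 'L-BFGS-B'}, disp=False)
print(res.params, res.mle_retvals['converged'])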
def normalize_cov_type(cov_type):
"""
Normalize the cov_type string to a canonical version
Parameters
----------
cov_type : str
Returns
-------
normalized_cov_type : str
"""
if cov_type == 'nw-panel':
cov_type = 'hac-panel'
if cov_type == 'nw-groupsum':
cov_type = 'hac-groupsum'
return cov_type | Normalize the cov_type string to a canonical version
Parameters
----------
cov_type : str
Returns
-------
normalized_cov_type : str | normalize_cov_type | python | statsmodels/statsmodels | statsmodels/base/covtype.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/covtype.py | BSD-3-Clause |
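A quick check of the alias mapping, calling the function above directly:

assert normalize_cov_type('nw-panel') == 'hac-panel'
assert normalize_cov_type('nw-groupsum') == 'hac-groupsum'
assert normalize_cov_type('HC1') == 'HC1'  # everything else passes through unchanged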
def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwds):
"""create new results instance with robust covariance as default
Parameters
----------
cov_type : str
the type of robust sandwich estimator to use. see Notes below
use_t : bool
If true, then the t distribution is used for inference.
If false, then the normal distribution is used.
kwds : depends on cov_type
Required or optional arguments for robust covariance calculation.
see Notes below
Returns
-------
results : results instance
This method creates a new results instance with the requested
robust covariance as the default covariance of the parameters.
Inferential statistics like p-values and hypothesis tests will be
based on this covariance matrix.
Notes
-----
Warning: Some of the options and defaults in cov_kwds may be changed in a
future version.
The covariance keywords provide an option 'scaling_factor' to adjust the
scaling of the covariance matrix, that is the covariance is multiplied by
this factor if it is given and is not `None`. This allows the user to
adjust the scaling of the covariance matrix to match other statistical
packages.
For example, `scaling_factor=(nobs - 1.) / (nobs - k_params)` provides a
correction so that the robust covariance matrices match those of Stata in
some models like GLM and discrete Models.
The following covariance types and required or optional arguments are
currently available:
- 'HC0', 'HC1', 'HC2', 'HC3': heteroscedasticity robust covariance
- no keyword arguments
- 'HAC': heteroskedasticity-autocorrelation robust covariance
``maxlags`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
The currently available kernels are ['bartlett', 'uniform'],
default is Bartlett
``use_correction``: bool, optional
If true, use small sample correction
- 'cluster': clustered covariance estimator
``groups`` : array_like[int], required :
Integer-valued index of clusters or groups.
``use_correction``: bool, optional
If True the sandwich covariance is calculated with a small
sample correction.
If False the sandwich covariance is calculated without
small sample correction.
``df_correction``: bool, optional
If True (default), then the degrees of freedom for the
inferential statistics and hypothesis tests, such as
pvalues, f_pvalue, conf_int, and t_test and f_test, are
based on the number of groups minus one instead of the
total number of observations minus the number of explanatory
variables. `df_resid` of the results instance is also
adjusted. When `use_t` is also True, then pvalues are
computed using the Student's t distribution using the
corrected values. These may differ substantially from
p-values based on the normal distribution if the number of groups is
small.
If False, then `df_resid` of the results instance is not
adjusted.
- 'hac-groupsum': Driscoll and Kraay, heteroscedasticity and
autocorrelation robust covariance for panel data
# TODO: more options needed here
``time`` : array_like, required
index of time periods
``maxlags`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
The available kernels are ['bartlett', 'uniform']. The default is
Bartlett.
``use_correction`` : {False, 'hac', 'cluster'}, optional
If False the sandwich covariance is calculated without small
sample correction. If `use_correction = 'cluster'` (default),
then the same small sample correction as in the case of
`covtype='cluster'` is used.
``df_correction`` : bool, optional
The adjustment to df_resid, see cov_type 'cluster' above
- 'hac-panel': heteroscedasticity and autocorrelation robust standard
errors in panel data. The data needs to be sorted in this case, the
time series for each panel unit or cluster need to be stacked. The
membership to a time series of an individual or group can be specified
either by group indicators or by increasing time periods. One of
``groups`` or ``time`` is required. # TODO: we need more options here
``groups`` : array_like[int]
indicator for groups
``time`` : array_like[int]
index of time periods
``maxlags`` : int, required
number of lags to use
``kernel`` : {callable, str}, optional
Available kernels are ['bartlett', 'uniform'], default
is Bartlett
``use_correction`` : {False, 'hac', 'cluster'}, optional
If False the sandwich covariance is calculated without
small sample correction.
``df_correction`` : bool, optional
Adjustment to df_resid, see cov_type 'cluster' above
**Reminder**: ``use_correction`` in "hac-groupsum" and "hac-panel" is
not bool, needs to be in {False, 'hac', 'cluster'}.
.. todo:: Currently there is no check for extra or misspelled keywords,
except in the case of cov_type `HCx`
"""
import statsmodels.stats.sandwich_covariance as sw
cov_type = normalize_cov_type(cov_type)
if 'kernel' in kwds:
kwds['weights_func'] = kwds.pop('kernel')
if 'weights_func' in kwds and not callable(kwds['weights_func']):
kwds['weights_func'] = sw.kernel_dict[kwds['weights_func']]
# pop because HCx raises if any kwds
sc_factor = kwds.pop('scaling_factor', None)
# TODO: make separate function that returns a robust cov plus info
use_self = kwds.pop('use_self', False)
if use_self:
res = self
else:
# this does not work for most models, use raw instance instead from fit
res = self.__class__(self.model, self.params,
normalized_cov_params=self.normalized_cov_params,
scale=self.scale)
res.cov_type = cov_type
# use_t might already be defined by the class, and already set
if use_t is None:
use_t = self.use_t
res.cov_kwds = {'use_t':use_t} # store for information
res.use_t = use_t
adjust_df = False
if cov_type in ['cluster', 'hac-panel', 'hac-groupsum']:
df_correction = kwds.get('df_correction', None)
# TODO: check also use_correction, do I need all combinations?
if df_correction is not False: # i.e. in [None, True]:
# user did not explicitly set it to False
adjust_df = True
res.cov_kwds['adjust_df'] = adjust_df
# verify and set kwds, and calculate cov
# TODO: this should be outsourced in a function so we can reuse it in
# other models
# TODO: make it DRYer repeated code for checking kwds
if cov_type.upper() in ('HC0', 'HC1', 'HC2', 'HC3'):
if kwds:
raise ValueError('heteroscedasticity robust covariance '
'does not use keywords')
res.cov_kwds['description'] = descriptions[cov_type.upper()]
res.cov_params_default = getattr(self, 'cov_' + cov_type.upper(), None)
if res.cov_params_default is None:
# results classes that do not have cov_HCx attribute
res.cov_params_default = sw.cov_white_simple(self,
use_correction=False)
elif cov_type.lower() == 'hac':
maxlags = kwds['maxlags'] # required?, default in cov_hac_simple
res.cov_kwds['maxlags'] = maxlags
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
use_correction = kwds.get('use_correction', False)
res.cov_kwds['use_correction'] = use_correction
res.cov_kwds['description'] = descriptions['HAC'].format(
maxlags=maxlags, correction=['without', 'with'][use_correction])
res.cov_params_default = sw.cov_hac_simple(self, nlags=maxlags,
weights_func=weights_func,
use_correction=use_correction)
elif cov_type.lower() == 'cluster':
#cluster robust standard errors, one- or two-way
groups = kwds['groups']
if not hasattr(groups, 'shape'):
groups = np.asarray(groups).T
if groups.ndim >= 2:
groups = groups.squeeze()
res.cov_kwds['groups'] = groups
use_correction = kwds.get('use_correction', True)
res.cov_kwds['use_correction'] = use_correction
if groups.ndim == 1:
if adjust_df:
# need to find number of groups
# duplicate work
self.n_groups = n_groups = len(np.unique(groups))
res.cov_params_default = sw.cov_cluster(self, groups,
use_correction=use_correction)
elif groups.ndim == 2:
if hasattr(groups, 'values'):
groups = groups.values
if adjust_df:
# need to find number of groups
# duplicate work
n_groups0 = len(np.unique(groups[:,0]))
n_groups1 = len(np.unique(groups[:, 1]))
self.n_groups = (n_groups0, n_groups1)
n_groups = min(n_groups0, n_groups1) # use for adjust_df
# Note: sw.cov_cluster_2groups has 3 returns
res.cov_params_default = sw.cov_cluster_2groups(self, groups,
use_correction=use_correction)[0]
else:
raise ValueError('only two groups are supported')
res.cov_kwds['description'] = descriptions['cluster']
elif cov_type.lower() == 'hac-panel':
#cluster robust standard errors
res.cov_kwds['time'] = time = kwds.get('time', None)
res.cov_kwds['groups'] = groups = kwds.get('groups', None)
#TODO: nlags is currently required
#nlags = kwds.get('nlags', True)
#res.cov_kwds['nlags'] = nlags
#TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'hac')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
# TODO: clumsy time index in cov_nw_panel
if groups is not None:
groups = np.asarray(groups)
tt = (np.nonzero(groups[:-1] != groups[1:])[0] + 1).tolist()
nobs_ = len(groups)
elif time is not None:
# TODO: clumsy time index in cov_nw_panel
time = np.asarray(time)
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1).tolist()
nobs_ = len(time)
else:
raise ValueError('either time or groups needs to be given')
groupidx = lzip([0] + tt, tt + [nobs_])
self.n_groups = n_groups = len(groupidx)
res.cov_params_default = sw.cov_nw_panel(self, maxlags, groupidx,
weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = descriptions['HAC-Panel']
elif cov_type.lower() == 'hac-groupsum':
# Driscoll-Kraay standard errors
res.cov_kwds['time'] = time = kwds['time']
#TODO: nlags is currently required
#nlags = kwds.get('nlags', True)
#res.cov_kwds['nlags'] = nlags
#TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'cluster')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if adjust_df:
# need to find number of groups
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1)
self.n_groups = n_groups = len(tt) + 1
res.cov_params_default = sw.cov_nw_groupsum(self, maxlags, time,
weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = descriptions['HAC-Groupsum']
else:
raise ValueError('cov_type not recognized. See docstring for ' +
'available options and spelling')
# generic optional factor to scale covariance
res.cov_kwds['scaling_factor'] = sc_factor
if sc_factor is not None:
res.cov_params_default *= sc_factor
if adjust_df:
# Note: df_resid is used for scale and others, add new attribute
res.df_resid_inference = n_groups - 1
return res | create new results instance with robust covariance as default
Parameters
----------
cov_type : str
the type of robust sandwich estimator to use. see Notes below
use_t : bool
If true, then the t distribution is used for inference.
If false, then the normal distribution is used.
kwds : depends on cov_type
Required or optional arguments for robust covariance calculation.
see Notes below
Returns
-------
results : results instance
This method creates a new results instance with the requested
robust covariance as the default covariance of the parameters.
Inferential statistics like p-values and hypothesis tests will be
based on this covariance matrix.
Notes
-----
Warning: Some of the options and defaults in cov_kwds may be changed in a
future version.
The covariance keywords provide an option 'scaling_factor' to adjust the
scaling of the covariance matrix, that is the covariance is multiplied by
this factor if it is given and is not `None`. This allows the user to
adjust the scaling of the covariance matrix to match other statistical
packages.
For example, `scaling_factor=(nobs - 1.) / (nobs - k_params)` provides a
correction so that the robust covariance matrices match those of Stata in
some models like GLM and discrete Models.
The following covariance types and required or optional arguments are
currently available:
- 'HC0', 'HC1', 'HC2', 'HC3': heteroscedasticity robust covariance
- no keyword arguments
- 'HAC': heteroskedasticity-autocorrelation robust covariance
``maxlags`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
The currently available kernels are ['bartlett', 'uniform'],
default is Bartlett
``use_correction``: bool, optional
If true, use small sample correction
- 'cluster': clustered covariance estimator
``groups`` : array_like[int], required :
Integer-valued index of clusters or groups.
``use_correction``: bool, optional
If True the sandwich covariance is calculated with a small
sample correction.
If False the sandwich covariance is calculated without
small sample correction.
``df_correction``: bool, optional
If True (default), then the degrees of freedom for the
inferential statistics and hypothesis tests, such as
pvalues, f_pvalue, conf_int, and t_test and f_test, are
based on the number of groups minus one instead of the
total number of observations minus the number of explanatory
variables. `df_resid` of the results instance is also
adjusted. When `use_t` is also True, then pvalues are
computed using the Student's t distribution using the
corrected values. These may differ substantially from
p-values based on the normal distribution if the number of groups is
small.
If False, then `df_resid` of the results instance is not
adjusted.
- 'hac-groupsum': Driscoll and Kraay, heteroscedasticity and
autocorrelation robust covariance for panel data
# TODO: more options needed here
``time`` : array_like, required
index of time periods
``maxlags`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
The available kernels are ['bartlett', 'uniform']. The default is
Bartlett.
``use_correction`` : {False, 'hac', 'cluster'}, optional
If False the sandwich covariance is calculated without small
sample correction. If `use_correction = 'cluster'` (default),
then the same small sample correction as in the case of
`covtype='cluster'` is used.
``df_correction`` : bool, optional
The adjustment to df_resid, see cov_type 'cluster' above
- 'hac-panel': heteroscedasticity and autocorrelation robust standard
errors in panel data. The data needs to be sorted in this case, the
time series for each panel unit or cluster need to be stacked. The
membership to a time series of an individual or group can be specified
either by group indicators or by increasing time periods. One of
``groups`` or ``time`` is required. # TODO: we need more options here
``groups`` : array_like[int]
indicator for groups
``time`` : array_like[int]
index of time periods
``maxlags`` : int, required
number of lags to use
``kernel`` : {callable, str}, optional
Available kernels are ['bartlett', 'uniform'], default
is Bartlett
``use_correction`` : {False, 'hac', 'cluster'}, optional
If False the sandwich covariance is calculated without
small sample correction.
``df_correction`` : bool, optional
Adjustment to df_resid, see cov_type 'cluster' above
**Reminder**: ``use_correction`` in "hac-groupsum" and "hac-panel" is
not bool, needs to be in {False, 'hac', 'cluster'}.
.. todo:: Currently there is no check for extra or misspelled keywords,
except in the case of cov_type `HCx` | get_robustcov_results | python | statsmodels/statsmodels | statsmodels/base/covtype.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/covtype.py | BSD-3-Clause |
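A hedged usage sketch for the cluster case; the simulated data and group layout are illustrative only. The same covariance can also be requested up front via fit(cov_type=..., cov_kwds=...), which relies on the same machinery.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(42)
groups = np.repeat(np.arange(20), 5)              # 20 clusters of 5 observations
x = sm.add_constant(rng.normal(size=(100, 2)))
y = x @ np.array([1.0, 0.5, -0.25]) + rng.normal(size=100)

ols_res = sm.OLS(y, x).fit()
robust = ols_res.get_robustcov_results(cov_type='cluster', groups=groups,
                                       use_correction=True, df_correction=True)
print(robust.bse)       # cluster-robust standard errors
print(robust.cov_kwds)  # groups, use_correction, description, ...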
def _lm_robust(score, constraint_matrix, score_deriv_inv, cov_score,
cov_params=None):
'''general formula for score/LM test
generalized score or lagrange multiplier test for implicit constraints
`r(params) = 0`, with gradient `R = d r / d params`
linear constraints are given by `R params - q = 0`
It is assumed that all arrays are evaluated at the constrained estimates.
Parameters
----------
score : ndarray, 1-D
derivative of objective function at estimated parameters
of constrained model
constraint_matrix R : ndarray
Linear restriction matrix or Jacobian of nonlinear constraints
score_deriv_inv, Ainv : ndarray, symmetric, square
inverse of second derivative of objective function
TODO: could be inverse of OPG or any other estimator if information
matrix equality holds
cov_score B : ndarray, symmetric, square
covariance matrix of the score. This is the inner part of a sandwich
estimator.
cov_params V : ndarray, symmetric, square
covariance of full parameter vector evaluated at constrained parameter
estimate. This can be specified instead of cov_score B.
Returns
-------
lm_stat : float
score/lagrange multiplier statistic
p-value : float
p-value of the LM test based on chisquare distribution
Notes
-----
'''
# shorthand alias
R, Ainv, B, V = constraint_matrix, score_deriv_inv, cov_score, cov_params
k_constraints = np.linalg.matrix_rank(R)
tmp = R.dot(Ainv)
wscore = tmp.dot(score) # C Ainv score
if B is None and V is None:
# only Ainv is given, so we assume information matrix identity holds
# computational short cut, should be same if Ainv == inv(B)
lm_stat = score.dot(Ainv.dot(score))
else:
# information matrix identity does not hold
if V is None:
inner = tmp.dot(B).dot(tmp.T)
else:
inner = R.dot(V).dot(R.T)
#lm_stat2 = wscore.dot(np.linalg.pinv(inner).dot(wscore))
# Let's assume inner is invertible, TODO: check if usecase for pinv exists
lm_stat = wscore.dot(np.linalg.solve(inner, wscore))
pval = stats.chi2.sf(lm_stat, k_constraints)
return lm_stat, pval, k_constraints | general formula for score/LM test
generalized score or lagrange multiplier test for implicit constraints
`r(params) = 0`, with gradient `R = d r / d params`
linear constraints are given by `R params - q = 0`
It is assumed that all arrays are evaluated at the constrained estimates.
Parameters
----------
score : ndarray, 1-D
derivative of objective function at estimated parameters
of constrained model
constraint_matrix R : ndarray
Linear restriction matrix or Jacobian of nonlinear constraints
score_deriv_inv, Ainv : ndarray, symmetric, square
inverse of second derivative of objective function
TODO: could be inverse of OPG or any other estimator if information
matrix equality holds
cov_score B : ndarray, symmetric, square
covariance matrix of the score. This is the inner part of a sandwich
estimator.
cov_params V : ndarray, symmetric, square
covariance of full parameter vector evaluated at constrained parameter
estimate. This can be specified instead of cov_score B.
Returns
-------
lm_stat : float
score/lagrange multiplier statistic
p-value : float
p-value of the LM test based on chisquare distribution
Notes
----- | _lm_robust | python | statsmodels/statsmodels | statsmodels/base/_parameter_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_parameter_inference.py | BSD-3-Clause |
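A small self-contained numeric sketch of the statistic computed above; every array here is made up purely to show the shapes and the formula LM = (R Ainv s)' [R Ainv B Ainv' R']^{-1} (R Ainv s).

import numpy as np
from scipy import stats

score = np.array([0.8, -0.3, 0.1])               # s, evaluated at the constrained fit
R = np.array([[0.0, 0.0, 1.0]])                  # one linear constraint
A = np.array([[2.0, 0.3, 0.1],
              [0.3, 1.5, 0.2],
              [0.1, 0.2, 1.0]])
Ainv = np.linalg.inv(A)                          # inverse Hessian-type matrix
B = np.array([[1.8, 0.2, 0.0],
              [0.2, 1.4, 0.1],
              [0.0, 0.1, 0.9]])                  # covariance of the score

wscore = R @ Ainv @ score
inner = R @ Ainv @ B @ Ainv.T @ R.T
lm_stat = wscore @ np.linalg.solve(inner, wscore)
pval = stats.chi2.sf(lm_stat, np.linalg.matrix_rank(R))
print(lm_stat, pval)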
def score_test(self, exog_extra=None, params_constrained=None,
hypothesis='joint', cov_type=None, cov_kwds=None,
k_constraints=None, r_matrix=None, scale=None, observed=True):
"""score test for restrictions or for omitted variables
Null Hypothesis : constraints are satisfied
Alternative Hypothesis : at least one of the constraints does not hold
This allows specifying restricted and unrestricted model properties in
three different ways
- fit_constrained result: model contains score and hessian function for
the full, unrestricted model, but the parameter estimate in the results
instance is for the restricted model. This is the case if the model
was estimated with fit_constrained.
- restricted model with variable addition: If exog_extra is not None, then
it is assumed that the current model is a model with zero restrictions
and the unrestricted model is given by adding exog_extra as additional
explanatory variables.
- unrestricted model with restricted parameters explicitly provided. If
params_constrained is not None, then the model is assumed to be for the
unrestricted model, but the provided parameters are for the restricted
model.
TODO: This case will currently only work for `nonrobust` cov_type,
otherwise we will also need the restriction matrix provided by the user.
Parameters
----------
exog_extra : None or array_like
Explanatory variables that are jointly tested for inclusion in the
model, i.e. omitted variables.
params_constrained : array_like
estimated parameter of the restricted model. This can be the
parameter estimate for the current model when testing for omitted
variables.
hypothesis : str, 'joint' (default) or 'separate'
If hypothesis is 'joint', then the chisquare test results for the
joint hypothesis that all constraints hold are returned.
If hypothesis is 'separate', then z-test results for each constraint
are returned.
This is currently only implemented for cov_type="nonrobust".
cov_type : str
Warning: only partially implemented so far, currently only "nonrobust"
and "HC0" are supported.
If cov_type is None, then the cov_type specified in fit for the Wald
tests is used.
If the cov_type argument is not None, then it will be used instead of
the Wald cov_type given in fit.
k_constraints : int or None
Number of constraints that were used in the estimation of params
restricted relative to the number of exog in the model.
This must be provided if no exog_extra are given. If exog_extra is
not None, then k_constraints is assumed to be zero if it is None.
observed : bool
If True, then the observed Hessian is used in calculating the
covariance matrix of the score. If false then the expected
information matrix is used. This currently only applies to GLM where
EIM is available.
Warning: This option might still change.
Returns
-------
chi2_stat : float
chisquare statistic for the score test
p-value : float
P-value of the score test based on the chisquare distribution.
df : int
Degrees of freedom used in the p-value calculation. This is equal
to the number of constraints.
Notes
-----
Status: experimental, several options are not implemented yet or are not
verified yet. Currently available options might also still change.
cov_type is 'nonrobust':
The covariance matrix for the score is based on the Hessian, i.e.
observed information matrix or optionally on the expected information
matrix.
cov_type is 'HC0'
The covariance matrix of the score is the simple empirical covariance of
score_obs without degrees of freedom correction.
"""
# TODO: we are computing unnecessary things for cov_type nonrobust
if hasattr(self, "_results"):
# use numpy if we have wrapper, not relevant if method
self = self._results
model = self.model
nobs = model.endog.shape[0] # model.nobs
# discrete Poisson does not have nobs
if params_constrained is None:
params_constrained = self.params
cov_type = cov_type if cov_type is not None else self.cov_type
if observed is False:
hess_kwd = {'observed': False}
else:
hess_kwd = {}
if exog_extra is None:
if hasattr(self, 'constraints'):
if isinstance(self.constraints, tuple):
r_matrix = self.constraints[0]
else:
r_matrix = self.constraints.coefs
k_constraints = r_matrix.shape[0]
else:
if k_constraints is None:
raise ValueError('if exog_extra is None, then k_constraints '
'needs to be given')
# we need to use results scale as additional parameter
if scale is not None:
# we need to use results scale as additional parameter, gh #7840
score_kwd = {'scale': scale}
hess_kwd['scale'] = scale
else:
score_kwd = {}
# duplicate computation of score, might not be needed
score = model.score(params_constrained, **score_kwd)
score_obs = model.score_obs(params_constrained, **score_kwd)
hessian = model.hessian(params_constrained, **hess_kwd)
else:
if cov_type == 'V':
raise ValueError('if exog_extra is not None, then cov_type cannot '
'be V')
if hasattr(self, 'constraints'):
raise NotImplementedError('if exog_extra is not None, then self '
'should not be a constrained fit result')
if isinstance(exog_extra, tuple):
sh = _scorehess_extra(self, params_constrained, *exog_extra,
hess_kwds=hess_kwd)
score_obs, hessian, k_constraints, r_matrix = sh
score = score_obs.sum(0)
else:
exog_extra = np.asarray(exog_extra)
k_constraints = 0
ex = np.column_stack((model.exog, exog_extra))
# this uses shape not matrix rank to determine k_constraints
# requires nonsingular (no added perfect collinearity)
k_constraints += ex.shape[1] - model.exog.shape[1]
# TODO use diag instead of full np.eye
r_matrix = np.eye(len(self.params) + k_constraints
)[-k_constraints:]
score_factor = model.score_factor(params_constrained)
if score_factor.ndim == 1:
score_obs = (score_factor[:, None] * ex)
else:
sf = score_factor
score_obs = np.column_stack((sf[:, :1] * ex, sf[:, 1:]))
score = score_obs.sum(0)
hessian_factor = model.hessian_factor(params_constrained,
**hess_kwd)
# see #4714
from statsmodels.genmod.generalized_linear_model import GLM
if isinstance(model, GLM):
hessian_factor *= -1
hessian = np.dot(ex.T * hessian_factor, ex)
if cov_type == 'nonrobust':
cov_score_test = -hessian
elif cov_type.upper() == 'HC0':
hinv = -np.linalg.inv(hessian)
cov_score = nobs * np.cov(score_obs.T)
# temporary to try out
lm = _lm_robust(score, r_matrix, hinv, cov_score, cov_params=None)
return lm
# alternative is to use only the center, but it is singular
# https://github.com/statsmodels/statsmodels/pull/2096#issuecomment-393646205
# cov_score_test_inv = cov_lm_robust(score, r_matrix, hinv,
# cov_score, cov_params=None)
elif cov_type.upper() == 'V':
# TODO: this does not work, V in fit_constrained results is singular
# we need cov_params without the zeros in it
hinv = -np.linalg.inv(hessian)
cov_score = nobs * np.cov(score_obs.T)
V = self.cov_params_default
# temporary to try out
chi2stat = _lm_robust(score, r_matrix, hinv, cov_score, cov_params=V)
pval = stats.chi2.sf(chi2stat, k_constraints)
return chi2stat, pval
else:
msg = 'Only cov_type "nonrobust" and "HC0" are available.'
raise NotImplementedError(msg)
if hypothesis == 'joint':
chi2stat = score.dot(np.linalg.solve(cov_score_test, score[:, None]))
pval = stats.chi2.sf(chi2stat, k_constraints)
# return a stats results instance instead? Contrast?
return chi2stat, pval, k_constraints
elif hypothesis == 'separate':
diff = score
bse = np.sqrt(np.diag(cov_score_test))
stat = diff / bse
pval = stats.norm.sf(np.abs(stat))*2
return stat, pval
else:
raise NotImplementedError('only hypothesis "joint" is available') | score test for restrictions or for omitted variables
Null Hypothesis : constraints are satisfied
Alternative Hypothesis : at least one of the constraints does not hold
This allows specifying restricted and unrestricted model properties in
three different ways
- fit_constrained result: model contains score and hessian function for
the full, unrestricted model, but the parameter estimate in the results
instance is for the restricted model. This is the case if the model
was estimated with fit_constrained.
- restricted model with variable addition: If exog_extra is not None, then
it is assumed that the current model is a model with zero restrictions
and the unrestricted model is given by adding exog_extra as additional
explanatory variables.
- unrestricted model with restricted parameters explicitly provided. If
params_constrained is not None, then the model is assumed to be for the
unrestricted model, but the provided parameters are for the restricted
model.
TODO: This case will currently only work for `nonrobust` cov_type,
otherwise we will also need the restriction matrix provided by the user.
Parameters
----------
exog_extra : None or array_like
Explanatory variables that are jointly tested for inclusion in the
model, i.e. omitted variables.
params_constrained : array_like
estimated parameter of the restricted model. This can be the
parameter estimate for the current model when testing for omitted
variables.
hypothesis : str, 'joint' (default) or 'separate'
If hypothesis is 'joint', then the chisquare test results for the
joint hypothesis that all constraints hold are returned.
If hypothesis is 'separate', then z-test results for each constraint
are returned.
This is currently only implemented for cov_type="nonrobust".
cov_type : str
Warning: only partially implemented so far, currently only "nonrobust"
and "HC0" are supported.
If cov_type is None, then the cov_type specified in fit for the Wald
tests is used.
If the cov_type argument is not None, then it will be used instead of
the Wald cov_type given in fit.
k_constraints : int or None
Number of constraints that were used in the estimation of params
restricted relative to the number of exog in the model.
This must be provided if no exog_extra are given. If exog_extra is
not None, then k_constraints is assumed to be zero if it is None.
observed : bool
If True, then the observed Hessian is used in calculating the
covariance matrix of the score. If false then the expected
information matrix is used. This currently only applies to GLM where
EIM is available.
Warning: This option might still change.
Returns
-------
chi2_stat : float
chisquare statistic for the score test
p-value : float
P-value of the score test based on the chisquare distribution.
df : int
Degrees of freedom used in the p-value calculation. This is equal
to the number of constraints.
Notes
-----
Status: experimental, several options are not implemented yet or are not
verified yet. Currently available options might also still change.
cov_type is 'nonrobust':
The covariance matrix for the score is based on the Hessian, i.e.
observed information matrix or optionally on the expected information
matrix.
cov_type is 'HC0'
The covariance matrix of the score is the simple empirical covariance of
score_obs without degrees of freedom correction. | score_test | python | statsmodels/statsmodels | statsmodels/base/_parameter_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_parameter_inference.py | BSD-3-Clause |
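A usage sketch for the variable-addition case. It assumes the fitted results class exposes this function as a score_test method (GLM results do in recent statsmodels versions); the simulated Poisson data are illustrative.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(1)
x1 = rng.normal(size=(500, 1))
x2 = rng.normal(size=(500, 1))
y = rng.poisson(np.exp(0.3 + 0.5 * x1[:, 0] + 0.4 * x2[:, 0]))

exog = sm.add_constant(x1)                       # restricted model omits x2
res = sm.GLM(y, exog, family=sm.families.Poisson()).fit()

# Null hypothesis: the coefficient on the omitted column x2 is zero.
chi2_stat, pval, df = res.score_test(exog_extra=x2)
print(chi2_stat, pval, df)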
def _scorehess_extra(self, params=None, exog_extra=None,
exog2_extra=None, hess_kwds=None):
"""Experimental helper function for variable addition score test.
This uses score and hessian factor at the params which should be the
params of the restricted model.
"""
if hess_kwds is None:
hess_kwds = {}
# this corresponds to a model methods, so we need only the model
model = self.model
# as long as we have results instance, we can take params from it
if params is None:
params = self.params
# get original exog from model, currently only if exactly 2
exog_o1, exog_o2 = model._get_exogs()
if exog_o2 is None:
# if extra params is scalar, as in NB, GPP
exog_o2 = np.ones((exog_o1.shape[0], 1))
k_mean = exog_o1.shape[1]
k_prec = exog_o2.shape[1]
if exog_extra is not None:
exog = np.column_stack((exog_o1, exog_extra))
else:
exog = exog_o1
if exog2_extra is not None:
exog2 = np.column_stack((exog_o2, exog2_extra))
else:
exog2 = exog_o2
k_mean_new = exog.shape[1]
k_prec_new = exog2.shape[1]
k_cm = k_mean_new - k_mean
k_cp = k_prec_new - k_prec
k_constraints = k_cm + k_cp
index_mean = np.arange(k_mean, k_mean_new)
index_prec = np.arange(k_mean_new + k_prec, k_mean_new + k_prec_new)
r_matrix = np.zeros((k_constraints, len(params) + k_constraints))
# print(exog.shape, exog2.shape)
# print(r_matrix.shape, k_cm, k_cp, k_mean_new, k_prec_new)
# print(index_mean, index_prec)
r_matrix[:k_cm, index_mean] = np.eye(k_cm)
r_matrix[k_cm: k_cm + k_cp, index_prec] = np.eye(k_cp)
if hasattr(model, "score_hessian_factor"):
sf, hf = model.score_hessian_factor(params, return_hessian=True,
**hess_kwds)
else:
sf = model.score_factor(params)
hf = model.hessian_factor(params, **hess_kwds)
sf1, sf2 = sf
hf11, hf12, hf22 = hf
# elementwise product for each row (observation)
d1 = sf1[:, None] * exog
d2 = sf2[:, None] * exog2
score_obs = np.column_stack((d1, d2))
# elementwise product for each row (observation)
d11 = (exog.T * hf11).dot(exog)
d12 = (exog.T * hf12).dot(exog2)
d22 = (exog2.T * hf22).dot(exog2)
hessian = np.block([[d11, d12], [d12.T, d22]])
return score_obs, hessian, k_constraints, r_matrix | Experimental helper function for variable addition score test.
This uses score and hessian factor at the params which should be the
params of the restricted model. | _scorehess_extra | python | statsmodels/statsmodels | statsmodels/base/_parameter_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_parameter_inference.py | BSD-3-Clause |
def tic(results):
"""Takeuchi information criterion for misspecified models
"""
imr = getattr(results, "im_ratio", im_ratio(results))
tic = - 2 * results.llf + 2 * np.trace(imr)
return tic | Takeuchi information criterion for misspecified models | tic | python | statsmodels/statsmodels | statsmodels/base/_parameter_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_parameter_inference.py | BSD-3-Clause |
def gbic(results, gbicp=False):
"""generalized BIC for misspecified models
References
----------
Lv, Jinchi, and Jun S. Liu. 2014. "Model Selection Principles in
Misspecified Models." Journal of the Royal Statistical Society.
Series B (Statistical Methodology) 76 (1): 141–67.
"""
self = getattr(results, "_results", results)
k_params = self.df_model + 1
nobs = k_params + self.df_resid
imr = getattr(results, "im_ratio", im_ratio(results))
imr_logdet = np.linalg.slogdet(imr)[1]
gbic = -2 * self.llf + k_params * np.log(nobs) - imr_logdet # LL equ. (20)
gbicp = gbic + np.trace(imr) # LL equ. (23)
return gbic, gbicp | generalized BIC for misspecified models
References
----------
Lv, Jinchi, and Jun S. Liu. 2014. "Model Selection Principles in
Misspecified Models." Journal of the Royal Statistical Society.
Series B (Statistical Methodology) 76 (1): 141–67. | gbic | python | statsmodels/statsmodels | statsmodels/base/_parameter_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_parameter_inference.py | BSD-3-Clause |
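A sketch of computing both criteria for a fitted model. The import path is the private module this code lives in, so it is an assumption that may change; whether im_ratio supports a given model family is not guaranteed here, but a Poisson GLM is a typical case.

import numpy as np
import statsmodels.api as sm
from statsmodels.base._parameter_inference import tic, gbic

rng = np.random.default_rng(7)
x = sm.add_constant(rng.normal(size=(300, 2)))
y = rng.poisson(np.exp(x @ np.array([0.2, 0.4, -0.3])))

res = sm.GLM(y, x, family=sm.families.Poisson()).fit()
print("TIC:", tic(res))
print("GBIC, GBICp:", gbic(res))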
def _est_regularized_naive(mod, pnum, partitions, fit_kwds=None):
"""estimates the regularized fitted parameters.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
pnum : scalar
Index of current partition
partitions : scalar
Total number of partitions
fit_kwds : dict-like or None
Keyword arguments to be given to fit_regularized
Returns
-------
An array of the parameters for the regularized fit
"""
if fit_kwds is None:
raise ValueError("_est_regularized_naive currently " +
"requires that fit_kwds not be None.")
return mod.fit_regularized(**fit_kwds).params | estimates the regularized fitted parameters.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
pnum : scalar
Index of current partition
partitions : scalar
Total number of partitions
fit_kwds : dict-like or None
Keyword arguments to be given to fit_regularized
Returns
-------
An array of the parameters for the regularized fit | _est_regularized_naive | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def _est_unregularized_naive(mod, pnum, partitions, fit_kwds=None):
"""estimates the unregularized fitted parameters.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
pnum : scalar
Index of current partition
partitions : scalar
Total number of partitions
fit_kwds : dict-like or None
Keyword arguments to be given to fit
Returns
-------
An array of the parameters for the fit
"""
if fit_kwds is None:
raise ValueError("_est_unregularized_naive currently " +
"requires that fit_kwds not be None.")
return mod.fit(**fit_kwds).params | estimates the unregularized fitted parameters.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
pnum : scalar
Index of current partition
partitions : scalar
Total number of partitions
fit_kwds : dict-like or None
Keyword arguments to be given to fit
Returns
-------
An array of the parameters for the fit | _est_unregularized_naive | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def _join_naive(params_l, threshold=0):
"""joins the results from each run of _est_<type>_naive
and returns the mean estimate of the coefficients
Parameters
----------
params_l : list
A list of arrays of coefficients.
threshold : scalar
The threshold at which the coefficients will be cut.
"""
p = len(params_l[0])
partitions = len(params_l)
params_mn = np.zeros(p)
for params in params_l:
params_mn += params
params_mn /= partitions
params_mn[np.abs(params_mn) < threshold] = 0
return params_mn | joins the results from each run of _est_<type>_naive
and returns the mean estimate of the coefficients
Parameters
----------
params_l : list
A list of arrays of coefficients.
threshold : scalar
The threshold at which the coefficients will be cut. | _join_naive | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
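A tiny numeric illustration of the join: average the per-partition coefficient vectors, then hard-threshold small entries to exactly zero (the values are chosen only for illustration).

import numpy as np

params_l = [np.array([0.9, 0.05, -1.1]),
            np.array([1.1, -0.03, -0.9]),
            np.array([1.0, 0.01, -1.0])]

params_mn = np.mean(params_l, axis=0)            # array([ 1.  ,  0.01, -1.  ])
params_mn[np.abs(params_mn) < 0.1] = 0           # threshold = 0.1
print(params_mn)                                 # [ 1.  0. -1.]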
def _calc_grad(mod, params, alpha, L1_wt, score_kwds):
"""calculates the log-likelihood gradient for the debiasing
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
params : array_like
The estimated coefficients for the current partition.
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is
a ridge fit, if 1 it is a lasso fit.
score_kwds : dict-like or None
Keyword arguments for the score function.
Returns
-------
An array-like object of the same dimension as params
Notes
-----
In general:
gradient l_k(params)
where k corresponds to the index of the partition
For OLS:
X^T(y - X params)
"""
grad = -mod.score(np.asarray(params), **score_kwds)
grad += alpha * (1 - L1_wt)
return grad | calculates the log-likelihood gradient for the debiasing
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
params : array_like
The estimated coefficients for the current partition.
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is
a ridge fit, if 1 it is a lasso fit.
score_kwds : dict-like or None
Keyword arguments for the score function.
Returns
-------
An array-like object of the same dimension as params
Notes
-----
In general:
gradient l_k(params)
where k corresponds to the index of the partition
For OLS:
X^T(y - X params) | _calc_grad | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause
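A hand-rolled check of the OLS special case named in the Notes, up to the scale factor the model's score method applies; the arrays and penalty settings below are illustrative, not taken from the source.

import numpy as np

rng = np.random.default_rng(3)
X = rng.normal(size=(50, 3))
y = X @ np.array([1.0, -0.5, 0.0]) + rng.normal(size=50)

params = np.array([0.9, -0.4, 0.1])              # candidate coefficients
neg_score = X.T @ (X @ params - y)               # -X^T(y - X params)

alpha, L1_wt = 0.5, 1.0                          # pure lasso: ridge term vanishes
grad = neg_score + alpha * (1 - L1_wt)
print(grad)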
def _calc_wdesign_mat(mod, params, hess_kwds):
"""calculates the weighted design matrix necessary to generate
the approximate inverse covariance matrix
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
params : array_like
The estimated coefficients for the current partition.
hess_kwds : dict-like or None
Keyword arguments for the hessian function.
Returns
-------
An array-like object, updated design matrix, same dimension
as mod.exog
"""
rhess = np.sqrt(mod.hessian_factor(np.asarray(params), **hess_kwds))
return rhess[:, None] * mod.exog | calculates the weighted design matrix necessary to generate
the approximate inverse covariance matrix
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
params : array_like
The estimated coefficients for the current partition.
hess_kwds : dict-like or None
Keyword arguments for the hessian function.
Returns
-------
An array-like object, updated design matrix, same dimension
as mod.exog | _calc_wdesign_mat | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def _est_regularized_debiased(mod, mnum, partitions, fit_kwds=None,
score_kwds=None, hess_kwds=None):
"""estimates the regularized fitted parameters, is the default
estimation_method for class DistributedModel.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
mnum : scalar
Index of current partition.
partitions : scalar
Total number of partitions.
fit_kwds : dict-like or None
Keyword arguments to be given to fit_regularized
score_kwds : dict-like or None
Keyword arguments for the score function.
hess_kwds : dict-like or None
Keyword arguments for the Hessian function.
Returns
-------
A tuple of parameters for regularized fit
An array-like object of the fitted parameters, params
An array-like object for the gradient
A list of array like objects for nodewise_row
A list of array like objects for nodewise_weight
"""
score_kwds = {} if score_kwds is None else score_kwds
hess_kwds = {} if hess_kwds is None else hess_kwds
if fit_kwds is None:
raise ValueError("_est_regularized_debiased currently " +
"requires that fit_kwds not be None.")
else:
alpha = fit_kwds["alpha"]
if "L1_wt" in fit_kwds:
L1_wt = fit_kwds["L1_wt"]
else:
L1_wt = 1
nobs, p = mod.exog.shape
p_part = int(np.ceil((1. * p) / partitions))
params = mod.fit_regularized(**fit_kwds).params
grad = _calc_grad(mod, params, alpha, L1_wt, score_kwds) / nobs
wexog = _calc_wdesign_mat(mod, params, hess_kwds)
nodewise_row_l = []
nodewise_weight_l = []
for idx in range(mnum * p_part, min((mnum + 1) * p_part, p)):
nodewise_row = _calc_nodewise_row(wexog, idx, alpha)
nodewise_row_l.append(nodewise_row)
nodewise_weight = _calc_nodewise_weight(wexog, nodewise_row, idx,
alpha)
nodewise_weight_l.append(nodewise_weight)
return params, grad, nodewise_row_l, nodewise_weight_l | estimates the regularized fitted parameters, is the default
estimation_method for class DistributedModel.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
mnum : scalar
Index of current partition.
partitions : scalar
Total number of partitions.
fit_kwds : dict-like or None
Keyword arguments to be given to fit_regularized
score_kwds : dict-like or None
Keyword arguments for the score function.
hess_kwds : dict-like or None
Keyword arguments for the Hessian function.
Returns
-------
A tuple of parameters for regularized fit
An array-like object of the fitted parameters, params
An array-like object for the gradient
A list of array like objects for nodewise_row
A list of array like objects for nodewise_weight | _est_regularized_debiased | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def _join_debiased(results_l, threshold=0):
"""joins the results from each run of _est_regularized_debiased
and returns the debiased estimate of the coefficients
Parameters
----------
results_l : list
A list of tuples each one containing the params, grad,
nodewise_row and nodewise_weight values for each partition.
threshold : scalar
The threshold at which the coefficients will be cut.
"""
p = len(results_l[0][0])
partitions = len(results_l)
params_mn = np.zeros(p)
grad_mn = np.zeros(p)
nodewise_row_l = []
nodewise_weight_l = []
for r in results_l:
params_mn += r[0]
grad_mn += r[1]
nodewise_row_l.extend(r[2])
nodewise_weight_l.extend(r[3])
nodewise_row_l = np.array(nodewise_row_l)
nodewise_weight_l = np.array(nodewise_weight_l)
params_mn /= partitions
grad_mn *= -1. / partitions
approx_inv_cov = _calc_approx_inv_cov(nodewise_row_l, nodewise_weight_l)
debiased_params = params_mn + approx_inv_cov.dot(grad_mn)
debiased_params[np.abs(debiased_params) < threshold] = 0
return debiased_params | joins the results from each run of _est_regularized_debiased
and returns the debiased estimate of the coefficients
Parameters
----------
results_l : list
A list of tuples each one containing the params, grad,
nodewise_row and nodewise_weight values for each partition.
threshold : scalar
The threshold at which the coefficients will be cut. | _join_debiased | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def _helper_fit_partition(self, pnum, endog, exog, fit_kwds,
init_kwds_e={}):
"""handles the model fitting for each machine. NOTE: this
is primarily handled outside of DistributedModel because
joblib cannot handle class methods.
Parameters
----------
self : DistributedModel class instance
An instance of DistributedModel.
pnum : scalar
index of current partition.
endog : array_like
endogenous data for current partition.
exog : array_like
exogenous data for current partition.
fit_kwds : dict-like
Keywords needed for the model fitting.
init_kwds_e : dict-like
Additional init_kwds to add for each partition.
Returns
-------
estimation_method result. For the default,
_est_regularized_debiased, a tuple.
"""
temp_init_kwds = self.init_kwds.copy()
temp_init_kwds.update(init_kwds_e)
model = self.model_class(endog, exog, **temp_init_kwds)
results = self.estimation_method(model, pnum, self.partitions,
fit_kwds=fit_kwds,
**self.estimation_kwds)
return results | handles the model fitting for each machine. NOTE: this
is primarily handled outside of DistributedModel because
joblib cannot handle class methods.
Parameters
----------
self : DistributedModel class instance
An instance of DistributedModel.
pnum : scalar
index of current partition.
endog : array_like
endogenous data for current partition.
exog : array_like
exogenous data for current partition.
fit_kwds : dict-like
Keywords needed for the model fitting.
init_kwds_e : dict-like
Additional init_kwds to add for each partition.
Returns
-------
estimation_method result. For the default,
_est_regularized_debiased, a tuple. | _helper_fit_partition | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def fit(self, data_generator, fit_kwds=None, parallel_method="sequential",
parallel_backend=None, init_kwds_generator=None):
"""Performs the distributed estimation using the corresponding
DistributedModel
Parameters
----------
data_generator : generator
A generator that produces a sequence of tuples where the first
element in the tuple corresponds to an endog array and the
second element corresponds to an exog array.
fit_kwds : dict-like or None
Keywords needed for the model fitting.
parallel_method : str
type of distributed estimation to be used, currently
"sequential", "joblib" and "dask" are supported.
parallel_backend : None or joblib parallel_backend object
used to allow support for more complicated backends,
ex: dask.distributed
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
that may vary based on data partition. The current usecase
is for WLS and GLS
Returns
-------
join_method result. For the default, _join_debiased, it returns a
p length array.
"""
if fit_kwds is None:
fit_kwds = {}
if parallel_method == "sequential":
results_l = self.fit_sequential(data_generator, fit_kwds,
init_kwds_generator)
elif parallel_method == "joblib":
results_l = self.fit_joblib(data_generator, fit_kwds,
parallel_backend,
init_kwds_generator)
else:
raise ValueError("parallel_method: %s is currently not supported"
% parallel_method)
params = self.join_method(results_l, **self.join_kwds)
# NOTE that currently, the dummy result model that is initialized
# here does not use any init_kwds from the init_kwds_generator event
# if it is provided. It is possible to imagine an edge case where
# this might be a problem but given that the results model instance
# does not correspond to any data partition this seems reasonable.
res_mod = self.model_class([0], [0], **self.init_kwds)
return self.results_class(res_mod, params, **self.results_kwds) | Performs the distributed estimation using the corresponding
DistributedModel
Parameters
----------
data_generator : generator
A generator that produces a sequence of tuples where the first
element in the tuple corresponds to an endog array and the
second element corresponds to an exog array.
fit_kwds : dict-like or None
Keywords needed for the model fitting.
parallel_method : str
type of distributed estimation to be used, currently
"sequential", "joblib" and "dask" are supported.
parallel_backend : None or joblib parallel_backend object
used to allow support for more complicated backends,
ex: dask.distributed
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
that may vary based on data partition. The current usecase
is for WLS and GLS
Returns
-------
join_method result. For the default, _join_debiased, it returns a
p length array. | fit | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
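An end-to-end sketch of the public entry point: split the data into partitions with a generator and run the default debiased regularized estimation. The data, penalty weight and partition count are illustrative assumptions.

import numpy as np
from statsmodels.base.distributed_estimation import DistributedModel

rng = np.random.default_rng(0)
n, p, partitions = 200, 10, 4
X = rng.normal(size=(n, p))
beta = np.zeros(p)
beta[:3] = [1.0, -1.0, 0.5]
y = X @ beta + rng.normal(size=n)

def data_gen():
    # yield one (endog, exog) tuple per partition
    for idx in np.array_split(np.arange(n), partitions):
        yield y[idx], X[idx]

dmod = DistributedModel(partitions)              # defaults: OLS, debiased join
dres = dmod.fit(data_gen(), fit_kwds={"alpha": 0.2})
print(dres.params)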
def fit_sequential(self, data_generator, fit_kwds,
init_kwds_generator=None):
"""Sequentially performs the distributed estimation using
the corresponding DistributedModel
Parameters
----------
data_generator : generator
A generator that produces a sequence of tuples where the first
element in the tuple corresponds to an endog array and the
second element corresponds to an exog array.
fit_kwds : dict-like
Keywords needed for the model fitting.
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
that may vary based on data partition. The current usecase
is for WLS and GLS
Returns
-------
join_method result. For the default, _join_debiased, it returns a
p length array.
"""
results_l = []
if init_kwds_generator is None:
for pnum, (endog, exog) in enumerate(data_generator):
results = _helper_fit_partition(self, pnum, endog, exog,
fit_kwds)
results_l.append(results)
else:
tup_gen = enumerate(zip(data_generator,
init_kwds_generator))
for pnum, ((endog, exog), init_kwds_e) in tup_gen:
results = _helper_fit_partition(self, pnum, endog, exog,
fit_kwds, init_kwds_e)
results_l.append(results)
return results_l | Sequentially performs the distributed estimation using
the corresponding DistributedModel
Parameters
----------
data_generator : generator
A generator that produces a sequence of tuples where the first
element in the tuple corresponds to an endog array and the
second element corresponds to an exog array.
fit_kwds : dict-like
Keywords needed for the model fitting.
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
that may vary based on data partition. The current usecase
is for WLS and GLS
Returns
-------
join_method result. For the default, _join_debiased, it returns a
p length array. | fit_sequential | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def fit_joblib(self, data_generator, fit_kwds, parallel_backend,
init_kwds_generator=None):
"""Performs the distributed estimation in parallel using joblib
Parameters
----------
data_generator : generator
A generator that produces a sequence of tuples where the first
element in the tuple corresponds to an endog array and the
second element corresponds to an exog array.
fit_kwds : dict-like
Keywords needed for the model fitting.
parallel_backend : None or joblib parallel_backend object
used to allow support for more complicated backends,
ex: dask.distributed
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
that may vary based on data partition. The current usecase
is for WLS and GLS
Returns
-------
join_method result. For the default, _join_debiased, it returns a
p length array.
"""
from statsmodels.tools.parallel import parallel_func
par, f, n_jobs = parallel_func(_helper_fit_partition, self.partitions)
if parallel_backend is None and init_kwds_generator is None:
results_l = par(f(self, pnum, endog, exog, fit_kwds)
for pnum, (endog, exog)
in enumerate(data_generator))
elif parallel_backend is not None and init_kwds_generator is None:
with parallel_backend:
results_l = par(f(self, pnum, endog, exog, fit_kwds)
for pnum, (endog, exog)
in enumerate(data_generator))
elif parallel_backend is None and init_kwds_generator is not None:
tup_gen = enumerate(zip(data_generator, init_kwds_generator))
results_l = par(f(self, pnum, endog, exog, fit_kwds, init_kwds)
for pnum, ((endog, exog), init_kwds)
in tup_gen)
elif parallel_backend is not None and init_kwds_generator is not None:
tup_gen = enumerate(zip(data_generator, init_kwds_generator))
with parallel_backend:
results_l = par(f(self, pnum, endog, exog, fit_kwds, init_kwds)
for pnum, ((endog, exog), init_kwds)
in tup_gen)
return results_l | Performs the distributed estimation in parallel using joblib
Parameters
----------
data_generator : generator
A generator that produces a sequence of tuples where the first
element in the tuple corresponds to an endog array and the
second element corresponds to an exog array.
fit_kwds : dict-like
Keywords needed for the model fitting.
parallel_backend : None or joblib parallel_backend object
used to allow support for more complicated backends,
ex: dask.distributed
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
that may vary based on data partition. The current usecase
is for WLS and GLS
Returns
-------
join_method result. For the default, _join_debiased, it returns a
p length array. | fit_joblib | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def predict(self, exog, *args, **kwargs):
"""Calls self.model.predict for the provided exog. See
Results.predict.
Parameters
----------
exog : array_like NOT optional
The values for which we want to predict. Unlike standard
predict, this is NOT optional since the data in self.model
is fake.
*args :
Some models can take additional arguments. See the
predict method of the model for the details.
**kwargs :
Some models can take additional keyword arguments. See the
predict method of the model for the details.
Returns
-------
prediction : ndarray, pandas.Series or pandas.DataFrame
See self.model.predict
"""
return self.model.predict(self.params, exog, *args, **kwargs) | Calls self.model.predict for the provided exog. See
Results.predict.
Parameters
----------
exog : array_like NOT optional
The values for which we want to predict. Unlike standard
predict, this is NOT optional since the data in self.model
is fake.
*args :
Some models can take additional arguments. See the
predict method of the model for the details.
**kwargs :
Some models can take additional keyword arguments. See the
predict method of the model for the details.
Returns
-------
prediction : ndarray, pandas.Series or pandas.DataFrame
See self.model.predict | predict | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def _gen_npfuncs(k, L1_wt, alpha, loglike_kwds, score_kwds, hess_kwds):
"""
Negative penalized log-likelihood functions.
Returns the negative penalized log-likelihood, its derivative, and
its Hessian. The penalty only includes the smooth (L2) term.
All three functions have argument signature (x, model), where
``x`` is a point in the parameter space and ``model`` is an
arbitrary statsmodels regression model.
"""
def nploglike(params, model):
nobs = model.nobs
pen_llf = alpha[k] * (1 - L1_wt) * np.sum(params**2) / 2
llf = model.loglike(np.r_[params], **loglike_kwds)
return - llf / nobs + pen_llf
def npscore(params, model):
nobs = model.nobs
pen_grad = alpha[k] * (1 - L1_wt) * params
gr = -model.score(np.r_[params], **score_kwds)[0] / nobs
return gr + pen_grad
def nphess(params, model):
nobs = model.nobs
pen_hess = alpha[k] * (1 - L1_wt)
h = -model.hessian(np.r_[params], **hess_kwds)[0, 0] / nobs + pen_hess
return h
return nploglike, npscore, nphess | Negative penalized log-likelihood functions.
Returns the negative penalized log-likelihood, its derivative, and
its Hessian. The penalty only includes the smooth (L2) term.
All three functions have argument signature (x, model), where
``x`` is a point in the parameter space and ``model`` is an
arbitrary statsmodels regression model. | _gen_npfuncs | python | statsmodels/statsmodels | statsmodels/base/elastic_net.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/elastic_net.py | BSD-3-Clause |
def fit_elasticnet(model, method="coord_descent", maxiter=100,
alpha=0., L1_wt=1., start_params=None, cnvrg_tol=1e-7,
zero_tol=1e-8, refit=False, check_step=True,
loglike_kwds=None, score_kwds=None, hess_kwds=None):
"""
Return an elastic net regularized fit to a regression model.
Parameters
----------
model : model object
A statsmodels object implementing ``loglike``, ``score``, and
``hessian``.
method : {'coord_descent'}
Only the coordinate descent algorithm is implemented.
maxiter : int
The maximum number of iteration cycles (an iteration cycle
involves running coordinate descent on all variables).
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is
a ridge fit, if 1 it is a lasso fit.
start_params : array_like
Starting values for `params`.
cnvrg_tol : scalar
If `params` changes by less than this amount (in sup-norm)
in one iteration cycle, the algorithm terminates with
convergence.
zero_tol : scalar
Any estimated coefficient smaller than this value is
replaced with zero.
refit : bool
If True, the model is refit using only the variables that have
non-zero coefficients in the regularized fit. The refitted
model is not regularized.
check_step : bool
If True, confirm that the first step is an improvement and search
further if it is not.
loglike_kwds : dict-like or None
Keyword arguments for the log-likelihood function.
score_kwds : dict-like or None
Keyword arguments for the score function.
hess_kwds : dict-like or None
Keyword arguments for the Hessian function.
Returns
-------
Results
A results object.
Notes
-----
The ``elastic net`` penalty is a combination of L1 and L2
penalties.
The function that is minimized is:
-loglike/n + alpha*((1-L1_wt)*|params|_2^2/2 + L1_wt*|params|_1)
where |*|_1 and |*|_2 are the L1 and L2 norms.
The computational approach used here is to obtain a quadratic
approximation to the smooth part of the target function:
-loglike/n + alpha*(1-L1_wt)*|params|_2^2/2
then repeatedly optimize the L1 penalized version of this function
along coordinate axes.
"""
k_exog = model.exog.shape[1]
loglike_kwds = {} if loglike_kwds is None else loglike_kwds
score_kwds = {} if score_kwds is None else score_kwds
hess_kwds = {} if hess_kwds is None else hess_kwds
if np.isscalar(alpha):
alpha = alpha * np.ones(k_exog)
# Define starting params
if start_params is None:
params = np.zeros(k_exog)
else:
params = start_params.copy()
btol = 1e-4
params_zero = np.zeros(len(params), dtype=bool)
init_args = model._get_init_kwds()
# we do not need a copy of init_args b/c get_init_kwds provides new dict
init_args['hasconst'] = False
model_offset = init_args.pop('offset', None)
if 'exposure' in init_args and init_args['exposure'] is not None:
if model_offset is None:
model_offset = np.log(init_args.pop('exposure'))
else:
model_offset += np.log(init_args.pop('exposure'))
fgh_list = [
_gen_npfuncs(k, L1_wt, alpha, loglike_kwds, score_kwds, hess_kwds)
for k in range(k_exog)]
converged = False
for itr in range(maxiter):
# Sweep through the parameters
params_save = params.copy()
for k in range(k_exog):
# Under the active set method, if a parameter becomes
# zero we do not try to change it again.
# TODO : give the user the option to switch this off
if params_zero[k]:
continue
# Set the offset to account for the variables that are
# being held fixed in the current coordinate
# optimization.
params0 = params.copy()
params0[k] = 0
offset = np.dot(model.exog, params0)
if model_offset is not None:
offset += model_offset
# Create a one-variable model for optimization.
model_1var = model.__class__(
model.endog, model.exog[:, k], offset=offset, **init_args)
# Do the one-dimensional optimization.
func, grad, hess = fgh_list[k]
params[k] = _opt_1d(
func, grad, hess, model_1var, params[k], alpha[k]*L1_wt,
tol=btol, check_step=check_step)
# Update the active set
if itr > 0 and np.abs(params[k]) < zero_tol:
params_zero[k] = True
params[k] = 0.
# Check for convergence
pchange = np.max(np.abs(params - params_save))
if pchange < cnvrg_tol:
converged = True
break
# Set approximate zero coefficients to be exactly zero
params[np.abs(params) < zero_tol] = 0
if not refit:
results = RegularizedResults(model, params)
results.converged = converged
return RegularizedResultsWrapper(results)
# Fit the reduced model to get standard errors and other
# post-estimation results.
ii = np.flatnonzero(params)
cov = np.zeros((k_exog, k_exog))
init_args = {k: getattr(model, k, None) for k in model._init_keys}
if len(ii) > 0:
model1 = model.__class__(
model.endog, model.exog[:, ii], **init_args)
rslt = model1.fit()
params[ii] = rslt.params
cov[np.ix_(ii, ii)] = rslt.normalized_cov_params
else:
# Hack: no variables were selected but we need to run fit in
# order to get the correct results class. So just fit a model
# with one variable.
model1 = model.__class__(model.endog, model.exog[:, 0], **init_args)
rslt = model1.fit(maxiter=0)
# fit may return a results or a results wrapper
if issubclass(rslt.__class__, wrap.ResultsWrapper):
klass = rslt._results.__class__
else:
klass = rslt.__class__
# Not all models have a scale
if hasattr(rslt, 'scale'):
scale = rslt.scale
else:
scale = 1.
# The degrees of freedom should reflect the number of parameters
# in the refit model, not including the zeros that are displayed
# to indicate which variables were dropped. See issue #1723 for
# discussion about setting df parameters in model and results
# classes.
p, q = model.df_model, model.df_resid
model.df_model = len(ii)
model.df_resid = model.nobs - model.df_model
# Assuming a standard signature for creating results classes.
refit = klass(model, params, cov, scale=scale)
refit.regularized = True
refit.converged = converged
refit.method = method
refit.fit_history = {'iteration': itr + 1}
# Restore df in model class, see issue #1723 for discussion.
model.df_model, model.df_resid = p, q
return refit | Return an elastic net regularized fit to a regression model.
Parameters
----------
model : model object
A statsmodels object implementing ``loglike``, ``score``, and
``hessian``.
method : {'coord_descent'}
Only the coordinate descent algorithm is implemented.
maxiter : int
The maximum number of iteration cycles (an iteration cycle
involves running coordinate descent on all variables).
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is
a ridge fit, if 1 it is a lasso fit.
start_params : array_like
Starting values for `params`.
cnvrg_tol : scalar
If `params` changes by less than this amount (in sup-norm)
in one iteration cycle, the algorithm terminates with
convergence.
zero_tol : scalar
Any estimated coefficient smaller than this value is
replaced with zero.
refit : bool
If True, the model is refit using only the variables that have
non-zero coefficients in the regularized fit. The refitted
model is not regularized.
check_step : bool
If True, confirm that the first step is an improvement and search
further if it is not.
loglike_kwds : dict-like or None
Keyword arguments for the log-likelihood function.
score_kwds : dict-like or None
Keyword arguments for the score function.
hess_kwds : dict-like or None
Keyword arguments for the Hessian function.
Returns
-------
Results
A results object.
Notes
-----
The ``elastic net`` penalty is a combination of L1 and L2
penalties.
The function that is minimized is:
-loglike/n + alpha*((1-L1_wt)*|params|_2^2/2 + L1_wt*|params|_1)
where |*|_1 and |*|_2 are the L1 and L2 norms.
The computational approach used here is to obtain a quadratic
approximation to the smooth part of the target function:
-loglike/n + alpha*(1-L1_wt)*|params|_2^2/2
then repeatedly optimize the L1 penalized version of this function
along coordinate axes. | fit_elasticnet | python | statsmodels/statsmodels | statsmodels/base/elastic_net.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/elastic_net.py | BSD-3-Clause |
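In user code, fit_elasticnet is normally reached through a model's fit_regularized method rather than called directly; a minimal sketch with assumed data and penalty settings follows.
# Illustrative only: elastic net fit of OLS; alpha and L1_wt values are assumptions.
import numpy as np
from statsmodels.regression.linear_model import OLS

rng = np.random.default_rng(1)
X = rng.standard_normal((200, 4))
y = X @ np.array([2.0, 0.0, 0.0, -1.0]) + rng.standard_normal(200)

# alpha scales the total penalty; L1_wt splits it between the L1 and L2 terms
res = OLS(y, X).fit_regularized(method="elastic_net", alpha=0.05, L1_wt=0.7)
print(res.params)           # some coefficients shrink exactly to zero
print(res.fittedvalues[:5])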
def _opt_1d(func, grad, hess, model, start, L1_wt, tol,
check_step=True):
"""
One-dimensional helper for elastic net.
Parameters
----------
func : function
A smooth function of a single variable to be optimized
with L1 penalty.
grad : function
The gradient of `func`.
hess : function
The Hessian of `func`.
model : statsmodels model
The model being fit.
start : real
A starting value for the function argument
L1_wt : non-negative real
The weight for the L1 penalty function.
tol : non-negative real
A convergence threshold.
check_step : bool
If True, check that the first step is an improvement and
use bisection if it is not. If False, return after the
first step regardless.
Notes
-----
``func``, ``grad``, and ``hess`` have argument signature (x,
model), where ``x`` is a point in the parameter space and
``model`` is the model being fit.
If the log-likelihood for the model is exactly quadratic, the
global minimum is returned in one step. Otherwise numerical
bisection is used.
Returns
-------
The argmin of the objective function.
"""
# Overview:
# We want to minimize L(x) + L1_wt*abs(x), where L() is a smooth
# loss function that includes the log-likelihood and L2 penalty.
# This is a 1-dimensional optimization. If L(x) is exactly
# quadratic we can solve for the argmin exactly. Otherwise we
# approximate L(x) with a quadratic function Q(x) and try to use
# the minimizer of Q(x) + L1_wt*abs(x). But if this yields an
# uphill step for the actual target function L(x) + L1_wt*abs(x),
# then we fall back to an expensive line search. The line search
# is never needed for OLS.
x = start
f = func(x, model)
b = grad(x, model)
c = hess(x, model)
d = b - c*x
# The optimum is achieved by hard thresholding to zero
if L1_wt > np.abs(d):
return 0.
# x + h is the minimizer of the Q(x) + L1_wt*abs(x)
if d >= 0:
h = (L1_wt - b) / c
elif d < 0:
h = -(L1_wt + b) / c
else:
return np.nan
# If the new point is not uphill for the target function, take it
# and return. This check is a bit expensive and un-necessary for
# OLS
if not check_step:
return x + h
f1 = func(x + h, model) + L1_wt*np.abs(x + h)
if f1 <= f + L1_wt*np.abs(x) + 1e-10:
return x + h
# Fallback for models where the loss is not quadratic
from scipy.optimize import brent
x_opt = brent(func, args=(model,), brack=(x-1, x+1), tol=tol)
return x_opt | One-dimensional helper for elastic net.
Parameters
----------
func : function
A smooth function of a single variable to be optimized
with L1 penalty.
grad : function
The gradient of `func`.
hess : function
The Hessian of `func`.
model : statsmodels model
The model being fit.
start : real
A starting value for the function argument
L1_wt : non-negative real
The weight for the L1 penalty function.
tol : non-negative real
A convergence threshold.
check_step : bool
If True, check that the first step is an improvement and
use bisection if it is not. If False, return after the
first step regardless.
Notes
-----
``func``, ``grad``, and ``hess`` have argument signature (x,
model), where ``x`` is a point in the parameter space and
``model`` is the model being fit.
If the log-likelihood for the model is exactly quadratic, the
global minimum is returned in one step. Otherwise numerical
bisection is used.
Returns
-------
The argmin of the objective function. | _opt_1d | python | statsmodels/statsmodels | statsmodels/base/elastic_net.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/elastic_net.py | BSD-3-Clause |
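A small self-contained check (not from the source) that, for an exactly quadratic loss 0.5*c*(x - mu)**2, the step taken in _opt_1d coincides with the usual soft-thresholding solution; the variable names mirror the code above and the numbers are arbitrary.
# Illustrative verification of the quadratic branch of _opt_1d.
import numpy as np

def soft_threshold(mu, c, l1):
    # closed-form argmin of 0.5*c*(x - mu)**2 + l1*|x|
    return np.sign(mu) * max(abs(mu) - l1 / c, 0.0)

def one_step(x, mu, c, l1):
    # mirrors the b, c, d bookkeeping in _opt_1d for a quadratic loss
    b = c * (x - mu)          # gradient at x
    d = b - c * x             # equals -c*mu here
    if l1 > abs(d):
        return 0.0            # hard threshold to zero
    h = (l1 - b) / c if d >= 0 else -(l1 + b) / c
    return x + h

for mu in (-2.0, -0.1, 0.3, 1.5):
    assert np.isclose(one_step(0.7, mu, c=2.0, l1=0.5),
                      soft_threshold(mu, 2.0, 0.5))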
def fittedvalues(self):
"""
The predicted values from the model at the estimated parameters.
"""
return self.model.predict(self.params) | The predicted values from the model at the estimated parameters. | fittedvalues | python | statsmodels/statsmodels | statsmodels/base/elastic_net.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/elastic_net.py | BSD-3-Clause |
def _data_gen(endog, exog, partitions):
"""partitions data"""
n_exog = exog.shape[0]
n_part = np.floor(n_exog / partitions)
rem = n_exog - n_part * partitions
stp = 0
while stp < (partitions - 1):
ii = int(n_part * stp)
jj = int(n_part * (stp + 1))
yield endog[ii:jj], exog[ii:jj, :]
stp += 1
ii = int(n_part * stp)
jj = int(n_part * (stp + 1) + rem)
yield endog[ii:jj], exog[ii:jj, :] | partitions data | _data_gen | python | statsmodels/statsmodels | statsmodels/base/tests/test_distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/tests/test_distributed_estimation.py | BSD-3-Clause |
def _rep_data_gen(endog, exog, partitions):
"""partitions data"""
n_exog = exog.shape[0]
n_part = np.ceil(n_exog / partitions)
ii = 0
while ii < n_exog:
yield endog, exog
ii += int(n_part) | partitions data | test_repeat_partition._rep_data_gen | python | statsmodels/statsmodels | statsmodels/base/tests/test_distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/tests/test_distributed_estimation.py | BSD-3-Clause |
def test_repeat_partition():
# tests that if we use identical partitions the average is the same
# as the estimate for the full data
np.random.seed(435265)
N = 200
p = 10
m = 1
beta = np.random.normal(size=p)
beta = beta * np.random.randint(0, 2, p)
X = np.random.normal(size=(N, p))
y = X.dot(beta) + np.random.normal(size=N)
def _rep_data_gen(endog, exog, partitions):
"""partitions data"""
n_exog = exog.shape[0]
n_part = np.ceil(n_exog / partitions)
ii = 0
while ii < n_exog:
yield endog, exog
ii += int(n_part)
nv_mod = DistributedModel(m, estimation_method=_est_regularized_naive,
join_method=_join_naive)
fitOLSnv = nv_mod.fit(_rep_data_gen(y, X, m), fit_kwds={"alpha": 0.1})
ols_mod = OLS(y, X)
fitOLS = ols_mod.fit_regularized(alpha=0.1)
assert_allclose(fitOLSnv.params, fitOLS.params) | partitions data | test_repeat_partition | python | statsmodels/statsmodels | statsmodels/base/tests/test_distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/tests/test_distributed_estimation.py | BSD-3-Clause |
def update(self):
"""
Cycle through all Gibbs updates.
"""
self.update_data()
# Need to update data first
self.update_mean()
self.update_cov() | Cycle through all Gibbs updates. | update | python | statsmodels/statsmodels | statsmodels/imputation/bayes_mi.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/bayes_mi.py | BSD-3-Clause |
def update_data(self):
"""
Gibbs update of the missing data values.
"""
for ix in self.patterns:
i = ix[0]
ix_miss = np.flatnonzero(self.mask[i, :])
ix_obs = np.flatnonzero(~self.mask[i, :])
mm = self.mean[ix_miss]
mo = self.mean[ix_obs]
voo = self.cov[ix_obs, :][:, ix_obs]
vmm = self.cov[ix_miss, :][:, ix_miss]
vmo = self.cov[ix_miss, :][:, ix_obs]
r = self._data[ix, :][:, ix_obs] - mo
cm = mm + np.dot(vmo, np.linalg.solve(voo, r.T)).T
cv = vmm - np.dot(vmo, np.linalg.solve(voo, vmo.T))
cs = np.linalg.cholesky(cv)
u = np.random.normal(size=(len(ix), len(ix_miss)))
self._data[np.ix_(ix, ix_miss)] = cm + np.dot(u, cs.T)
# Set the user-visible data set.
if self.exog_names is not None:
self.data = pd.DataFrame(
self._data,
columns=self.exog_names,
copy=False)
else:
self.data = self._data | Gibbs update of the missing data values. | update_data | python | statsmodels/statsmodels | statsmodels/imputation/bayes_mi.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/bayes_mi.py | BSD-3-Clause |
def update_mean(self):
"""
Gibbs update of the mean vector.
Do not call until update_data has been called once.
"""
# https://stats.stackexchange.com/questions/28744/multivariate-normal-posterior
# Posterior covariance matrix of the mean
cm = np.linalg.solve(self.cov/self.nobs + self.mean_prior,
self.mean_prior / self.nobs)
cm = np.dot(self.cov, cm)
# Posterior mean of the mean
vm = np.linalg.solve(self.cov, self._data.sum(0))
vm = np.dot(cm, vm)
# Sample
r = np.linalg.cholesky(cm)
self.mean = vm + np.dot(r, np.random.normal(0, 1, self.nvar)) | Gibbs update of the mean vector.
Do not call until update_data has been called once. | update_mean | python | statsmodels/statsmodels | statsmodels/imputation/bayes_mi.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/bayes_mi.py | BSD-3-Clause |
def update_cov(self):
"""
Gibbs update of the covariance matrix.
Do not call until update_data has been called once.
"""
# https://stats.stackexchange.com/questions/50844/estimating-the-covariance-posterior-distribution-of-a-multivariate-gaussian
r = self._data - self.mean
gr = np.dot(r.T, r)
a = gr + self.cov_prior
df = int(np.ceil(self.nobs + self.cov_prior_df))
r = np.linalg.cholesky(np.linalg.inv(a))
x = np.dot(np.random.normal(size=(df, self.nvar)), r.T)
ma = np.dot(x.T, x)
self.cov = np.linalg.inv(ma) | Gibbs update of the covariance matrix.
Do not call until update_data has been called once. | update_cov | python | statsmodels/statsmodels | statsmodels/imputation/bayes_mi.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/bayes_mi.py | BSD-3-Clause |
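Taken together, update_data, update_mean and update_cov form one Gibbs sweep; a minimal sketch with synthetic data and an assumed 10% missingness pattern:
# Illustrative Gibbs sampling run with BayesGaussMI (values are made up).
import numpy as np
from statsmodels.imputation.bayes_mi import BayesGaussMI

rng = np.random.default_rng(2)
x = rng.multivariate_normal([0.0, 1.0], [[1.0, 0.5], [0.5, 2.0]], size=300)
x[rng.random(300) < 0.1, 0] = np.nan   # knock out ~10% of the first column

imp = BayesGaussMI(x)
for _ in range(200):   # each update() call cycles data, mean, and covariance
    imp.update()
print(imp.mean)        # posterior draw of the mean vector
print(imp.cov)         # posterior draw of the covariance matrix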
def fit(self, results_cb=None):
"""
Impute datasets, fit models, and pool results.
Parameters
----------
results_cb : function, optional
If provided, each results instance r is passed through `results_cb`,
then appended to the `results` attribute of the MIResults object.
To save complete results, use `results_cb=lambda x: x`. The default
behavior is to save no results.
Returns
-------
A MIResults object.
"""
par, cov = [], []
all_results = []
for k in range(self.nrep):
for _ in range(self.skip+1):
self.imp.update()
da = self.imp.data
if self.xfunc is not None:
da = self.xfunc(da)
if self.formula is None:
model = self.model(*self.model_args_fn(da),
**self.model_kwds_fn(da))
else:
model = self.model.from_formula(
self.formula, *self.model_args_fn(da),
**self.model_kwds_fn(da))
result = model.fit(*self.fit_args(da), **self.fit_kwds(da))
if results_cb is not None:
all_results.append(results_cb(result))
par.append(np.asarray(result.params.copy()))
cov.append(np.asarray(result.cov_params().copy()))
params, cov_params, fmi = self._combine(par, cov)
r = MIResults(self, model, params, cov_params)
r.fmi = fmi
r.results = all_results
return r | Impute datasets, fit models, and pool results.
Parameters
----------
results_cb : function, optional
If provided, each results instance r is passed through `results_cb`,
then appended to the `results` attribute of the MIResults object.
To save complete results, use `results_cb=lambda x: x`. The default
behavior is to save no results.
Returns
-------
A MIResults object. | fit | python | statsmodels/statsmodels | statsmodels/imputation/bayes_mi.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/bayes_mi.py | BSD-3-Clause |
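A sketch of the full round trip through MI, pooling an OLS fit over imputed datasets; the endog/exog column split and the nrep and skip values are illustrative assumptions, not values from the source.
# Illustrative multiple-imputation analysis (synthetic data).
import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.imputation.bayes_mi import BayesGaussMI, MI

rng = np.random.default_rng(3)
x1 = rng.standard_normal(250)
y = 1.0 + 2.0 * x1 + rng.standard_normal(250)
data = np.column_stack([y, x1])
data[rng.random(250) < 0.15, 1] = np.nan   # missing values in the regressor

def args_fn(d):
    # map an imputed dataset to (endog, exog) for OLS
    d = np.asarray(d)
    return d[:, 0], np.column_stack([np.ones(d.shape[0]), d[:, 1]])

mi = MI(BayesGaussMI(data), OLS, model_args_fn=args_fn, nrep=10, skip=5)
print(mi.fit().summary())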
def summary(self, title=None, alpha=.05):
"""
Summarize the results of running multiple imputation.
Parameters
----------
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
Significance level for the confidence intervals
Returns
-------
smry : Summary instance
This holds the summary tables and text, which can be
printed or converted to various output formats.
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
float_format = "%8.3f"
info = {}
info["Method:"] = "MI"
info["Model:"] = self.mi.model.__name__
info["Dependent variable:"] = self._model.endog_names
info["Sample size:"] = "%d" % self.mi.imp.data.shape[0]
info["Num. imputations"] = "%d" % self.mi.nrep
smry.add_dict(info, align='l', float_format=float_format)
param = summary2.summary_params(self, alpha=alpha)
param["FMI"] = self.fmi
smry.add_df(param, float_format=float_format)
smry.add_title(title=title, results=self)
return smry | Summarize the results of running multiple imputation.
Parameters
----------
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
Significance level for the confidence intervals
Returns
-------
smry : Summary instance
This holds the summary tables and text, which can be
printed or converted to various output formats. | summary | python | statsmodels/statsmodels | statsmodels/imputation/bayes_mi.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/bayes_mi.py | BSD-3-Clause |
def _ros_sort(df, observations, censorship, warn=False):
"""
This function prepares a dataframe for ROS.
It sorts ascending with
left-censored observations first. Censored observations larger than
the maximum uncensored observations are removed from the dataframe.
Parameters
----------
df : DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
Returns
------
sorted_df : DataFrame
The sorted dataframe with all columns dropped except the
observation and censorship columns.
"""
# separate uncensored data from censored data
censored = df[df[censorship]].sort_values(observations, axis=0)
uncensored = df[~df[censorship]].sort_values(observations, axis=0)
if censored[observations].max() > uncensored[observations].max():
censored = censored[censored[observations] <= uncensored[observations].max()]
if warn:
msg = ("Dropping censored observations greater than "
"the max uncensored observation.")
warnings.warn(msg)
combined = pd.concat([censored, uncensored], axis=0)
return combined[[observations, censorship]].reset_index(drop=True) | This function prepares a dataframe for ROS.
It sorts ascending with
left-censored observations first. Censored observations larger than
the maximum uncensored observations are removed from the dataframe.
Parameters
----------
df : DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
Returns
------
sorted_df : DataFrame
The sorted dataframe with all columns dropped except the
observation and censorship columns. | _ros_sort | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def nuncen_above(row):
""" A, the number of uncensored obs above the given threshold.
"""
# index of observations above the lower_dl DL
above = df[observations] >= row['lower_dl']
# index of observations below the upper_dl DL
below = df[observations] < row['upper_dl']
# index of non-detect observations
detect = ~df[censorship]
# return the number of observations where all conditions are True
return df[above & below & detect].shape[0] | A, the number of uncensored obs above the given threshold. | cohn_numbers.nuncen_above | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def nobs_below(row):
""" B, the number of observations (cen & uncen) below the given
threshold
"""
# index of data less than the lower_dl DL
less_than = df[observations] < row['lower_dl']
# index of data less than or equal to the lower_dl DL
less_thanequal = df[observations] <= row['lower_dl']
# index of detects, non-detects
uncensored = ~df[censorship]
censored = df[censorship]
# number observations less than or equal to lower_dl DL and non-detect
LTE_censored = df[less_thanequal & censored].shape[0]
# number of observations less than lower_dl DL and detected
LT_uncensored = df[less_than & uncensored].shape[0]
# return the sum
return LTE_censored + LT_uncensored | B, the number of observations (cen & uncen) below the given
threshold | cohn_numbers.nobs_below | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def ncen_equal(row):
""" C, the number of censored observations at the given
threshold.
"""
censored_index = df[censorship]
censored_data = df[observations][censored_index]
censored_below = censored_data == row['lower_dl']
return censored_below.sum() | C, the number of censored observations at the given
threshold. | cohn_numbers.ncen_equal | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def set_upper_limit(cohn):
""" Sets the upper_dl DL for each row of the Cohn dataframe. """
if cohn.shape[0] > 1:
return cohn['lower_dl'].shift(-1).fillna(value=np.inf)
else:
return [np.inf] | Sets the upper_dl DL for each row of the Cohn dataframe. | cohn_numbers.set_upper_limit | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def compute_PE(A, B):
""" Computes the probability of excedance for each row of the
Cohn dataframe. """
N = len(A)
PE = np.empty(N, dtype='float64')
PE[-1] = 0.0
for j in range(N-2, -1, -1):
PE[j] = PE[j+1] + (1 - PE[j+1]) * A[j] / (A[j] + B[j])
return PE | Computes the probability of exceedance for each row of the
Cohn dataframe. | cohn_numbers.compute_PE | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def cohn_numbers(df, observations, censorship):
r"""
Computes the Cohn numbers for the detection limits in the dataset.
The Cohn Numbers are:
- :math:`A_j =` the number of uncensored obs above the jth
threshold.
- :math:`B_j =` the number of observations (cen & uncen) below
the jth threshold.
- :math:`C_j =` the number of censored observations at the jth
threshold.
- :math:`\mathrm{PE}_j =` the probability of exceeding the jth
threshold
- :math:`\mathrm{DL}_j =` the unique, sorted detection limits
- :math:`\mathrm{DL}_{j+1} = \mathrm{DL}_j` shifted down a
single index (row)
Parameters
----------
dataframe : DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
Returns
-------
cohn : DataFrame
"""
def nuncen_above(row):
""" A, the number of uncensored obs above the given threshold.
"""
# index of observations above the lower_dl DL
above = df[observations] >= row['lower_dl']
# index of observations below the upper_dl DL
below = df[observations] < row['upper_dl']
# index of non-detect observations
detect = ~df[censorship]
# return the number of observations where all conditions are True
return df[above & below & detect].shape[0]
def nobs_below(row):
""" B, the number of observations (cen & uncen) below the given
threshold
"""
# index of data less than the lower_dl DL
less_than = df[observations] < row['lower_dl']
# index of data less than or equal to the lower_dl DL
less_thanequal = df[observations] <= row['lower_dl']
# index of detects, non-detects
uncensored = ~df[censorship]
censored = df[censorship]
# number observations less than or equal to lower_dl DL and non-detect
LTE_censored = df[less_thanequal & censored].shape[0]
# number of observations less than lower_dl DL and detected
LT_uncensored = df[less_than & uncensored].shape[0]
# return the sum
return LTE_censored + LT_uncensored
def ncen_equal(row):
""" C, the number of censored observations at the given
threshold.
"""
censored_index = df[censorship]
censored_data = df[observations][censored_index]
censored_below = censored_data == row['lower_dl']
return censored_below.sum()
def set_upper_limit(cohn):
""" Sets the upper_dl DL for each row of the Cohn dataframe. """
if cohn.shape[0] > 1:
return cohn['lower_dl'].shift(-1).fillna(value=np.inf)
else:
return [np.inf]
def compute_PE(A, B):
""" Computes the probability of excedance for each row of the
Cohn dataframe. """
N = len(A)
PE = np.empty(N, dtype='float64')
PE[-1] = 0.0
for j in range(N-2, -1, -1):
PE[j] = PE[j+1] + (1 - PE[j+1]) * A[j] / (A[j] + B[j])
return PE
# unique, sorted detection limits
censored_data = df[censorship]
DLs = pd.unique(df.loc[censored_data, observations])
DLs.sort()
# if there is a observations smaller than the minimum detection limit,
# add that value to the array
if DLs.shape[0] > 0:
if df[observations].min() < DLs.min():
DLs = np.hstack([df[observations].min(), DLs])
# create a dataframe
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
cohn = pd.DataFrame(DLs, columns=['lower_dl'])
cohn.loc[:, 'upper_dl'] = set_upper_limit(cohn)
cohn.loc[:, 'nuncen_above'] = cohn.apply(nuncen_above, axis=1)
cohn.loc[:, 'nobs_below'] = cohn.apply(nobs_below, axis=1)
cohn.loc[:, 'ncen_equal'] = cohn.apply(ncen_equal, axis=1)
cohn = cohn.reindex(range(DLs.shape[0] + 1))
cohn.loc[:, 'prob_exceedance'] = compute_PE(cohn['nuncen_above'], cohn['nobs_below'])
else:
dl_cols = ['lower_dl', 'upper_dl', 'nuncen_above',
'nobs_below', 'ncen_equal', 'prob_exceedance']
cohn = pd.DataFrame(np.empty((0, len(dl_cols))), columns=dl_cols)
return cohn | def nuncen_above(row):
""" A, the number of uncensored obs above the given threshold. | cohn_numbers | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def _detection_limit_index(obs, cohn):
"""
Locates the corresponding detection limit for each observation.
Basically, creates an array of indices for the detection limits
(Cohn numbers) corresponding to each data point.
Parameters
----------
obs : float
A single observation from the larger dataset.
cohn : DataFrame
DataFrame of Cohn numbers.
Returns
-------
det_limit_index : int
The index of the corresponding detection limit in `cohn`
See Also
--------
cohn_numbers
"""
if cohn.shape[0] > 0:
index, = np.where(cohn['lower_dl'] <= obs)
det_limit_index = index[-1]
else:
det_limit_index = 0
return det_limit_index | Locates the corresponding detection limit for each observation.
Basically, creates an array of indices for the detection limits
(Cohn numbers) corresponding to each data point.
Parameters
----------
obs : float
A single observation from the larger dataset.
cohn : DataFrame
DataFrame of Cohn numbers.
Returns
-------
det_limit_index : int
The index of the corresponding detection limit in `cohn`
See Also
--------
cohn_numbers | _detection_limit_index | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def _ros_group_rank(df, dl_idx, censorship):
"""
Ranks each observation within the data groups.
In this case, the groups are defined by the record's detection
limit index and censorship status.
Parameters
----------
df : DataFrame
dl_idx : str
Name of the column in the dataframe containing the index of the
observations' corresponding detection limit in the `cohn`
dataframe.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
Returns
-------
ranks : ndarray
Array of ranks for the dataset.
"""
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
ranks = df.copy()
ranks.loc[:, 'rank'] = 1
ranks = (
ranks.groupby(by=[dl_idx, censorship])['rank']
.transform(lambda g: g.cumsum())
)
return ranks | Ranks each observation within the data groups.
In this case, the groups are defined by the record's detection
limit index and censorship status.
Parameters
----------
df : DataFrame
dl_idx : str
Name of the column in the dataframe containing the index of the
observations' corresponding detection limit in the `cohn`
dataframe.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
Returns
-------
ranks : ndarray
Array of ranks for the dataset. | _ros_group_rank | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def _ros_plot_pos(row, censorship, cohn):
"""
ROS-specific plotting positions.
Computes the plotting position for an observation based on its rank,
censorship status, and detection limit index.
Parameters
----------
row : {Series, dict}
Full observation (row) from a censored dataset. Requires a
'rank', 'detection_limit', and `censorship` column.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
cohn : DataFrame
DataFrame of Cohn numbers.
Returns
-------
plotting_position : float
See Also
--------
cohn_numbers
"""
DL_index = row['det_limit_index']
rank = row['rank']
censored = row[censorship]
dl_1 = cohn.iloc[DL_index]
dl_2 = cohn.iloc[DL_index + 1]
if censored:
return (1 - dl_1['prob_exceedance']) * rank / (dl_1['ncen_equal']+1)
else:
return (1 - dl_1['prob_exceedance']) + (dl_1['prob_exceedance'] - dl_2['prob_exceedance']) * \
rank / (dl_1['nuncen_above']+1) | ROS-specific plotting positions.
Computes the plotting position for an observation based on its rank,
censorship status, and detection limit index.
Parameters
----------
row : {Series, dict}
Full observation (row) from a censored dataset. Requires a
'rank', 'detection_limit', and `censorship` column.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
cohn : DataFrame
DataFrame of Cohn numbers.
Returns
-------
plotting_position : float
See Also
--------
cohn_numbers | _ros_plot_pos | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def _norm_plot_pos(observations):
"""
Computes standard normal (Gaussian) plotting positions using scipy.
Parameters
----------
observations : array_like
Sequence of observed quantities.
Returns
-------
plotting_position : array of floats
"""
ppos, sorted_res = stats.probplot(observations, fit=False)
return stats.norm.cdf(ppos) | Computes standard normal (Gaussian) plotting positions using scipy.
Parameters
----------
observations : array_like
Sequence of observed quantities.
Returns
-------
plotting_position : array of floats | _norm_plot_pos | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def plotting_positions(df, censorship, cohn):
"""
Compute the plotting positions for the observations.
The ROS-specific plotting positions are based on the observations'
rank, censorship status, and corresponding detection limit.
Parameters
----------
df : DataFrame
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
cohn : DataFrame
DataFrame of Cohn numbers.
Returns
-------
plotting_position : array of float
See Also
--------
cohn_numbers
"""
plot_pos = df.apply(lambda r: _ros_plot_pos(r, censorship, cohn), axis=1)
# correctly sort the plotting positions of the ND data:
ND_plotpos = plot_pos[df[censorship]]
ND_plotpos_arr = np.sort(np.array(ND_plotpos))
plot_pos.loc[df[censorship].index[df[censorship]]] = ND_plotpos_arr
return plot_pos | Compute the plotting positions for the observations.
The ROS-specific plotting positions are based on the observations'
rank, censorship status, and corresponding detection limit.
Parameters
----------
df : DataFrame
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
cohn : DataFrame
DataFrame of Cohn numbers.
Returns
-------
plotting_position : array of float
See Also
--------
cohn_numbers | plotting_positions | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def _impute(df, observations, censorship, transform_in, transform_out):
"""
Executes the basic regression on order statistics (ROS) procedure.
Uses ROS to impute censored values from the best-fit line of a
probability plot of the uncensored values.
Parameters
----------
df : DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
transform_in, transform_out : callable
Transformations to be applied to the data prior to fitting
the line and after estimated values from that line. Typically,
`np.log` and `np.exp` are used, respectively.
Returns
-------
estimated : DataFrame
A new dataframe with two new columns: "estimated" and "final".
The "estimated" column contains of the values inferred from the
best-fit line. The "final" column contains the estimated values
only where the original observations were censored, and the original
observations everywhere else.
"""
# detect/non-detect selectors
uncensored_mask = ~df[censorship]
censored_mask = df[censorship]
# fit a line to the logs of the detected data
fit_params = stats.linregress(
df['Zprelim'][uncensored_mask],
transform_in(df[observations][uncensored_mask])
)
# pull out the slope and intercept for use later
slope, intercept = fit_params[:2]
# model the data based on the best-fit curve
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
df.loc[:, 'estimated'] = transform_out(slope * df['Zprelim'][censored_mask] + intercept)
df.loc[:, 'final'] = np.where(df[censorship], df['estimated'], df[observations])
return df | Executes the basic regression on order statistics (ROS) procedure.
Uses ROS to impute censored values from the best-fit line of a
probability plot of the uncensored values.
Parameters
----------
df : DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
transform_in, transform_out : callable
Transformations to be applied to the data prior to fitting
the line and after estimated values from that line. Typically,
`np.log` and `np.exp` are used, respectively.
Returns
-------
estimated : DataFrame
A new dataframe with two new columns: "estimated" and "final".
The "estimated" column contains of the values inferred from the
best-fit line. The "final" column contains the estimated values
only where the original observations were censored, and the original
observations everywhere else. | _impute | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause
def _do_ros(df, observations, censorship, transform_in, transform_out):
"""
DataFrame-centric function to impute censored values with ROS.
Prepares a dataframe for, and then estimates the values of a censored
dataset using Regression on Order Statistics
Parameters
----------
df : DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
transform_in, transform_out : callable
Transformations to be applied to the data prior to fitting
the line and after estimated values from that line. Typically,
`np.log` and `np.exp` are used, respectively.
Returns
-------
estimated : DataFrame
A new dataframe with two new columns: "estimated" and "final".
The "estimated" column contains of the values inferred from the
best-fit line. The "final" column contains the estimated values
only where the original observations were censored, and the original
observations everywhere else.
"""
# compute the Cohn numbers
cohn = cohn_numbers(df, observations=observations, censorship=censorship)
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
modeled = _ros_sort(df, observations=observations, censorship=censorship)
modeled.loc[:, 'det_limit_index'] = modeled[observations].apply(_detection_limit_index, args=(cohn,))
modeled.loc[:, 'rank'] = _ros_group_rank(modeled, 'det_limit_index', censorship)
modeled.loc[:, 'plot_pos'] = plotting_positions(modeled, censorship, cohn)
modeled.loc[:, 'Zprelim'] = stats.norm.ppf(modeled['plot_pos'])
return _impute(modeled, observations, censorship, transform_in, transform_out) | DataFrame-centric function to impute censored values with ROS.
Prepares a dataframe for, and then estimates the values of a censored
dataset using Regression on Order Statistics
Parameters
----------
df : DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
transform_in, transform_out : callable
Transformations to be applied to the data prior to fitting
the line and after estimated values from that line. Typically,
`np.log` and `np.exp` are used, respectively.
Returns
-------
estimated : DataFrame
A new dataframe with two new columns: "estimated" and "final".
The "estimated" column contains of the values inferred from the
best-fit line. The "final" column contains the estimated values
only where the original observations were censored, and the original
observations everywhere else. | _do_ros | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause
def impute_ros(observations, censorship, df=None, min_uncensored=2,
max_fraction_censored=0.8, substitution_fraction=0.5,
transform_in=np.log, transform_out=np.exp,
as_array=True):
"""
Impute censored dataset using Regression on Order Statistics (ROS).
Method described in *Nondetects and Data Analysis* by Dennis R.
Helsel (John Wiley, 2005) to estimate the left-censored (non-detect)
values of a dataset. When there is insufficient non-censored data,
simple substitution is used.
Parameters
----------
observations : str or array-like
Label of the column or the float array of censored observations
censorship : str
Label of the column or the bool array of the censorship
status of the observations.
* True if censored,
* False if uncensored
df : DataFrame, optional
If `observations` and `censorship` are labels, this is the
DataFrame that contains those columns.
min_uncensored : int (default is 2)
The minimum number of uncensored values required before ROS
can be used to impute the censored observations. When this
criterion is not met, simple substitution is used instead.
max_fraction_censored : float (default is 0.8)
The maximum fraction of censored data below which ROS can be
used to impute the censored observations. When this fraction is
exceeded, simple substitution is used instead.
substitution_fraction : float (default is 0.5)
The fraction of the detection limit to be used during simple
substitution of the censored values.
transform_in : callable (default is np.log)
Transformation to be applied to the values prior to fitting a
line to the plotting positions vs. uncensored values.
transform_out : callable (default is np.exp)
Transformation to be applied to the imputed censored values
estimated from the previously computed best-fit line.
as_array : bool (default is True)
When True, a numpy array of the imputed observations is
returned. Otherwise, a modified copy of the original dataframe
with all of the intermediate calculations is returned.
Returns
-------
imputed : {ndarray, DataFrame}
The final observations where the censored values have either been
imputed through ROS or substituted as a fraction of the
detection limit.
Notes
-----
This function requires pandas 0.14 or more recent.
"""
# process arrays into a dataframe, if necessary
if df is None:
df = pd.DataFrame({'obs': observations, 'cen': censorship})
observations = 'obs'
censorship = 'cen'
# basic counts/metrics of the dataset
N_observations = df.shape[0]
N_censored = df[censorship].astype(int).sum()
N_uncensored = N_observations - N_censored
fraction_censored = N_censored / N_observations
# add plotting positions if there are no censored values
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
if N_censored == 0:
output = df[[observations, censorship]].copy()
output.loc[:, 'final'] = df[observations]
# substitute w/ fraction of the DLs if there's insufficient
# uncensored data
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
elif (N_uncensored < min_uncensored) or (fraction_censored > max_fraction_censored):
output = df[[observations, censorship]].copy()
output.loc[:, 'final'] = df[observations]
output.loc[df[censorship], 'final'] *= substitution_fraction
# normal ROS stuff
else:
output = _do_ros(df, observations, censorship, transform_in, transform_out)
# convert to an array if necessary
if as_array:
output = output['final'].values
return output | Impute censored dataset using Regression on Order Statistics (ROS).
Method described in *Nondetects and Data Analysis* by Dennis R.
Helsel (John Wiley, 2005) to estimate the left-censored (non-detect)
values of a dataset. When there is insufficient non-censorded data,
simple substitution is used.
Parameters
----------
observations : str or array-like
Label of the column or the float array of censored observations
censorship : str
Label of the column or the bool array of the censorship
status of the observations.
* True if censored,
* False if uncensored
df : DataFrame, optional
If `observations` and `censorship` are labels, this is the
DataFrame that contains those columns.
min_uncensored : int (default is 2)
The minimum number of uncensored values required before ROS
can be used to impute the censored observations. When this
criterion is not met, simple substitution is used instead.
max_fraction_censored : float (default is 0.8)
The maximum fraction of censored data below which ROS can be
used to impute the censored observations. When this fraction is
exceeded, simple substitution is used instead.
substitution_fraction : float (default is 0.5)
The fraction of the detection limit to be used during simple
substitution of the censored values.
transform_in : callable (default is np.log)
Transformation to be applied to the values prior to fitting a
line to the plotting positions vs. uncensored values.
transform_out : callable (default is np.exp)
Transformation to be applied to the imputed censored values
estimated from the previously computed best-fit line.
as_array : bool (default is True)
When True, a numpy array of the imputed observations is
returned. Otherwise, a modified copy of the original dataframe
with all of the intermediate calculations is returned.
Returns
-------
imputed : {ndarray, DataFrame}
The final observations where the censored values have either been
imputed through ROS or substituted as a fraction of the
detection limit.
Notes
-----
This function requires pandas 0.14 or more recent. | impute_ros | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
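A minimal sketch of impute_ros on a small censored sample; the concentrations and detection limits below are made up for illustration:
import numpy as np
import pandas as pd
from statsmodels.imputation.ros import impute_ros

df = pd.DataFrame({
    "conc": [0.5, 0.5, 1.0, 1.2, 2.3, 2.4, 3.1, 4.0, 5.6, 7.2],
    "censored": [True, True, True, False, False,
                 False, False, False, False, False],
})
# enough uncensored data and < 80% censoring, so the ROS branch is used
imputed = impute_ros("conc", "censored", df=df)
print(np.round(imputed, 3))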
def next_sample(self):
"""
Returns the next imputed dataset in the imputation process.
Returns
-------
data : array_like
An imputed dataset from the MICE chain.
Notes
-----
`MICEData` does not have a `skip` parameter. Consecutive
values returned by `next_sample` are immediately consecutive
in the imputation chain.
The returned value is a reference to the data attribute of
the class and should be copied before making any changes.
"""
self.update_all(1)
return self.data | Returns the next imputed dataset in the imputation process.
Returns
-------
data : array_like
An imputed dataset from the MICE chain.
Notes
-----
`MICEData` does not have a `skip` parameter. Consecutive
values returned by `next_sample` are immediately consecutive
in the imputation chain.
The returned value is a reference to the data attribute of
the class and should be copied before making any changes. | next_sample | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
def _initial_imputation(self):
"""
Use a PMM-like procedure for initial imputed values.
For each variable, missing values are imputed as the observed
value that is closest to the mean over all observed values.
"""
# Changed for pandas 2.0 copy-on-write behavior to use a single
# in-place fill
imp_values = {}
for col in self.data.columns:
di = self.data[col] - self.data[col].mean()
di = np.abs(di)
ix = di.idxmin()
imp_values[col] = self.data[col].loc[ix]
self.data.fillna(imp_values, inplace=True) | Use a PMM-like procedure for initial imputed values.
For each variable, missing values are imputed as the observed
value that is closest to the mean over all observed values. | _initial_imputation | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
def set_imputer(self, endog_name, formula=None, model_class=None,
init_kwds=None, fit_kwds=None, predict_kwds=None,
k_pmm=20, perturbation_method=None, regularized=False):
"""
Specify the imputation process for a single variable.
Parameters
----------
endog_name : str
Name of the variable to be imputed.
formula : str
Conditional formula for imputation. Defaults to a formula
with main effects for all other variables in dataset. The
formula should only include an expression for the mean
structure, e.g. use 'x1 + x2' not 'x4 ~ x1 + x2'.
model_class : statsmodels model
Conditional model for imputation. Defaults to OLS. See below
for more information.
init_kwds : dict-like
Keyword arguments passed to the model init method.
fit_kwds : dict-like
Keyword arguments passed to the model fit method.
predict_kwds : dict-like
Keyword arguments passed to the model predict method.
k_pmm : int
Determines the number of neighboring observations from which
to randomly sample when using predictive mean matching.
perturbation_method : str
Either 'gaussian' or 'bootstrap'. Determines the method
for perturbing parameters in the imputation model. If
None, uses the default specified at class initialization.
regularized : bool
If True, `fit_regularized` rather than `fit` is called when
fitting the imputation model for this variable. When True for
any variable, perturbation_method must be set to the bootstrap
option.
Notes
-----
The model class must meet the following conditions:
* A model must have a 'fit' method that returns an object.
* The object returned from `fit` must have a `params` attribute
that is an array-like object.
* The object returned from `fit` must have a cov_params method
that returns a square array-like object.
* The model must have a `predict` method.
"""
if formula is None:
main_effects = [x for x in self.data.columns
if x != endog_name]
fml = endog_name + " ~ " + " + ".join(main_effects)
self.conditional_formula[endog_name] = fml
else:
fml = endog_name + " ~ " + formula
self.conditional_formula[endog_name] = fml
if model_class is None:
self.model_class[endog_name] = OLS
else:
self.model_class[endog_name] = model_class
if init_kwds is not None:
self.init_kwds[endog_name] = init_kwds
if fit_kwds is not None:
self.fit_kwds[endog_name] = fit_kwds
if predict_kwds is not None:
self.predict_kwds[endog_name] = predict_kwds
if perturbation_method is not None:
self.perturbation_method[endog_name] = perturbation_method
self.k_pmm = k_pmm
self.regularized[endog_name] = regularized | Specify the imputation process for a single variable.
Parameters
----------
endog_name : str
Name of the variable to be imputed.
formula : str
Conditional formula for imputation. Defaults to a formula
with main effects for all other variables in dataset. The
formula should only include an expression for the mean
structure, e.g. use 'x1 + x2' not 'x4 ~ x1 + x2'.
model_class : statsmodels model
Conditional model for imputation. Defaults to OLS. See below
for more information.
init_kwds : dict-like
Keyword arguments passed to the model init method.
fit_kwds : dict-like
Keyword arguments passed to the model fit method.
predict_kwds : dict-like
Keyword arguments passed to the model predict method.
k_pmm : int
Determines the number of neighboring observations from which
to randomly sample when using predictive mean matching.
perturbation_method : str
Either 'gaussian' or 'bootstrap'. Determines the method
for perturbing parameters in the imputation model. If
None, uses the default specified at class initialization.
regularized : bool
If True, `fit_regularized` rather than `fit` is called when
fitting the imputation model for this variable. When True for
any variable, perturbation_method must be set to the bootstrap
option.
Notes
-----
The model class must meet the following conditions:
* A model must have a 'fit' method that returns an object.
* The object returned from `fit` must have a `params` attribute
that is an array-like object.
* The object returned from `fit` must have a cov_params method
that returns a square array-like object.
* The model must have a `predict` method. | set_imputer | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
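A hedged sketch of customizing one variable's conditional model with `set_imputer`. The column names and formula are hypothetical; the right-hand-side-only formula convention and the `k_pmm` argument follow the docstring above, and the default OLS conditional model is kept.

import numpy as np
import pandas as pd
from statsmodels.imputation import mice

rng = np.random.default_rng(1)
df = pd.DataFrame({
    "y": rng.normal(size=100),
    "x1": rng.normal(size=100),
    "x2": rng.normal(size=100),
})
df.loc[df.sample(frac=0.15, random_state=1).index, "x2"] = np.nan

imp = mice.MICEData(df)
# Impute x2 from a custom mean structure (no left-hand side in the
# formula) with a 10-neighbor predictive-mean-matching pool.
imp.set_imputer("x2", formula="y + np.square(y) + x1", k_pmm=10)
imp.update_all(2)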