code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def bsejhj(self):
"""standard deviation of parameter estimates based on covHJH
"""
return np.sqrt(np.diag(self.covjhj))
| standard deviation of parameter estimates based on covHJH | bsejhj | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def bsejac(self):
"""standard deviation of parameter estimates based on covjac
"""
return np.sqrt(np.diag(self.covjac))
| standard deviation of parameter estimates based on covjac | bsejac | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def bootstrap(self, nrep=100, method='nm', disp=0, store=1):
"""simple bootstrap to get mean and variance of estimator
see notes
Parameters
----------
nrep : int
number of bootstrap replications
method : str
optimization method to use
disp : bool
If true, then optimization prints results
store : bool
If true, then parameter estimates for all bootstrap iterations
are attached in self.bootstrap_results
Returns
-------
mean : ndarray
mean of parameter estimates over bootstrap replications
std : ndarray
standard deviation of parameter estimates over bootstrap
replications
Notes
-----
This was mainly written to compare estimators of the standard errors of
the parameter estimates. It uses independent random sampling from the
original endog and exog, and therefore is only correct if observations
are independently distributed.
This will be moved to apply only to models with independently
distributed observations.
"""
results = []
hascloneattr = True if hasattr(self.model, 'cloneattr') else False
for i in range(nrep):
rvsind = np.random.randint(self.nobs, size=self.nobs)
# this needs to set startparam and get other defining attributes
# need a clone method on model
if self.exog is not None:
exog_resamp = self.exog[rvsind, :]
else:
exog_resamp = None
# build auxiliary model and fit
init_kwds = self.model._get_init_kwds()
fitmod = self.model.__class__(self.endog[rvsind],
exog=exog_resamp, **init_kwds)
if hascloneattr:
for attr in self.model.cloneattr:
setattr(fitmod, attr, getattr(self.model, attr))
fitres = fitmod.fit(method=method, disp=disp)
results.append(fitres.params)
results = np.array(results)
if store:
self.bootstrap_results = results
return results.mean(0), results.std(0), results
| simple bootstrap to get mean and variance of estimator | bootstrap | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
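A minimal sketch of the same resampling idea outside statsmodels: draw row indices with replacement, re-estimate, and summarize the replications. The data, the estimator (a sample mean), and the replication count below are illustrative only.

```python
import numpy as np

rng = np.random.default_rng(0)
y = rng.normal(loc=1.0, scale=2.0, size=200)   # stand-in for endog
nrep = 500

reps = []
for _ in range(nrep):
    idx = rng.integers(0, y.shape[0], size=y.shape[0])   # resample rows with replacement
    reps.append(y[idx].mean())                           # re-estimate on the resample
reps = np.array(reps)

print(reps.mean(), reps.std())  # bootstrap mean and standard error of the estimator
```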
def get_nlfun(self, fun):
"""
get_nlfun
This is not Implemented
"""
# I think this is supposed to get the delta method that is currently
# in miscmodels count (as part of Poisson example)
raise NotImplementedError
| get_nlfun This is not Implemented | get_nlfun | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def pseudo_rsquared(self, kind="mcf"):
"""
McFadden's pseudo-R-squared. `1 - (llf / llnull)`
"""
kind = kind.lower()
if kind.startswith("mcf"):
prsq = 1 - self.llf / self.llnull
elif kind.startswith("cox") or kind in ["cs", "lr"]:
prsq = 1 - np.exp((self.llnull - self.llf) * (2 / self.nobs))
else:
raise ValueError("only McFadden and Cox-Snell are available")
return prsq
| McFadden's pseudo-R-squared. `1 - (llf / llnull)` | pseudo_rsquared | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
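A hedged numeric example of the two formulas used above; the log-likelihood values and sample size are made up for illustration.

```python
import numpy as np

llf, llnull, nobs = -520.3, -610.8, 1000
mcfadden = 1 - llf / llnull                           # kind="mcf"
cox_snell = 1 - np.exp((llnull - llf) * (2 / nobs))   # kind="cox"/"cs"/"lr"
print(round(mcfadden, 4), round(cox_snell, 4))
```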
def llr(self):
"""
Likelihood ratio chi-squared statistic; `-2*(llnull - llf)`
"""
return -2*(self.llnull - self.llf)
| Likelihood ratio chi-squared statistic; `-2*(llnull - llf)` | llr | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def llr_pvalue(self):
"""
The chi-squared probability of getting a log-likelihood ratio
statistic greater than llr. llr has a chi-squared distribution
with degrees of freedom `df_model`.
"""
# see also RegressionModel compare_lr_test
llr = self.llr
df_full = self.df_resid
df_restr = self.df_resid_null
lrdf = (df_restr - df_full)
self.df_lr_null = lrdf
return stats.distributions.chi2.sf(llr, lrdf)
| The chi-squared probability of getting a log-likelihood ratio statistic greater than llr | llr_pvalue | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
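For illustration, the likelihood-ratio statistic and its chi-squared p-value computed directly with scipy; the log-likelihoods and the degrees-of-freedom difference are hypothetical values.

```python
from scipy import stats

llnull, llf, lrdf = -610.8, -520.3, 3   # lrdf plays the role of df_resid_null - df_resid
llr = -2 * (llnull - llf)
print(llr, stats.chi2.sf(llr, lrdf))
```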
def set_null_options(self, llnull=None, attach_results=True, **kwargs):
"""
Set the fit options for the Null (constant-only) model.
This resets the cache for related attributes which is potentially
fragile. This only sets the option, the null model is estimated
when llnull is accessed, if llnull is not yet in cache.
Parameters
----------
llnull : {None, float}
If llnull is not None, then the value will be directly assigned to
the cached attribute "llnull".
attach_results : bool
Sets an internal flag whether the results instance of the null
model should be attached. By default, without calling this method,
the null model results are not attached and only the loglikelihood
value llnull is stored.
**kwargs
Additional keyword arguments used as fit keyword arguments for the
null model. They override the model default values.
Notes
-----
Modifies attributes of this instance, and so has no return.
"""
# reset cache, note we need to add here anything that depends on
# llnull or the null model. If something is missing, then the attribute
# might be incorrect.
self._cache.pop('llnull', None)
self._cache.pop('llr', None)
self._cache.pop('llr_pvalue', None)
self._cache.pop('prsquared', None)
if hasattr(self, 'res_null'):
del self.res_null
if llnull is not None:
self._cache['llnull'] = llnull
self._attach_nullmodel = attach_results
self._optim_kwds_null = kwargs
| Set the fit options for the Null (constant-only) model. | set_null_options | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def llnull(self):
"""
Value of the constant-only loglikelihood
"""
model = self.model
kwds = model._get_init_kwds().copy()
for key in getattr(model, '_null_drop_keys', []):
del kwds[key]
# TODO: what parameters to pass to fit?
mod_null = model.__class__(model.endog, np.ones(self.nobs), **kwds)
# TODO: consider catching and warning on convergence failure?
# in the meantime, try hard to converge. see
# TestPoissonConstrained1a.test_smoke
optim_kwds = getattr(self, '_optim_kwds_null', {}).copy()
if 'start_params' in optim_kwds:
# user provided
sp_null = optim_kwds.pop('start_params')
elif hasattr(model, '_get_start_params_null'):
# get moment estimates if available
sp_null = model._get_start_params_null()
else:
sp_null = None
opt_kwds = dict(method='bfgs', warn_convergence=False, maxiter=10000,
disp=0)
opt_kwds.update(optim_kwds)
if optim_kwds:
res_null = mod_null.fit(start_params=sp_null, **opt_kwds)
else:
# this should be a reasonably robust method across versions
res_null = mod_null.fit(start_params=sp_null, method='nm',
warn_convergence=False,
maxiter=10000, disp=0)
res_null = mod_null.fit(start_params=res_null.params, method='bfgs',
warn_convergence=False,
maxiter=10000, disp=0)
if getattr(self, '_attach_nullmodel', False) is not False:
self.res_null = res_null
self.k_null = len(res_null.params)
self.df_resid_null = res_null.df_resid
return res_null.llf
| Value of the constant-only loglikelihood | llnull | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
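A sketch of the constant-only comparison this property computes, using a Poisson model purely as an example; the data are simulated, and any likelihood model refit with an intercept-only design would serve the same purpose.

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = sm.add_constant(rng.normal(size=(200, 1)))
y = rng.poisson(np.exp(x @ np.array([0.3, 0.5])))

res = sm.Poisson(y, x).fit(disp=0)
res_null = sm.Poisson(y, np.ones(len(y))).fit(disp=0)   # intercept-only "null" model
print(res.llf, res_null.llf)                            # res_null.llf plays the role of llnull
```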
def get_prediction(
self,
exog=None,
which="mean",
transform=True,
row_labels=None,
average=False,
agg_weights=None,
**kwargs
):
"""
Compute prediction results when endpoint transformation is valid.
Parameters
----------
exog : array_like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
which : str
Which statistic is to be predicted. Default is "mean".
The available statistics and options depend on the model.
see the model.predict docstring
row_labels : list of str or None
If row_labels are provided, then they will replace the generated
labels.
average : bool
If average is True, then the mean prediction is computed, that is,
predictions are computed for individual exog and then the average
over observations is used.
If average is False, then the results are the predictions for all
observations, i.e. same length as ``exog``.
agg_weights : ndarray, optional
Aggregation weights, only used if average is True.
The weights are not normalized.
**kwargs :
Some models can take additional keyword arguments, such as offset,
exposure or additional exog in multi-part models like zero inflated
models.
See the predict method of the model for the details.
Returns
-------
prediction_results : PredictionResults
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and
summary dataframe for the prediction.
Notes
-----
Status: new in 0.14, experimental
"""
from statsmodels.base._prediction_inference import get_prediction
pred_kwds = kwargs
res = get_prediction(
self,
exog=exog,
which=which,
transform=transform,
row_labels=row_labels,
average=average,
agg_weights=agg_weights,
pred_kwds=pred_kwds
)
return res
| Compute prediction results when endpoint transformation is valid. | get_prediction | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
Names for the exogenous variables, default is "var_xx".
Must match the number of parameters in the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Maximum Likelihood']),
('Date:', None),
('Time:', None),
('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None),
]
top_right = [('Log-Likelihood:', None),
('AIC:', ["%#8.4g" % self.aic]),
('BIC:', ["%#8.4g" % self.bic])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Results"
# create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
return smry
| Summarize the Regression Results | summary | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def func(self, params):
"""
A penalty function on a vector of parameters.
Parameters
----------
params : array_like
A vector of parameters.
Returns
-------
A scalar penalty value; greater values imply greater
penalization.
"""
raise NotImplementedError
| A penalty function on a vector of parameters. | func | python | statsmodels/statsmodels | statsmodels/base/_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py | BSD-3-Clause |
def deriv(self, params):
"""
The gradient of a penalty function.
Parameters
----------
params : array_like
A vector of parameters
Returns
-------
The gradient of the penalty with respect to each element in
`params`.
"""
raise NotImplementedError
| The gradient of a penalty function. | deriv | python | statsmodels/statsmodels | statsmodels/base/_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py | BSD-3-Clause |
def _null_weights(self, params):
"""work around for Null model
This will not be needed anymore when we can use `self._null_drop_keys`
as in DiscreteModels.
TODO: check other models
"""
if np.size(self.weights) > 1:
if len(params) == 1:
raise # raise to identify models where this would be needed
return 0.
return self.weights
| work around for Null model | _null_weights | python | statsmodels/statsmodels | statsmodels/base/_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py | BSD-3-Clause |
def deriv2(self, params):
"""Second derivative of function
This returns scalar or vector in same shape as params, not a square
Hessian. If the return is 1 dimensional, then it is the diagonal of
the Hessian.
"""
# 3 segments in absolute value
tau = self.tau
p = np.atleast_1d(params)
p_abs = np.abs(p)
res = np.zeros(p_abs.shape)
mask1 = p_abs < tau
mask3 = p_abs >= self.c * tau
mask2 = ~mask1 & ~mask3
res[mask2] = -1 / (self.c - 1)
return self.weights * res
| Second derivative of function | deriv2 | python | statsmodels/statsmodels | statsmodels/base/_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py | BSD-3-Clause |
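A hedged numeric sketch of the three segments described above: the second derivative is 0 below tau, the constant -1/(c - 1) on the middle segment, and 0 at or beyond c*tau. The values of tau, c, the weights, and the parameter vector are illustrative only.

```python
import numpy as np

tau, c, weights = 1.0, 3.7, 1.0
p_abs = np.abs(np.array([0.5, 2.0, 5.0]))

res = np.zeros_like(p_abs)
res[(p_abs >= tau) & (p_abs < c * tau)] = -1 / (c - 1)   # only the middle segment is nonzero
print(weights * res)
```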
def func(self, params):
"""evaluate penalty function at params
Parameters
----------
params : ndarray
array of parameters at which the penalty is evaluated
Returns
-------
value : ndarray
value(s) of penalty function
"""
# TODO: `and np.size(params) > 1` is hack for llnull, need better solution
# Is this still needed? it seems to work without
if self.restriction is not None:
params = self.restriction.dot(params)
value = self.penalty.func(params)
return (self.weights * value.T).T.sum(0)
| evaluate penalty function at params | func | python | statsmodels/statsmodels | statsmodels/base/_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py | BSD-3-Clause |
def deriv(self, params):
"""first derivative of penalty function w.r.t. params
Parameters
----------
params : ndarray
array of parameters at which derivative is evaluated
Returns
-------
deriv : ndarray
array of first partial derivatives
"""
if self.restriction is not None:
params = self.restriction.dot(params)
value = self.penalty.deriv(params)
if self.restriction is not None:
return self.weights * value.T.dot(self.restriction)
else:
return (self.weights * value.T)
| first derivative of penalty function w.r.t. params | deriv | python | statsmodels/statsmodels | statsmodels/base/_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py | BSD-3-Clause |
def deriv2(self, params):
"""second derivative of penalty function w.r.t. params
Parameters
----------
params : ndarray
array of parameters at which derivative is evaluated
Returns
-------
deriv2 : ndarray, 2-D
second derivative matrix
"""
if self.restriction is not None:
params = self.restriction.dot(params)
value = self.penalty.deriv2(params)
if self.restriction is not None:
# note: univariate penalty returns 1d array for diag,
# i.e. hessian_diag
v = (self.restriction.T * value * self.weights)
value = v.dot(self.restriction)
else:
value = np.diag(self.weights * value)
return value
| second derivative of penalty function w.r.t. params | deriv2 | python | statsmodels/statsmodels | statsmodels/base/_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py | BSD-3-Clause |
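A small sketch of the Hessian assembly in the restricted case: with restriction matrix R, per-restriction weights w, and the univariate penalty's diagonal second derivatives d evaluated at R @ params, the result is R.T @ diag(w * d) @ R. All numbers below are illustrative.

```python
import numpy as np

R = np.array([[1.0, -1.0, 0.0],
              [0.0, 1.0, -1.0]])      # restriction matrix (illustrative)
w = np.array([0.5, 2.0])              # weights per restriction
d = np.array([2.0, 2.0])              # diagonal second derivatives at R @ params

hess = (R.T * (w * d)) @ R            # same as R.T @ np.diag(w * d) @ R
print(hess)
```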
def func(self, mat, mat_inv):
"""
Parameters
----------
mat : square matrix
The matrix to be penalized.
mat_inv : square matrix
The inverse of `mat`.
Returns
-------
A scalar penalty value
"""
raise NotImplementedError
| Penalty for a square matrix; returns a scalar penalty value | func | python | statsmodels/statsmodels | statsmodels/base/_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py | BSD-3-Clause |
def deriv(self, mat, mat_inv):
"""
Parameters
----------
mat : square matrix
The matrix to be penalized.
mat_inv : square matrix
The inverse of `mat`.
Returns
-------
A vector containing the gradient of the penalty
with respect to each element in the lower triangle
of `mat`.
"""
raise NotImplementedError
| Gradient of the matrix penalty with respect to the lower triangle of `mat` | deriv | python | statsmodels/statsmodels | statsmodels/base/_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py | BSD-3-Clause |
def qc_results(params, alpha, score, qc_tol, qc_verbose=False):
"""
Theory dictates that one of two conditions holds:
i) abs(score[i]) == alpha[i] and params[i] != 0
ii) abs(score[i]) <= alpha[i] and params[i] == 0
qc_results checks to see that (ii) holds, within qc_tol
qc_results also checks for nan or results of the wrong shape.
Parameters
----------
params : ndarray
model parameters. Not including the added variables x_added.
alpha : ndarray
regularization coefficients
score : function
Gradient of unregularized objective function
qc_tol : float
Tolerance to hold conditions (i) and (ii) to for QC check.
qc_verbose : bool
If true, print out a full QC report upon failure
Returns
-------
passed : bool
True if QC check passed
qc_dict : Dictionary
Keys are fprime, alpha, params, passed_array
Prints
------
Warning message if QC check fails.
"""
## Check for fatal errors
assert not np.isnan(params).max()
assert (params == params.ravel('F')).min(), \
"params should have already been 1-d"
## Start the theory compliance check
fprime = score(params)
k_params = len(params)
passed_array = np.array([True] * k_params)
for i in range(k_params):
if alpha[i] > 0:
# If |fprime| is too big, then something went wrong
if (abs(fprime[i]) - alpha[i]) / alpha[i] > qc_tol:
passed_array[i] = False
qc_dict = dict(
fprime=fprime, alpha=alpha, params=params, passed_array=passed_array)
passed = passed_array.min()
if not passed:
num_failed = (~passed_array).sum()
message = 'QC check did not pass for %d out of %d parameters' % (
num_failed, k_params)
message += '\nTry increasing solver accuracy or number of iterations'\
', decreasing alpha, or switch solvers'
if qc_verbose:
message += _get_verbose_addon(qc_dict)
import warnings
warnings.warn(message, ConvergenceWarning)
return passed
| Checks that the L1 optimality conditions hold within qc_tol; also checks for nan | qc_results | python | statsmodels/statsmodels | statsmodels/base/l1_solvers_common.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_solvers_common.py | BSD-3-Clause |
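A hedged numeric sketch of the optimality conditions listed in the docstring for an L1-penalized fit: roughly |score_i| == alpha_i where params_i != 0 and |score_i| <= alpha_i where params_i == 0, checked within a tolerance. All values below are made up.

```python
import numpy as np

params = np.array([0.8, 0.0, -1.2])
score = np.array([2.0, 0.7, -2.0])     # gradient of the unpenalized objective
alpha = np.array([2.0, 1.0, 2.0])
qc_tol = 0.03

passed = np.ones(params.shape, dtype=bool)
for i in range(len(params)):
    # flag parameters where |score| exceeds alpha by more than the tolerance
    if alpha[i] > 0 and (abs(score[i]) - alpha[i]) / alpha[i] > qc_tol:
        passed[i] = False
print(passed.all())
```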
def do_trim_params(params, k_params, alpha, score, passed, trim_mode,
size_trim_tol, auto_trim_tol):
"""
Trims (set to zero) params that are zero at the theoretical minimum.
Uses heuristics to account for the solver not actually finding the minimum.
In all cases, if alpha[i] == 0, then do not trim the ith param.
In all cases, do nothing with the added variables.
Parameters
----------
params : ndarray
model parameters. Not including added variables.
k_params : Int
Number of parameters
alpha : ndarray
regularization coefficients
score : Function.
score(params) should return a 1-d vector of derivatives of the
unpenalized objective function.
passed : bool
True if the QC check passed
trim_mode : 'auto', 'size', or 'off'
If not 'off', trim (set to zero) parameters that would have been zero
if the solver reached the theoretical minimum.
If 'auto', trim params using the Theory above.
If 'size', trim params if they have very small absolute value
size_trim_tol : float or 'auto' (default = 'auto')
For use when trim_mode == 'size'
auto_trim_tol : float
For use when trim_mode == 'auto'
qc_tol : float
Print warning and do not allow auto trim when (ii) in "Theory" (above)
is violated by this much.
Returns
-------
params : ndarray
Trimmed model parameters
trimmed : ndarray of booleans
trimmed[i] == True if the ith parameter was trimmed.
"""
## Trim the small params
trimmed = [False] * k_params
if trim_mode == 'off':
trimmed = np.array([False] * k_params)
elif trim_mode == 'auto' and not passed:
import warnings
msg = "Could not trim params automatically due to failed QC check. " \
"Trimming using trim_mode == 'size' will still work."
warnings.warn(msg, ConvergenceWarning)
trimmed = np.array([False] * k_params)
elif trim_mode == 'auto' and passed:
fprime = score(params)
for i in range(k_params):
if alpha[i] != 0:
if (alpha[i] - abs(fprime[i])) / alpha[i] > auto_trim_tol:
params[i] = 0.0
trimmed[i] = True
elif trim_mode == 'size':
for i in range(k_params):
if alpha[i] != 0:
if abs(params[i]) < size_trim_tol:
params[i] = 0.0
trimmed[i] = True
else:
raise ValueError(
"trim_mode == %s, which is not recognized" % (trim_mode))
return params, np.asarray(trimmed)
| Trims (set to zero) params that are zero at the theoretical minimum. | do_trim_params | python | statsmodels/statsmodels | statsmodels/base/l1_solvers_common.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_solvers_common.py | BSD-3-Clause |
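An illustrative sketch of the 'size' trim mode: any parameter with a nonzero penalty weight and an absolute value below the tolerance is set to exactly zero. The values and tolerance are made up.

```python
import numpy as np

params = np.array([1.5, 3e-6, -0.02, -1e-7])
alpha = np.array([1.0, 1.0, 0.0, 1.0])
size_trim_tol = 1e-4

trimmed = (alpha != 0) & (np.abs(params) < size_trim_tol)   # third entry is never trimmed (alpha == 0)
params = np.where(trimmed, 0.0, params)
print(params, trimmed)
```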
def t_test(self, value=0, alternative='two-sided'):
'''z- or t-test for hypothesis that mean is equal to value
Parameters
----------
value : array_like
value under the null hypothesis
alternative : str
'two-sided', 'larger', 'smaller'
Returns
-------
stat : ndarray
test statistic
pvalue : ndarray
p-value of the hypothesis test, the distribution is given by
the attribute of the instance, specified in `__init__`. Default
if not specified is the normal distribution.
'''
# assumes symmetric distribution
stat = (self.predicted - value) / self.se
if alternative in ['two-sided', '2-sided', '2s']:
pvalue = self.dist.sf(np.abs(stat), *self.dist_args)*2
elif alternative in ['larger', 'l']:
pvalue = self.dist.sf(stat, *self.dist_args)
elif alternative in ['smaller', 's']:
pvalue = self.dist.cdf(stat, *self.dist_args)
else:
raise ValueError('invalid alternative')
return stat, pvalue
| z- or t-test for hypothesis that mean is equal to value | t_test | python | statsmodels/statsmodels | statsmodels/base/_prediction_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py | BSD-3-Clause |
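The same test logic in a few lines, assuming a standard normal reference distribution; the prediction, standard error, and null value are illustrative.

```python
import numpy as np
from scipy import stats

predicted, se, value = 2.3, 0.8, 0.0
stat = (predicted - value) / se
print(stat, 2 * stats.norm.sf(np.abs(stat)))   # two-sided p-value
```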
def _conf_int_generic(self, center, se, alpha, dist_args=None):
"""internal function to avoid code duplication
"""
if dist_args is None:
dist_args = ()
q = self.dist.ppf(1 - alpha / 2., *dist_args)
lower = center - q * se
upper = center + q * se
ci = np.column_stack((lower, upper))
# if we want to stack at a new last axis, for lower.ndim > 1
# np.concatenate((lower[..., None], upper[..., None]), axis=-1)
return ci
| internal function to avoid code duplication | _conf_int_generic | python | statsmodels/statsmodels | statsmodels/base/_prediction_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py | BSD-3-Clause |
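A hedged example of the generic construction above: center +/- quantile * se, here with a standard normal reference distribution; alpha and the inputs are illustrative.

```python
import numpy as np
from scipy import stats

center = np.array([1.2, -0.4])
se = np.array([0.3, 0.5])
alpha = 0.05

q = stats.norm.ppf(1 - alpha / 2.)
ci = np.column_stack((center - q * se, center + q * se))
print(ci)
```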
def conf_int(self, *, alpha=0.05, **kwds):
"""Confidence interval for the predicted value.
Parameters
----------
alpha : float, optional
The significance level for the confidence interval.
ie., The default `alpha` = .05 returns a 95% confidence interval.
kwds : extra keyword arguments
Ignored in base class, only for compatibility, consistent signature
with subclasses
Returns
-------
ci : ndarray, (k_constraints, 2)
The array has the lower and the upper limit of the confidence
interval in the columns.
"""
ci = self._conf_int_generic(self.predicted, self.se, alpha,
dist_args=self.dist_args)
return ci
| Confidence interval for the predicted value. | conf_int | python | statsmodels/statsmodels | statsmodels/base/_prediction_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py | BSD-3-Clause |
def summary_frame(self, alpha=0.05):
"""Summary frame
Parameters
----------
alpha : float, optional
The significance level for the confidence interval.
ie., The default `alpha` = .05 returns a 95% confidence interval.
Returns
-------
pandas DataFrame with columns 'predicted', 'se', 'ci_lower', 'ci_upper'
"""
ci = self.conf_int(alpha=alpha)
to_include = {}
to_include['predicted'] = self.predicted
to_include['se'] = self.se
to_include['ci_lower'] = ci[:, 0]
to_include['ci_upper'] = ci[:, 1]
self.table = to_include
# pandas dict does not handle 2d_array
# data = np.column_stack(list(to_include.values()))
# names = ....
res = pd.DataFrame(to_include, index=self.row_labels,
columns=to_include.keys())
return res
| Summary frame | summary_frame | python | statsmodels/statsmodels | statsmodels/base/_prediction_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py | BSD-3-Clause |
def conf_int(self, method='endpoint', alpha=0.05, **kwds):
"""Confidence interval for the predicted value.
This is currently only available for t and z tests.
Parameters
----------
method : {"endpoint", "delta"}
Method for the confidence interval.
If method is "endpoint", then the confidence interval of the
linear predictor is transformed by the prediction function.
If method is "delta", then the delta-method is used. The confidence
interval in this case might reach outside the range of the
prediction, for example probabilities larger than one or smaller
than zero.
alpha : float, optional
The significance level for the confidence interval.
ie., The default `alpha` = .05 returns a 95% confidence interval.
kwds : extra keyword arguments
currently ignored, only for compatibility, consistent signature
Returns
-------
ci : ndarray, (k_constraints, 2)
The array has the lower and the upper limit of the confidence
interval in the columns.
"""
tmp = np.linspace(0, 1, 6)
# TODO: drop check?
is_linear = (self.func(tmp) == tmp).all()
if method == 'endpoint' and not is_linear:
ci_linear = self._conf_int_generic(self.linpred, self.linpred_se,
alpha,
dist_args=self.dist_args)
ci = self.func(ci_linear)
elif method == 'delta' or is_linear:
ci = self._conf_int_generic(self.predicted, self.se, alpha,
dist_args=self.dist_args)
return ci
| Confidence interval for the predicted value. | conf_int | python | statsmodels/statsmodels | statsmodels/base/_prediction_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py | BSD-3-Clause |
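A sketch contrasting the two interval methods for a logistic mean function, not tied to any particular model here: "endpoint" transforms the interval of the linear predictor, while "delta" builds a symmetric interval on the transformed scale and can leave [0, 1]. All numbers are illustrative.

```python
import numpy as np
from scipy import stats


def expit(x):
    return 1 / (1 + np.exp(-x))


linpred, linpred_se = 1.8, 0.9
q = stats.norm.ppf(0.975)

# endpoint: transform the CI of the linear predictor
ci_endpoint = expit(np.array([linpred - q * linpred_se, linpred + q * linpred_se]))

# delta: symmetric CI on the transformed scale using the derivative of the mean function
deriv = expit(linpred) * (1 - expit(linpred))
se_delta = deriv * linpred_se
ci_delta = np.array([expit(linpred) - q * se_delta, expit(linpred) + q * se_delta])
print(ci_endpoint, ci_delta)   # the upper delta limit exceeds 1 here
```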
def summary_frame(self, alpha=0.05):
"""Summary frame
Parameters
----------
alpha : float, optional
The significance level for the confidence interval.
ie., The default `alpha` = .05 returns a 95% confidence interval.
Returns
-------
pandas DataFrame with columns
'mean', 'mean_se', 'mean_ci_lower', 'mean_ci_upper'.
"""
# TODO: finish and cleanup
ci_mean = self.conf_int(alpha=alpha)
to_include = {}
to_include['mean'] = self.predicted_mean
to_include['mean_se'] = self.se_mean
to_include['mean_ci_lower'] = ci_mean[:, 0]
to_include['mean_ci_upper'] = ci_mean[:, 1]
self.table = to_include
# pandas dict does not handle 2d_array
# data = np.column_stack(list(to_include.values()))
# names = ....
res = pd.DataFrame(to_include, index=self.row_labels,
columns=to_include.keys())
return res
| Summary frame | summary_frame | python | statsmodels/statsmodels | statsmodels/base/_prediction_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py | BSD-3-Clause |
def _get_exog_predict(self, exog=None, transform=True, row_labels=None):
"""Prepare or transform exog for prediction
Parameters
----------
exog : array_like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
row_labels : list of str or None
If row_labels are provided, then they will replace the generated
labels.
Returns
-------
exog : ndarray
Prediction exog
row_labels : list of str
Labels or pandas index for rows of prediction
"""
# prepare exog and row_labels, based on base Results.predict
if transform and hasattr(self.model, 'formula') and exog is not None:
from statsmodels.formula._manager import FormulaManager
mgr = FormulaManager()
if isinstance(exog, pd.Series):
exog = pd.DataFrame(exog)
exog = mgr.get_matrices(self.model.data.model_spec, exog)
if exog is not None:
if row_labels is None:
row_labels = getattr(exog, 'index', None)
if callable(row_labels):
row_labels = None
exog = np.asarray(exog)
if exog.ndim == 1 and (self.model.exog.ndim == 1 or
self.model.exog.shape[1] == 1):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
else:
exog = self.model.exog
if row_labels is None:
row_labels = getattr(self.model.data, 'row_labels', None)
return exog, row_labels
| Prepare or transform exog for prediction | _get_exog_predict | python | statsmodels/statsmodels | statsmodels/base/_prediction_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py | BSD-3-Clause |
def get_prediction_glm(self, exog=None, transform=True,
row_labels=None, linpred=None, link=None,
pred_kwds=None):
"""
Compute prediction results for GLM compatible models.
Parameters
----------
exog : array_like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
row_labels : list of str or None
If row_labels are provided, then they will replace the generated
labels.
linpred : linear prediction instance
Instance of linear prediction results used for confidence intervals
based on endpoint transformation.
link : instance of link function
If no link function is provided, then the `model.family.link` is used.
pred_kwds : dict
Some models can take additional keyword arguments, such as offset or
additional exog in multi-part models. See the predict method of the
model for the details.
Returns
-------
prediction_results : generalized_linear_model.PredictionResults
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and summary
tables for the prediction of the mean and of new observations.
"""
# prepare exog and row_labels, based on base Results.predict
exog, row_labels = _get_exog_predict(
self,
exog=exog,
transform=transform,
row_labels=row_labels,
)
if pred_kwds is None:
pred_kwds = {}
predicted_mean = self.model.predict(self.params, exog, **pred_kwds)
covb = self.cov_params()
link_deriv = self.model.family.link.inverse_deriv(linpred.predicted_mean)
var_pred_mean = link_deriv**2 * (exog * np.dot(covb, exog.T).T).sum(1)
var_resid = self.scale # self.mse_resid / weights
# TODO: check that we have correct scale, Refactor scale #???
# special case for now:
if self.cov_type == 'fixed scale':
var_resid = self.cov_kwds['scale']
dist = ['norm', 't'][self.use_t]
return PredictionResultsMean(
predicted_mean, var_pred_mean, var_resid,
df=self.df_resid, dist=dist,
row_labels=row_labels, linpred=linpred, link=link)
| Compute prediction results for GLM compatible models. | get_prediction_glm | python | statsmodels/statsmodels | statsmodels/base/_prediction_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py | BSD-3-Clause |
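A numeric sketch of the variance formula used above: the delta method applied to the inverse link, var(mu_hat) ~ g'(eta)^2 * x' Cov(beta) x per row. A log link (exp inverse link) and toy inputs are assumed here.

```python
import numpy as np

exog = np.array([[1.0, 0.5],
                 [1.0, -1.0]])
covb = np.array([[0.04, 0.01],
                 [0.01, 0.09]])
params = np.array([0.2, 0.7])

linpred = exog @ params
link_deriv = np.exp(linpred)                           # derivative of the exp inverse link
var_linpred = (exog * np.dot(covb, exog.T).T).sum(1)   # row-wise x' Cov(beta) x
var_pred_mean = link_deriv**2 * var_linpred
print(var_pred_mean)
```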
def get_prediction_linear(self, exog=None, transform=True,
row_labels=None, pred_kwds=None, index=None):
"""
Compute prediction results for linear prediction.
Parameters
----------
exog : array_like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
row_labels : list of str or None
If row_labels are provided, then they will replace the generated
labels.
pred_kwds : dict, optional
Some models can take additional keyword arguments, such as offset or
additional exog in multi-part models.
See the predict method of the model for the details.
index : slice or array-index
Is used to select rows and columns of cov_params, if the prediction
function only depends on a subset of parameters.
Returns
-------
prediction_results : PredictionResults
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and summary
tables for the prediction.
"""
# prepare exog and row_labels, based on base Results.predict
exog, row_labels = _get_exog_predict(
self,
exog=exog,
transform=transform,
row_labels=row_labels,
)
if pred_kwds is None:
pred_kwds = {}
k1 = exog.shape[1]
if len(self.params) > k1:
# TODO: we allow endpoint transformation only for the first link
index = np.arange(k1)
else:
index = None
# get linear prediction and standard errors
covb = self.cov_params(column=index)
var_pred = (exog * np.dot(covb, exog.T).T).sum(1)
pred_kwds_linear = pred_kwds.copy()
pred_kwds_linear["which"] = "linear"
predicted = self.model.predict(self.params, exog, **pred_kwds_linear)
dist = ['norm', 't'][self.use_t]
res = PredictionResultsBase(predicted, var_pred,
df=self.df_resid, dist=dist,
row_labels=row_labels
)
return res
| Compute prediction results for linear prediction. | get_prediction_linear | python | statsmodels/statsmodels | statsmodels/base/_prediction_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py | BSD-3-Clause |
def get_prediction_monotonic(self, exog=None, transform=True,
row_labels=None, link=None,
pred_kwds=None, index=None):
"""
Compute prediction results when endpoint transformation is valid.
Parameters
----------
exog : array_like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
row_labels : list of str or None
If row_labels are provided, then they will replace the generated
labels.
link : instance of link function
If no link function is provided, then the ``model.family.link`` is
used.
pred_kwds : dict, optional
Some models can take additional keyword arguments, such as offset or
additional exog in multi-part models.
See the predict method of the model for the details.
index : slice or array-index
Is used to select rows and columns of cov_params, if the prediction
function only depends on a subset of parameters.
Returns
-------
prediction_results : PredictionResults
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and summary
tables for the prediction.
"""
# prepare exog and row_labels, based on base Results.predict
exog, row_labels = _get_exog_predict(
self,
exog=exog,
transform=transform,
row_labels=row_labels,
)
if pred_kwds is None:
pred_kwds = {}
if link is None:
link = self.model.family.link
func_deriv = link.inverse_deriv
# get linear prediction and standard errors
covb = self.cov_params(column=index)
linpred_var = (exog * np.dot(covb, exog.T).T).sum(1)
pred_kwds_linear = pred_kwds.copy()
pred_kwds_linear["which"] = "linear"
linpred = self.model.predict(self.params, exog, **pred_kwds_linear)
predicted = self.model.predict(self.params, exog, **pred_kwds)
link_deriv = func_deriv(linpred)
var_pred = link_deriv**2 * linpred_var
dist = ['norm', 't'][self.use_t]
res = PredictionResultsMonotonic(predicted, var_pred,
df=self.df_resid, dist=dist,
row_labels=row_labels, linpred=linpred,
linpred_se=np.sqrt(linpred_var),
func=link.inverse, deriv=func_deriv)
return res
| Compute prediction results when endpoint transformation is valid. | get_prediction_monotonic | python | statsmodels/statsmodels | statsmodels/base/_prediction_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py | BSD-3-Clause |
def f_pred(p):
"""Prediction function as function of params
"""
pred = self.model.predict(p, exog, which=which, **pred_kwds)
if average:
# using `.T` which should work if aggweights is 1-dim
pred = (pred.T * agg_weights.T).mean(-1).T
return pred
| Prediction function as function of params | get_prediction_delta.f_pred | python | statsmodels/statsmodels | statsmodels/base/_prediction_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py | BSD-3-Clause |
def get_prediction_delta(
self,
exog=None,
which="mean",
average=False,
agg_weights=None,
transform=True,
row_labels=None,
pred_kwds=None
):
"""
compute prediction results
Parameters
----------
exog : array_like, optional
The values for which you want to predict.
which : str
The statistic that is predicted. Which statistics are available
depends on the model.predict method.
average : bool
If average is True, then the mean prediction is computed, that is,
predictions are computed for individual exog and then the mean over
observations is used.
If average is False, then the results are the predictions for all
observations, i.e. same length as ``exog``.
agg_weights : ndarray, optional
Aggregation weights, only used if average is True.
The weights are not normalized.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
row_labels : list of str or None
If row_labels are provided, then they will replace the generated
labels.
pred_kwds : dict, optional
Some models can take additional keyword arguments, such as offset or
additional exog in multi-part models.
See the predict method of the model for the details.
Returns
-------
prediction_results : generalized_linear_model.PredictionResults
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and summary
tables for the prediction of the mean and of new observations.
"""
# prepare exog and row_labels, based on base Results.predict
exog, row_labels = _get_exog_predict(
self,
exog=exog,
transform=transform,
row_labels=row_labels,
)
if agg_weights is None:
agg_weights = np.array(1.)
def f_pred(p):
"""Prediction function as function of params
"""
pred = self.model.predict(p, exog, which=which, **pred_kwds)
if average:
# using `.T` which should work if aggweights is 1-dim
pred = (pred.T * agg_weights.T).mean(-1).T
return pred
nlpm = self._get_wald_nonlinear(f_pred)
# TODO: currently returns NonlinearDeltaCov
res = PredictionResultsDelta(nlpm)
return res
| compute prediction results | get_prediction_delta | python | statsmodels/statsmodels | statsmodels/base/_prediction_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py | BSD-3-Clause |
def get_prediction(self, exog=None, transform=True, which="mean",
row_labels=None, average=False, agg_weights=None,
pred_kwds=None):
"""
Compute prediction results when endpoint transformation is valid.
Parameters
----------
exog : array_like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
which : str
Which statistic is to be predicted. Default is "mean".
The available statistics and options depend on the model.
see the model.predict docstring
linear : bool
Linear has been replaced by the `which` keyword and will be
deprecated.
If linear is True, then `which` is ignored and the linear
prediction is returned.
row_labels : list of str or None
If row_labels are provided, then they will replace the generated
labels.
average : bool
If average is True, then the mean prediction is computed, that is,
predictions are computed for individual exog and then the average
over observations is used.
If average is False, then the results are the predictions for all
observations, i.e. same length as ``exog``.
agg_weights : ndarray, optional
Aggregation weights, only used if average is True.
The weights are not normalized.
**kwargs :
Some models can take additional keyword arguments, such as offset,
exposure or additional exog in multi-part models like zero inflated
models.
See the predict method of the model for the details.
Returns
-------
prediction_results : PredictionResults
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and
summary dataframe for the prediction.
Notes
-----
Status: new in 0.14, experimental
"""
use_endpoint = getattr(self.model, "_use_endpoint", True)
if which == "linear":
res = get_prediction_linear(
self,
exog=exog,
transform=transform,
row_labels=row_labels,
pred_kwds=pred_kwds,
)
elif (which == "mean") and (use_endpoint is True) and (average is False):
# endpoint transformation
k1 = self.model.exog.shape[1]
if len(self.params) > k1:
# TODO: we allow endpoint transformation only for the first link
index = np.arange(k1)
else:
index = None
pred_kwds["which"] = which
# TODO: add link or ilink to all link based models (except zi)
link = getattr(self.model, "link", None)
if link is None:
# GLM
if hasattr(self.model, "family"):
link = getattr(self.model.family, "link", None)
if link is None:
# defaulting to log link for count models
import warnings
warnings.warn("using default log-link in get_prediction")
from statsmodels.genmod.families import links
link = links.Log()
res = get_prediction_monotonic(
self,
exog=exog,
transform=transform,
row_labels=row_labels,
link=link,
pred_kwds=pred_kwds,
index=index,
)
else:
# which is not mean or linear, or we need averaging
res = get_prediction_delta(
self,
exog=exog,
which=which,
average=average,
agg_weights=agg_weights,
pred_kwds=pred_kwds,
)
return res | Compute prediction results when endpoint transformation is valid.
Parameters
----------
exog : array_like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
which : str
Which statistic is to be predicted. Default is "mean".
The available statistics and options depend on the model.
see the model.predict docstring
linear : bool
Linear has been replaced by the `which` keyword and will be
deprecated.
If linear is True, then `which` is ignored and the linear
prediction is returned.
row_labels : list of str or None
If row_labels are provided, then they will replace the generated
labels.
average : bool
If average is True, then the mean prediction is computed, that is,
predictions are computed for individual exog and then the average
over observations is used.
If average is False, then the results are the predictions for all
observations, i.e. same length as ``exog``.
agg_weights : ndarray, optional
Aggregation weights, only used if average is True.
The weights are not normalized.
**kwargs :
Some models can take additional keyword arguments, such as offset,
exposure or additional exog in multi-part models like zero inflated
models.
See the predict method of the model for the details.
Returns
-------
prediction_results : PredictionResults
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and
summary dataframe for the prediction.
Notes
-----
Status: new in 0.14, experimental | get_prediction | python | statsmodels/statsmodels | statsmodels/base/_prediction_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py | BSD-3-Clause |
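A minimal usage sketch (not part of the source above) showing how the `which`/`average` keywords reach this method from a fitted count model; the data and settings are made up and assume statsmodels 0.14 or later.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = sm.add_constant(rng.normal(size=(200, 2)))
y = rng.poisson(np.exp(x @ np.array([0.5, 0.2, -0.1])))
res = sm.Poisson(y, x).fit(disp=0)

# per-observation predicted means (endpoint-transformation branch)
pred = res.get_prediction(which="mean")
print(pred.summary_frame().head())

# average predicted mean over all observations (delta-method branch)
pred_avg = res.get_prediction(which="mean", average=True)
print(pred_avg.summary_frame())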
def params_transform_univariate(params, cov_params, link=None, transform=None,
row_labels=None):
"""
results for univariate, nonlinear, monotonically transformed parameters
This provides transformed values, standard errors and confidence interval
for transformations of parameters, for example in calculating rates with
`exp(params)` in the case of Poisson or other models with exponential
mean function.
"""
from statsmodels.genmod.families import links
if link is None and transform is None:
link = links.Log()
if row_labels is None and hasattr(params, 'index'):
row_labels = params.index
params = np.asarray(params)
predicted_mean = link.inverse(params)
link_deriv = link.inverse_deriv(params)
var_pred_mean = link_deriv**2 * np.diag(cov_params)
# TODO: do we want covariance also, or just var/se
dist = stats.norm
# TODO: need ci for linear prediction, method of `lin_pred
linpred = PredictionResultsMean(
params, np.diag(cov_params), dist=dist,
row_labels=row_labels, link=links.Identity())
res = PredictionResultsMean(
predicted_mean, var_pred_mean, dist=dist,
row_labels=row_labels, linpred=linpred, link=link)
return res | results for univariate, nonlinear, monotonically transformed parameters
This provides transformed values, standard errors and confidence interval
for transformations of parameters, for example in calculating rates with
`exp(params)` in the case of Poisson or other models with exponential
mean function. | params_transform_univariate | python | statsmodels/statsmodels | statsmodels/base/_prediction_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py | BSD-3-Clause |
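An illustrative sketch of the helper above: turning Poisson coefficients into rate ratios exp(params) with delta-method standard errors. The import path is inferred from this file's location and the data is made up.

import numpy as np
import statsmodels.api as sm
from statsmodels.base._prediction_inference import params_transform_univariate

rng = np.random.default_rng(1)
x = sm.add_constant(rng.normal(size=(200, 1)))
y = rng.poisson(np.exp(x @ np.array([0.3, 0.4])))
res = sm.Poisson(y, x).fit(disp=0)

rates = params_transform_univariate(res.params, res.cov_params())
print(rates.summary_frame())  # predicted_mean column holds exp(params) with its CI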
def loglikeobs(self, params, pen_weight=None, **kwds):
"""
Log-likelihood of model observations at params
"""
if pen_weight is None:
pen_weight = self.pen_weight
llf = super().loglikeobs(params, **kwds)
nobs_llf = float(llf.shape[0])
if pen_weight != 0:
scale = self._handle_scale(params, **kwds)
llf -= 1/scale * pen_weight / nobs_llf * self.penal.func(params)
return llf | Log-likelihood of model observations at params | loglikeobs | python | statsmodels/statsmodels | statsmodels/base/_penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalized.py | BSD-3-Clause |
def score_numdiff(self, params, pen_weight=None, method='fd', **kwds):
"""score based on finite difference derivative
"""
if pen_weight is None:
pen_weight = self.pen_weight
def loglike(p):
return self.loglike(p, pen_weight=pen_weight, **kwds)
if method == 'cs':
return approx_fprime_cs(params, loglike)
elif method == 'fd':
return approx_fprime(params, loglike, centered=True)
else:
raise ValueError('method not recognized, should be "fd" or "cs"') | score based on finite difference derivative | score_numdiff | python | statsmodels/statsmodels | statsmodels/base/_penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalized.py | BSD-3-Clause |
def score(self, params, pen_weight=None, **kwds):
"""
Gradient of model at params
"""
if pen_weight is None:
pen_weight = self.pen_weight
sc = super().score(params, **kwds)
if pen_weight != 0:
scale = self._handle_scale(params, **kwds)
sc -= 1/scale * pen_weight * self.penal.deriv(params)
return sc | Gradient of model at params | score | python | statsmodels/statsmodels | statsmodels/base/_penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalized.py | BSD-3-Clause |
def score_obs(self, params, pen_weight=None, **kwds):
"""
Gradient of model observations at params
"""
if pen_weight is None:
pen_weight = self.pen_weight
sc = super().score_obs(params, **kwds)
nobs_sc = float(sc.shape[0])
if pen_weight != 0:
scale = self._handle_scale(params, **kwds)
sc -= 1/scale * pen_weight / nobs_sc * self.penal.deriv(params)
return sc | Gradient of model observations at params | score_obs | python | statsmodels/statsmodels | statsmodels/base/_penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalized.py | BSD-3-Clause |
def hessian_numdiff(self, params, pen_weight=None, **kwds):
"""hessian based on finite difference derivative
"""
if pen_weight is None:
pen_weight = self.pen_weight
def loglike(p):
return self.loglike(p, pen_weight=pen_weight, **kwds)
from statsmodels.tools.numdiff import approx_hess
return approx_hess(params, loglike) | hessian based on finite difference derivative | hessian_numdiff | python | statsmodels/statsmodels | statsmodels/base/_penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalized.py | BSD-3-Clause |
def hessian(self, params, pen_weight=None, **kwds):
"""
Hessian of model at params
"""
if pen_weight is None:
pen_weight = self.pen_weight
hess = super().hessian(params, **kwds)
if pen_weight != 0:
scale = self._handle_scale(params, **kwds)
h = self.penal.deriv2(params)
if h.ndim == 1:
hess -= 1/scale * np.diag(pen_weight * h)
else:
hess -= 1/scale * pen_weight * h
return hess | Hessian of model at params | hessian | python | statsmodels/statsmodels | statsmodels/base/_penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalized.py | BSD-3-Clause |
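The penalized methods above all follow one pattern (a restatement for reference, not text from the source): with penalty function p (``self.penal``), weight w (``pen_weight``) and scale :math:`\phi`,

.. math:: \log L_{pen}(\beta) = \log L(\beta) - \frac{w}{\phi}\, p(\beta), \qquad s_{pen}(\beta) = s(\beta) - \frac{w}{\phi}\, p'(\beta), \qquad H_{pen}(\beta) = H(\beta) - \frac{w}{\phi}\, p''(\beta)

The observation-level variants (``loglikeobs``, ``score_obs``) additionally divide the penalty term by the number of observations, so that summing over observations recovers the totals.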
def fit(self, method=None, trim=None, **kwds):
"""minimize negative penalized log-likelihood
Parameters
----------
method : None or str
Method specifies the scipy optimizer as in nonlinear MLE models.
trim : {bool, float}
Default is False or None, which uses no trimming.
If trim is True or a float, then small parameters are set to zero.
If True, then a default threshold is used. If trim is a float, then
it will be used as threshold.
The default threshold is currently 1e-4, but it will change in
future and become penalty function dependent.
kwds : extra keyword arguments
These keyword arguments are treated in the same way as in the
fit method of the underlying model class.
Specifically, additional optimizer keywords and cov_type related
keywords can be added.
"""
# If method is None, then we choose a default method ourselves
# TODO: temporary hack, need extra fit kwds
# we need to rule out fit methods in a model that will not work with
# penalization
from statsmodels.gam.generalized_additive_model import GLMGam
from statsmodels.genmod.generalized_linear_model import GLM
# Only for fit methods supporting max_start_irls
if isinstance(self, (GLM, GLMGam)):
kwds.update({'max_start_irls': 0})
# currently we use `bfgs` by default
if method is None:
method = 'bfgs'
if trim is None:
trim = False
res = super().fit(method=method, **kwds)
if trim is False:
# note boolean check for "is False", not "False_like"
return res
else:
if trim is True:
trim = 1e-4 # trim threshold
# TODO: make it penal function dependent
# temporary standin, only checked for Poisson and GLM,
# and is computationally inefficient
drop_index = np.nonzero(np.abs(res.params) < trim)[0]
keep_index = np.nonzero(np.abs(res.params) > trim)[0]
if drop_index.any():
# TODO: do we need to add results attributes?
res_aux = self._fit_zeros(keep_index, **kwds)
return res_aux
else:
return res | minimize negative penalized log-likelihood
Parameters
----------
method : None or str
Method specifies the scipy optimizer as in nonlinear MLE models.
trim : {bool, float}
Default is False or None, which uses no trimming.
If trim is True or a float, then small parameters are set to zero.
If True, then a default threshold is used. If trim is a float, then
it will be used as threshold.
The default threshold is currently 1e-4, but it will change in
future and become penalty function dependent.
kwds : extra keyword arguments
These keyword arguments are treated in the same way as in the
fit method of the underlying model class.
Specifically, additional optimizer keywords and cov_type related
keywords can be added. | fit | python | statsmodels/statsmodels | statsmodels/base/_penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalized.py | BSD-3-Clause |
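A usage sketch (an assumption, not from the source): `PenalizedMixin` is normally combined with a likelihood model through multiple inheritance; the class name `PoissonPenalized` and the data below are illustrative.

import numpy as np
from statsmodels.base._penalized import PenalizedMixin
from statsmodels.discrete.discrete_model import Poisson

class PoissonPenalized(PenalizedMixin, Poisson):
    pass

rng = np.random.default_rng(2)
x = np.column_stack([np.ones(300), rng.normal(size=(300, 5))])
y = rng.poisson(np.exp(x[:, :2] @ np.array([0.2, 0.5])))  # only two informative columns

mod = PoissonPenalized(y, x, pen_weight=300 * 10)  # penalty and weight handled by the mixin
res = mod.fit(trim=True, disp=0)  # coefficients below the trim threshold are zeroed and the model refit
print(res.params)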
def from_formula_parser(cls, lc):
"""class method to create instance from patsy instance
Parameters
----------
lc : instance
instance of patsy LinearConstraint, or other instances that have
attributes ``lc.coefs, lc.constants, lc.variable_names``
Returns
-------
instance of this class
"""
try:
return cls(lc.constraint_matrix, lc.constraint_values, lc.variable_names)
except AttributeError:
return cls(lc.coefs, lc.constants, lc.variable_names) | class method to create instance from patsy instance
Parameters
----------
lc : instance
instance of patsy LinearConstraint, or other instances that have
attributes ``lc.coefs, lc.constants, lc.variable_names``
Returns
-------
instance of this class | from_formula_parser | python | statsmodels/statsmodels | statsmodels/base/_constraints.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_constraints.py | BSD-3-Clause |
def expand(self, params_reduced):
"""transform from the reduced to the full parameter space
Parameters
----------
params_reduced : array_like
parameters in the transformed space
Returns
-------
params : array_like
parameters in the original space
Notes
-----
If the restriction is not homogeneous, i.e. q is not equal to zero,
then this is an affine transform.
"""
params_reduced = np.asarray(params_reduced)
return self.transf_mat.dot(params_reduced.T).T + self.constant | transform from the reduced to the full parameter space
Parameters
----------
params_reduced : array_like
parameters in the transformed space
Returns
-------
params : array_like
parameters in the original space
Notes
-----
If the restriction is not homogeneous, i.e. q is not equal to zero,
then this is an affine transform. | expand | python | statsmodels/statsmodels | statsmodels/base/_constraints.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_constraints.py | BSD-3-Clause |
def reduce(self, params):
"""transform from the full to the reduced parameter space
Parameters
----------
params : array_like
parameters or data in the original space
Returns
-------
params_reduced : array_like
parameters in the transformed space
This transform can be applied to the original parameters as well
as to the data. If params is 2-d, then each row is transformed.
"""
params = np.asarray(params)
return params.dot(self.transf_mat) | transform from the full to the reduced parameter space
Parameters
----------
params : array_like
parameters or data in the original space
Returns
-------
params_reduced : array_like
parameters in the transformed space
This transform can be applied to the original parameters as well
as to the data. If params is 2-d, then each row is transformed. | reduce | python | statsmodels/statsmodels | statsmodels/base/_constraints.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_constraints.py | BSD-3-Clause |
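A small round-trip sketch for `TransformRestriction`, the class these methods belong to (import path inferred from this file's location); the constraint and values are made up.

import numpy as np
from statsmodels.base._constraints import TransformRestriction

R = np.array([[0., 1., 1.]])
q = np.array([1.])  # constraint: beta_1 + beta_2 = 1
transf = TransformRestriction(R, q)

beta_reduced = np.array([0.3, -0.2])           # free parameters in the reduced space
beta_full = transf.expand(beta_reduced).squeeze()
print(R @ beta_full)                           # ~[1.], the constraint holds by construction
print(transf.reduce(beta_full))                # back to the reduced coordinates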
def transform_params_constraint(params, Sinv, R, q):
"""find the parameters that statisfy linear constraint from unconstrained
The linear constraint R params = q is imposed.
Parameters
----------
params : array_like
unconstrained parameters
Sinv : ndarray, 2d, symmetric
covariance matrix of the parameter estimate
R : ndarray, 2d
constraint matrix
q : ndarray, 1d
values of the constraint
Returns
-------
params_constraint : ndarray
parameters of the same length as params satisfying the constraint
Notes
-----
This is the exact formula for OLS and other linear models. It will be
a local approximation for nonlinear models.
TODO: Is Sinv always the covariance matrix?
In the linear case it can be (X'X)^{-1} or sigmahat^2 (X'X)^{-1}.
My guess is that this is the point in the subspace that satisfies
the constraint that has minimum Mahalanobis distance. Proof ?
"""
rsr = R.dot(Sinv).dot(R.T)
reduction = Sinv.dot(R.T).dot(np.linalg.solve(rsr, R.dot(params) - q))
return params - reduction | find the parameters that satisfy a linear constraint from unconstrained
The linear constraint R params = q is imposed.
Parameters
----------
params : array_like
unconstrained parameters
Sinv : ndarray, 2d, symmetric
covariance matrix of the parameter estimate
R : ndarray, 2d
constraint matrix
q : ndarray, 1d
values of the constraint
Returns
-------
params_constraint : ndarray
parameters of the same length as params satisfying the constraint
Notes
-----
This is the exact formula for OLS and other linear models. It will be
a local approximation for nonlinear models.
TODO: Is Sinv always the covariance matrix?
In the linear case it can be (X'X)^{-1} or sigmahat^2 (X'X)^{-1}.
My guess is that this is the point in the subspace that satisfies
the constraint that has minimum Mahalanobis distance. Proof ? | transform_params_constraint | python | statsmodels/statsmodels | statsmodels/base/_constraints.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_constraints.py | BSD-3-Clause |
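A numeric sketch of the formula above: the projected estimate satisfies R params = q exactly. The values are made up and the import path is inferred from this file's location.

import numpy as np
from statsmodels.base._constraints import transform_params_constraint

R = np.array([[1., -1., 0.]])
q = np.array([0.])                          # constraint: beta_0 == beta_1
params = np.array([1.0, 0.6, -0.3])         # unconstrained estimate
Sinv = np.diag([0.5, 0.25, 0.1])            # made-up covariance of the estimate

b_c = transform_params_constraint(params, Sinv, R, q)
print(b_c)
print(R @ b_c)                              # ~[0.], the constraint is satisfied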
def fit_constrained(model, constraint_matrix, constraint_values,
start_params=None, fit_kwds=None):
# note: self is model instance
"""fit model subject to linear equality constraints
The constraints are of the form `R params = q`
where R is the constraint_matrix and q is the vector of constraint_values.
The estimation creates a new model with transformed design matrix,
exog, and converts the results back to the original parameterization.
Parameters
----------
model: model instance
An instance of a model, see limitations in Notes section
constraint_matrix : array_like, 2D
This is R in the linear equality constraint `R params = q`.
The number of columns needs to be the same as the number of columns
in exog.
constraint_values :
This is `q` in the linear equality constraint `R params = q`
If it is a tuple, then the constraint needs to be given by two
arrays (constraint_matrix, constraint_value), i.e. (R, q).
Otherwise, the constraints can be given as strings or list of
strings.
see t_test for details
start_params : None or array_like
starting values for the optimization. `start_params` needs to be
given in the original parameter space and are internally
transformed.
**fit_kwds : keyword arguments
fit_kwds are used in the optimization of the transformed model.
Returns
-------
params : ndarray ?
estimated parameters (in the original parameterization)
cov_params : ndarray
covariance matrix of the parameter estimates. This is a reverse
transformation of the covariance matrix of the transformed model given
by `cov_params()`
Note: `fit_kwds` can affect the choice of covariance, e.g. by
specifying `cov_type`, which will be reflected in the returned
covariance.
res_constr : results instance
This is the results instance for the created transformed model.
Notes
-----
Limitations:
Models where the number of parameters is different from the number of
columns of exog are not yet supported.
Requires a model that implements an offset option.
"""
self = model # internal alias, used for methods
if fit_kwds is None:
fit_kwds = {}
R, q = constraint_matrix, constraint_values
endog, exog = self.endog, self.exog
transf = TransformRestriction(R, q)
exogp_st = transf.reduce(exog)
offset = exog.dot(transf.constant.squeeze())
if hasattr(self, 'offset'):
offset += self.offset
if start_params is not None:
start_params = transf.reduce(start_params)
# need copy, because we do not want to change it, we do not need deepcopy
import copy
init_kwds = copy.copy(self._get_init_kwds())
# TODO: refactor to combine with above or offset_all
if 'offset' in init_kwds:
del init_kwds['offset']
# using offset as keywords is not supported in all modules
mod_constr = self.__class__(endog, exogp_st, offset=offset, **init_kwds)
res_constr = mod_constr.fit(start_params=start_params, **fit_kwds)
params_orig = transf.expand(res_constr.params).squeeze()
cov_params = transf.transf_mat.dot(res_constr.cov_params()).dot(transf.transf_mat.T)
return params_orig, cov_params, res_constr | fit model subject to linear equality constraints
The constraints are of the form `R params = q`
where R is the constraint_matrix and q is the vector of constraint_values.
The estimation creates a new model with transformed design matrix,
exog, and converts the results back to the original parameterization.
Parameters
----------
model: model instance
An instance of a model, see limitations in Notes section
constraint_matrix : array_like, 2D
This is R in the linear equality constraint `R params = q`.
The number of columns needs to be the same as the number of columns
in exog.
constraint_values :
This is `q` in the linear equality constraint `R params = q`
If it is a tuple, then the constraint needs to be given by two
arrays (constraint_matrix, constraint_value), i.e. (R, q).
Otherwise, the constraints can be given as strings or list of
strings.
see t_test for details
start_params : None or array_like
starting values for the optimization. `start_params` needs to be
given in the original parameter space and are internally
transformed.
**fit_kwds : keyword arguments
fit_kwds are used in the optimization of the transformed model.
Returns
-------
params : ndarray ?
estimated parameters (in the original parameterization)
cov_params : ndarray
covariance matrix of the parameter estimates. This is a reverse
transformation of the covariance matrix of the transformed model given
by `cov_params()`
Note: `fit_kwds` can affect the choice of covariance, e.g. by
specifying `cov_type`, which will be reflected in the returned
covariance.
res_constr : results instance
This is the results instance for the created transformed model.
Notes
-----
Limitations:
Models where the number of parameters is different from the number of
columns of exog are not yet supported.
Requires a model that implement an offset option. | fit_constrained | python | statsmodels/statsmodels | statsmodels/base/_constraints.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_constraints.py | BSD-3-Clause |
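A usage sketch for the standalone helper (data and constraint are made up; the import path follows this file's location). It is intended for models that accept an `offset`, e.g. Poisson.

import numpy as np
from statsmodels.base._constraints import fit_constrained
from statsmodels.discrete.discrete_model import Poisson

rng = np.random.default_rng(3)
x = np.column_stack([np.ones(200), rng.normal(size=(200, 2))])
y = rng.poisson(np.exp(x @ np.array([0.1, 0.3, 0.3])))

R = np.array([[0., 1., -1.]])   # impose beta_1 == beta_2
q = np.array([0.])
params, cov, res_constr = fit_constrained(Poisson(y, x), R, q,
                                          fit_kwds={"disp": 0})
print(params)                   # constrained estimates; params[1] equals params[2]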
def fit_constrained_wrap(model, constraints, start_params=None, **fit_kwds):
"""fit_constraint that returns a results instance
This is a development version for fit_constrained methods or
fit_constrained as standalone function.
It will not work correctly for all models because creating a new
results instance is not standardized for use outside the `fit` methods,
and might need adjustments for this.
This is the prototype for the fit_constrained method that has been added
to Poisson and GLM.
"""
self = model # alias for use as method
# constraints = (R, q)
# TODO: temporary trailing underscore to not overwrite the monkey
# patched version
# TODO: decide whether to move the imports
from statsmodels.formula._manager import FormulaManager
# we need this import if we copy it to a different module
# from statsmodels.base._constraints import fit_constrained
# same pattern as in base.LikelihoodModel.t_test
mgr = FormulaManager()
lc = mgr.get_linear_constraints(constraints, self.exog_names)
R, q = lc.constraint_matrix, lc.constraint_values
# TODO: add start_params option, need access to transformation
# fit_constrained needs to do the transformation
params, cov, res_constr = fit_constrained(self, R, q,
start_params=start_params,
fit_kwds=fit_kwds)
# create dummy results Instance, TODO: wire up properly
res = self.fit(start_params=params, maxiter=0,
warn_convergence=False) # we get a wrapper back
res._results.params = params
res._results.cov_params_default = cov
cov_type = fit_kwds.get('cov_type', 'nonrobust')
if cov_type == 'nonrobust':
res._results.normalized_cov_params = cov / res_constr.scale
else:
res._results.normalized_cov_params = None
k_constr = len(q)
res._results.df_resid += k_constr
res._results.df_model -= k_constr
res._results.constraints = LinearConstraints.from_formula_parser(lc)
res._results.k_constr = k_constr
res._results.results_constrained = res_constr
return res | fit_constraint that returns a results instance
This is a development version for fit_constrained methods or
fit_constrained as standalone function.
It will not work correctly for all models because creating a new
results instance is not standardized for use outside the `fit` methods,
and might need adjustments for this.
This is the prototype for the fit_constrained method that has been added
to Poisson and GLM. | fit_constrained_wrap | python | statsmodels/statsmodels | statsmodels/base/_constraints.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_constraints.py | BSD-3-Clause |
def fit_l1_slsqp(
f, score, start_params, args, kwargs, disp=False, maxiter=1000,
callback=None, retall=False, full_output=False, hess=None):
"""
Solve the l1 regularized problem using scipy.optimize.fmin_slsqp().
Specifically: We convert the convex but non-smooth problem
.. math:: \\min_\\beta f(\\beta) + \\sum_k\\alpha_k |\\beta_k|
via the transformation to the smooth, convex, constrained problem in twice
as many variables (adding the "added variables" :math:`u_k`)
.. math:: \\min_{\\beta,u} f(\\beta) + \\sum_k\\alpha_k u_k,
subject to
.. math:: -u_k \\leq \\beta_k \\leq u_k.
Parameters
----------
All the usual parameters from LikelihoodModel.fit
alpha : non-negative scalar or numpy array (same size as parameters)
The weight multiplying the l1 penalty term
trim_mode : 'auto', 'size', or 'off'
If not 'off', trim (set to zero) parameters that would have been zero
if the solver reached the theoretical minimum.
If 'auto', trim params using the Theory above.
If 'size', trim params if they have very small absolute value
size_trim_tol : float or 'auto' (default = 'auto')
For use when trim_mode == 'size'
auto_trim_tol : float
For use when trim_mode == 'auto'.
qc_tol : float
Print warning and do not allow auto trim when (ii) in "Theory" (above)
is violated by this much.
qc_verbose : bool
If true, print out a full QC report upon failure
acc : float (default 1e-6)
Requested accuracy as used by slsqp
"""
start_params = np.array(start_params).ravel('F')
### Extract values
# k_params is total number of covariates,
# possibly including a leading constant.
k_params = len(start_params)
# The start point
x0 = np.append(start_params, np.fabs(start_params))
# alpha is the regularization parameter
alpha = np.array(kwargs['alpha_rescaled']).ravel('F')
# Make sure it's a vector
alpha = alpha * np.ones(k_params)
assert alpha.min() >= 0
# Convert display parameters to scipy.optimize form
disp_slsqp = _get_disp_slsqp(disp, retall)
# Set/retrieve the desired accuracy
acc = kwargs.setdefault('acc', 1e-10)
### Wrap up for use in fmin_slsqp
def func(x_full):
return _objective_func(f, x_full, k_params, alpha, *args)
def f_ieqcons_wrap(x_full):
return _f_ieqcons(x_full, k_params)
def fprime_wrap(x_full):
return _fprime(score, x_full, k_params, alpha)
def fprime_ieqcons_wrap(x_full):
return _fprime_ieqcons(x_full, k_params)
### Call the solver
results = fmin_slsqp(
func, x0, f_ieqcons=f_ieqcons_wrap, fprime=fprime_wrap, acc=acc,
iter=maxiter, disp=disp_slsqp, full_output=full_output,
fprime_ieqcons=fprime_ieqcons_wrap)
params = np.asarray(results[0][:k_params])
### Post-process
# QC
qc_tol = kwargs['qc_tol']
qc_verbose = kwargs['qc_verbose']
passed = l1_solvers_common.qc_results(
params, alpha, score, qc_tol, qc_verbose)
# Possibly trim
trim_mode = kwargs['trim_mode']
size_trim_tol = kwargs['size_trim_tol']
auto_trim_tol = kwargs['auto_trim_tol']
params, trimmed = l1_solvers_common.do_trim_params(
params, k_params, alpha, score, passed, trim_mode, size_trim_tol,
auto_trim_tol)
### Pack up return values for statsmodels optimizers
# TODO These retvals are returned as mle_retvals...but the fit was not ML.
# This could be confusing someday.
if full_output:
x_full, fx, its, imode, smode = results
fopt = func(np.asarray(x_full))
converged = (imode == 0)
warnflag = str(imode) + ' ' + smode
iterations = its
gopt = float('nan') # Objective is non-differentiable
hopt = float('nan')
retvals = {
'fopt': fopt, 'converged': converged, 'iterations': iterations,
'gopt': gopt, 'hopt': hopt, 'trimmed': trimmed,
'warnflag': warnflag}
### Return
if full_output:
return params, retvals
else:
return params | Solve the l1 regularized problem using scipy.optimize.fmin_slsqp().
Specifically: We convert the convex but non-smooth problem
.. math:: \\min_\\beta f(\\beta) + \\sum_k\\alpha_k |\\beta_k|
via the transformation to the smooth, convex, constrained problem in twice
as many variables (adding the "added variables" :math:`u_k`)
.. math:: \\min_{\\beta,u} f(\\beta) + \\sum_k\\alpha_k u_k,
subject to
.. math:: -u_k \\leq \\beta_k \\leq u_k.
Parameters
----------
All the usual parameters from LikelihoodModel.fit
alpha : non-negative scalar or numpy array (same size as parameters)
The weight multiplying the l1 penalty term
trim_mode : 'auto', 'size', or 'off'
If not 'off', trim (set to zero) parameters that would have been zero
if the solver reached the theoretical minimum.
If 'auto', trim params using the Theory above.
If 'size', trim params if they have very small absolute value
size_trim_tol : float or 'auto' (default = 'auto')
For use when trim_mode == 'size'
auto_trim_tol : float
For use when trim_mode == 'auto'.
qc_tol : float
Print warning and do not allow auto trim when (ii) in "Theory" (above)
is violated by this much.
qc_verbose : bool
If true, print out a full QC report upon failure
acc : float (default 1e-6)
Requested accuracy as used by slsqp | fit_l1_slsqp | python | statsmodels/statsmodels | statsmodels/base/l1_slsqp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_slsqp.py | BSD-3-Clause |
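This solver is reached through `fit_regularized(method="l1")` on the discrete models; a minimal sketch with made-up data:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(4)
x = sm.add_constant(rng.normal(size=(500, 10)))
p = 1 / (1 + np.exp(-(x[:, :3] @ np.array([0.3, 1.0, -1.0]))))
y = rng.binomial(1, p)

res = sm.Logit(y, x).fit_regularized(method="l1", alpha=10.0, disp=0)
print(res.params)   # with trim_mode='auto' (the default) many coefficients are exactly zero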
def _objective_func(f, x_full, k_params, alpha, *args):
"""
The regularized objective function
"""
x_params = x_full[:k_params]
x_added = x_full[k_params:]
## Return
return f(x_params, *args) + (alpha * x_added).sum() | The regularized objective function | _objective_func | python | statsmodels/statsmodels | statsmodels/base/l1_slsqp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_slsqp.py | BSD-3-Clause |
def _fprime(score, x_full, k_params, alpha):
"""
The regularized derivative
"""
x_params = x_full[:k_params]
# The derivative just appends a vector of constants
return np.append(score(x_params), alpha) | The regularized derivative | _fprime | python | statsmodels/statsmodels | statsmodels/base/l1_slsqp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_slsqp.py | BSD-3-Clause |
def _f_ieqcons(x_full, k_params):
"""
The inequality constraints.
"""
x_params = x_full[:k_params]
x_added = x_full[k_params:]
# All entries in this vector must be \geq 0 in a feasible solution
return np.append(x_params + x_added, x_added - x_params) | The inequality constraints. | _f_ieqcons | python | statsmodels/statsmodels | statsmodels/base/l1_slsqp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_slsqp.py | BSD-3-Clause |
def _fprime_ieqcons(x_full, k_params):
"""
Derivative of the inequality constraints
"""
I = np.eye(k_params) # noqa:E741
A = np.concatenate((I, I), axis=1)
B = np.concatenate((-I, I), axis=1)
C = np.concatenate((A, B), axis=0)
## Return
return C | Derivative of the inequality constraints | _fprime_ieqcons | python | statsmodels/statsmodels | statsmodels/base/l1_slsqp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_slsqp.py | BSD-3-Clause |
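A quick check of the encoding used by the two helpers above: the stacked vector is elementwise nonnegative exactly when |beta_k| <= u_k for every k (illustration only; the values are made up).

import numpy as np

beta = np.array([0.5, -2.0])
u = np.array([1.0, 3.0])
x_full = np.append(beta, u)
print(_f_ieqcons(x_full, k_params=2))       # [1.5, 1., 0.5, 5.] -> all nonnegative, feasible
print(_fprime_ieqcons(x_full, k_params=2))  # constant Jacobian [[I, I], [-I, I]]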
def save(self, fname, remove_data=False):
"""
Save a pickle of this instance.
Parameters
----------
fname : {str, handle}
Either a filename or a valid file handle.
remove_data : bool
If False (default), then the instance is pickled without changes.
If True, then all arrays with length nobs are set to None before
pickling. See the remove_data method.
In some cases not all arrays will be set to None.
"""
from statsmodels.iolib.smpickle import save_pickle
if remove_data:
self.remove_data()
save_pickle(self, fname) | Save a pickle of this instance.
Parameters
----------
fname : {str, handle}
Either a filename or a valid file handle.
remove_data : bool
If False (default), then the instance is pickled without changes.
If True, then all arrays with length nobs are set to None before
pickling. See the remove_data method.
In some cases not all arrays will be set to None. | save | python | statsmodels/statsmodels | statsmodels/base/wrapper.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/wrapper.py | BSD-3-Clause |
def load(cls, fname):
"""
Load a pickled results instance
.. warning::
Loading pickled models is not secure against erroneous or
maliciously constructed data. Never unpickle data received from
an untrusted or unauthenticated source.
Parameters
----------
fname : {str, handle}
A string filename or a file handle.
Returns
-------
Results
The unpickled results instance.
"""
from statsmodels.iolib.smpickle import load_pickle
return load_pickle(fname) | Load a pickled results instance
.. warning::
Loading pickled models is not secure against erroneous or
maliciously constructed data. Never unpickle data received from
an untrusted or unauthenticated source.
Parameters
----------
fname : {str, handle}
A string filename or a file handle.
Returns
-------
Results
The unpickled results instance. | load | python | statsmodels/statsmodels | statsmodels/base/wrapper.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/wrapper.py | BSD-3-Clause |
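A sketch of the save/load round trip through the public API (the file name is arbitrary and the data is made up).

import numpy as np
import statsmodels.api as sm

x = sm.add_constant(np.arange(10.0))
y = 1.0 + 0.5 * np.arange(10.0) + np.random.default_rng(5).normal(scale=0.1, size=10)
res = sm.OLS(y, x).fit()

res.save("ols_results.pickle", remove_data=True)  # nobs-length arrays are dropped
res2 = sm.load("ols_results.pickle")              # statsmodels.api.load unpickles the instance
print(res2.params)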
def fit_l1_cvxopt_cp(
f, score, start_params, args, kwargs, disp=False, maxiter=100,
callback=None, retall=False, full_output=False, hess=None):
"""
Solve the l1 regularized problem using cvxopt.solvers.cp
Specifically: We convert the convex but non-smooth problem
.. math:: \\min_\\beta f(\\beta) + \\sum_k\\alpha_k |\\beta_k|
via the transformation to the smooth, convex, constrained problem in twice
as many variables (adding the "added variables" :math:`u_k`)
.. math:: \\min_{\\beta,u} f(\\beta) + \\sum_k\\alpha_k u_k,
subject to
.. math:: -u_k \\leq \\beta_k \\leq u_k.
Parameters
----------
All the usual parameters from LikelihoodModel.fit
alpha : non-negative scalar or numpy array (same size as parameters)
The weight multiplying the l1 penalty term
trim_mode : 'auto', 'size', or 'off'
If not 'off', trim (set to zero) parameters that would have been zero
if the solver reached the theoretical minimum.
If 'auto', trim params using the Theory above.
If 'size', trim params if they have very small absolute value
size_trim_tol : float or 'auto' (default = 'auto')
For use when trim_mode == 'size'
auto_trim_tol : float
For use when trim_mode == 'auto'.
qc_tol : float
Print warning and do not allow auto trim when (ii) in "Theory" (above)
is violated by this much.
qc_verbose : bool
If true, print out a full QC report upon failure
abstol : float
absolute accuracy (default: 1e-7).
reltol : float
relative accuracy (default: 1e-6).
feastol : float
tolerance for feasibility conditions (default: 1e-7).
refinement : int
number of iterative refinement steps when solving KKT equations
(default: 1).
"""
from cvxopt import matrix, solvers
start_params = np.array(start_params).ravel('F')
## Extract arguments
# k_params is total number of covariates, possibly including a leading constant.
k_params = len(start_params)
# The start point
x0 = np.append(start_params, np.fabs(start_params))
x0 = matrix(x0, (2 * k_params, 1))
# The regularization parameter
alpha = np.array(kwargs['alpha_rescaled']).ravel('F')
# Make sure it's a vector
alpha = alpha * np.ones(k_params)
assert alpha.min() >= 0
## Wrap up functions for cvxopt
def f_0(x):
return _objective_func(f, x, k_params, alpha, *args)
def Df(x):
return _fprime(score, x, k_params, alpha)
G = _get_G(k_params) # Inequality constraint matrix, Gx \leq h
h = matrix(0.0, (2 * k_params, 1)) # RHS in inequality constraint
def H(x, z):
return _hessian_wrapper(hess, x, z, k_params)
## Define the optimization function
def F(x=None, z=None):
if x is None:
return 0, x0
elif z is None:
return f_0(x), Df(x)
else:
return f_0(x), Df(x), H(x, z)
## Convert optimization settings to cvxopt form
solvers.options['show_progress'] = disp
solvers.options['maxiters'] = maxiter
if 'abstol' in kwargs:
solvers.options['abstol'] = kwargs['abstol']
if 'reltol' in kwargs:
solvers.options['reltol'] = kwargs['reltol']
if 'feastol' in kwargs:
solvers.options['feastol'] = kwargs['feastol']
if 'refinement' in kwargs:
solvers.options['refinement'] = kwargs['refinement']
### Call the optimizer
results = solvers.cp(F, G, h)
x = np.asarray(results['x']).ravel()
params = x[:k_params]
### Post-process
# QC
qc_tol = kwargs['qc_tol']
qc_verbose = kwargs['qc_verbose']
passed = l1_solvers_common.qc_results(
params, alpha, score, qc_tol, qc_verbose)
# Possibly trim
trim_mode = kwargs['trim_mode']
size_trim_tol = kwargs['size_trim_tol']
auto_trim_tol = kwargs['auto_trim_tol']
params, trimmed = l1_solvers_common.do_trim_params(
params, k_params, alpha, score, passed, trim_mode, size_trim_tol,
auto_trim_tol)
### Pack up return values for statsmodels
# TODO These retvals are returned as mle_retvals...but the fit was not ML
if full_output:
fopt = f_0(x)
gopt = float('nan') # Objective is non-differentiable
hopt = float('nan')
iterations = float('nan')
converged = (results['status'] == 'optimal')
warnflag = results['status']
retvals = {
'fopt': fopt, 'converged': converged, 'iterations': iterations,
'gopt': gopt, 'hopt': hopt, 'trimmed': trimmed,
'warnflag': warnflag}
else:
x = np.array(results['x']).ravel()
params = x[:k_params]
### Return results
if full_output:
return params, retvals
else:
return params | Solve the l1 regularized problem using cvxopt.solvers.cp
Specifically: We convert the convex but non-smooth problem
.. math:: \\min_\\beta f(\\beta) + \\sum_k\\alpha_k |\\beta_k|
via the transformation to the smooth, convex, constrained problem in twice
as many variables (adding the "added variables" :math:`u_k`)
.. math:: \\min_{\\beta,u} f(\\beta) + \\sum_k\\alpha_k u_k,
subject to
.. math:: -u_k \\leq \\beta_k \\leq u_k.
Parameters
----------
All the usual parameters from LikelihoodModel.fit
alpha : non-negative scalar or numpy array (same size as parameters)
The weight multiplying the l1 penalty term
trim_mode : 'auto', 'size', or 'off'
If not 'off', trim (set to zero) parameters that would have been zero
if the solver reached the theoretical minimum.
If 'auto', trim params using the Theory above.
If 'size', trim params if they have very small absolute value
size_trim_tol : float or 'auto' (default = 'auto')
For use when trim_mode == 'size'
auto_trim_tol : float
For use when trim_mode == 'auto'.
qc_tol : float
Print warning and do not allow auto trim when (ii) in "Theory" (above)
is violated by this much.
qc_verbose : bool
If true, print out a full QC report upon failure
abstol : float
absolute accuracy (default: 1e-7).
reltol : float
relative accuracy (default: 1e-6).
feastol : float
tolerance for feasibility conditions (default: 1e-7).
refinement : int
number of iterative refinement steps when solving KKT equations
(default: 1). | fit_l1_cvxopt_cp | python | statsmodels/statsmodels | statsmodels/base/l1_cvxopt.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_cvxopt.py | BSD-3-Clause |
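The cvxopt backend is selected with `fit_regularized(method="l1_cvxopt_cp")` and requires the optional cvxopt package; a sketch with made-up data:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(6)
x = sm.add_constant(rng.normal(size=(300, 5)))
y = rng.binomial(1, 1 / (1 + np.exp(-x[:, 1])))

res = sm.Logit(y, x).fit_regularized(method="l1_cvxopt_cp", alpha=5.0, disp=0)
print(res.params)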
def _objective_func(f, x, k_params, alpha, *args):
"""
The regularized objective function.
"""
from cvxopt import matrix
x_arr = np.asarray(x)
params = x_arr[:k_params].ravel()
u = x_arr[k_params:]
# Call the numpy version
objective_func_arr = f(params, *args) + (alpha * u).sum()
# Return
return matrix(objective_func_arr) | The regularized objective function. | _objective_func | python | statsmodels/statsmodels | statsmodels/base/l1_cvxopt.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_cvxopt.py | BSD-3-Clause |
def _fprime(score, x, k_params, alpha):
"""
The regularized derivative.
"""
from cvxopt import matrix
x_arr = np.asarray(x)
params = x_arr[:k_params].ravel()
# Call the numpy version
# The derivative just appends a vector of constants
fprime_arr = np.append(score(params), alpha)
# Return
return matrix(fprime_arr, (1, 2 * k_params)) | The regularized derivative. | _fprime | python | statsmodels/statsmodels | statsmodels/base/l1_cvxopt.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_cvxopt.py | BSD-3-Clause |
def _get_G(k_params):
"""
The linear inequality constraint matrix.
"""
from cvxopt import matrix
I = np.eye(k_params) # noqa:E741
A = np.concatenate((-I, -I), axis=1)
B = np.concatenate((I, -I), axis=1)
C = np.concatenate((A, B), axis=0)
# Return
return matrix(C) | The linear inequality constraint matrix. | _get_G | python | statsmodels/statsmodels | statsmodels/base/l1_cvxopt.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_cvxopt.py | BSD-3-Clause |
def _hessian_wrapper(hess, x, z, k_params):
"""
Wraps the hessian up in the form for cvxopt.
cvxopt wants the hessian of the objective function and the constraints.
Since our constraints are linear, this part is all zeros.
"""
from cvxopt import matrix
x_arr = np.asarray(x)
params = x_arr[:k_params].ravel()
zh_x = np.asarray(z[0]) * hess(params)
zero_mat = np.zeros(zh_x.shape)
A = np.concatenate((zh_x, zero_mat), axis=1)
B = np.concatenate((zero_mat, zero_mat), axis=1)
zh_x_ext = np.concatenate((A, B), axis=0)
return matrix(zh_x_ext, (2 * k_params, 2 * k_params)) | Wraps the hessian up in the form for cvxopt.
cvxopt wants the hessian of the objective function and the constraints.
Since our constraints are linear, this part is all zeros. | _hessian_wrapper | python | statsmodels/statsmodels | statsmodels/base/l1_cvxopt.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_cvxopt.py | BSD-3-Clause |
def _get_penal(self, weights=None):
"""create new Penalty instance
"""
return SCADSmoothed(0.1, c0=0.0001, weights=weights) | create new Penalty instance | _get_penal | python | statsmodels/statsmodels | statsmodels/base/_screening.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_screening.py | BSD-3-Clause |
def ranking_measure(self, res_pen, exog, keep=None):
"""compute measure for ranking exog candidates for inclusion
"""
endog = self.endog
if self.ranking_project:
assert res_pen.model.exog.shape[1] == len(keep)
ex_incl = res_pen.model.exog[:, keep]
exog = exog - ex_incl.dot(np.linalg.pinv(ex_incl).dot(exog))
if self.ranking_attr == 'predicted_poisson':
# I keep this for more experiments
# TODO: does it really help to change/trim params
# we are not reestimating with trimmed model
p = res_pen.params.copy()
if keep is not None:
p[~keep] = 0
predicted = res_pen.model.predict(p)
# this is currently hardcoded for Poisson
resid_factor = (endog - predicted) / np.sqrt(predicted)
elif self.ranking_attr[:6] == 'model.':
# use model method, this is intended for score_factor
attr = self.ranking_attr.split('.')[1]
resid_factor = getattr(res_pen.model, attr)(res_pen.params)
if resid_factor.ndim == 2:
# for score_factor when extra params are in model
resid_factor = resid_factor[:, 0]
mom_cond = np.abs(resid_factor.dot(exog))**2
else:
# use results attribute
resid_factor = getattr(res_pen, self.ranking_attr)
mom_cond = np.abs(resid_factor.dot(exog))**2
return mom_cond | compute measure for ranking exog candidates for inclusion | ranking_measure | python | statsmodels/statsmodels | statsmodels/base/_screening.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_screening.py | BSD-3-Clause |
def screen_exog(self, exog, endog=None, maxiter=100, method='bfgs',
disp=False, fit_kwds=None):
"""screen and select variables (columns) in exog
Parameters
----------
exog : ndarray
candidate explanatory variables that are screened for inclusion in
the model
endog : ndarray (optional)
use a new endog in the screening model.
This is not tested yet, and might not work correctly
maxiter : int
number of screening iterations
method : str
optimization method to use in fit, needs to be only of the gradient
optimizers
disp : bool
display option for fit during optimization
Returns
-------
res_screen : instance of ScreeningResults
The attribute `results_final` contains the results instance
with the final model selection.
`idx_nonzero` contains the index of the selected exog in the full
exog, combined exog that are always kept plus exog_candidates.
see ScreeningResults for a full description
"""
model_class = self.model_class
if endog is None:
# allow a different endog than used in model
endog = self.endog
x0 = self.exog_keep
k_keep = self.k_keep
x1 = exog
k_current = x0.shape[1]
# TODO: remove the need for x, use x1 separately from x0
# needs change to idx to be based on x1 (candidate variables)
x = np.column_stack((x0, x1))
nobs, k_vars = x.shape
fkwds = fit_kwds if fit_kwds is not None else {}
fit_kwds = {'maxiter': 200, 'disp': False}
fit_kwds.update(fkwds)
history = defaultdict(list)
idx_nonzero = np.arange(k_keep, dtype=int)
keep = np.ones(k_keep, np.bool_)
idx_excl = np.arange(k_keep, k_vars)
mod_pen = model_class(endog, x0, **self.init_kwds)
# do not penalize initial estimate
mod_pen.pen_weight = 0
res_pen = mod_pen.fit(**fit_kwds)
start_params = res_pen.params
converged = False
idx_old = []
for it in range(maxiter):
# candidates for inclusion in next iteration
x1 = x[:, idx_excl]
mom_cond = self.ranking_measure(res_pen, x1, keep=keep)
assert len(mom_cond) == len(idx_excl)
mcs = np.sort(mom_cond)[::-1]
idx_thr = min((self.k_max_add, k_current + self.k_add, len(mcs)))
threshold = mcs[idx_thr]
# indices of exog in current expansion model
idx = np.concatenate((idx_nonzero, idx_excl[mom_cond > threshold]))
start_params2 = np.zeros(len(idx))
start_params2[:len(start_params)] = start_params
if self.use_weights:
weights = np.ones(len(idx))
weights[:k_keep] = 0
# modify Penalty instance attached to self
# dangerous if res_pen is reused
self.penal.weights = weights
mod_pen = model_class(endog, x[:, idx], penal=self.penal,
pen_weight=self.pen_weight,
**self.init_kwds)
res_pen = mod_pen.fit(method=method,
start_params=start_params2,
warn_convergence=False, skip_hessian=True,
**fit_kwds)
keep = np.abs(res_pen.params) > self.threshold_trim
# use largest params to keep
if keep.sum() > self.k_max_included:
# TODO we can use now np.partition with partial sort
thresh_params = np.sort(np.abs(res_pen.params))[
-self.k_max_included]
keep2 = np.abs(res_pen.params) > thresh_params
keep = np.logical_and(keep, keep2)
# Note: idx and keep are for current expansion model
# idx_nonzero has indices of selected variables in full exog
keep[:k_keep] = True # always keep exog_keep
idx_nonzero = idx[keep]
if disp:
print(keep)
print(idx_nonzero)
# x0 is exog of currently selected model, not used in iteration
# x0 = x[:, idx_nonzero]
k_current = len(idx_nonzero)
start_params = res_pen.params[keep]
# use mask to get excluded indices
mask_excl = np.ones(k_vars, dtype=bool)
mask_excl[idx_nonzero] = False
idx_excl = np.nonzero(mask_excl)[0]
history['idx_nonzero'].append(idx_nonzero)
history['keep'].append(keep)
history['params_keep'].append(start_params)
history['idx_added'].append(idx)
if (len(idx_nonzero) == len(idx_old) and
(idx_nonzero == idx_old).all()):
converged = True
break
idx_old = idx_nonzero
# final estimate
# check that we still have exog_keep
assert np.all(idx_nonzero[:k_keep] == np.arange(k_keep))
if self.use_weights:
weights = np.ones(len(idx_nonzero))
weights[:k_keep] = 0
# create new Penalty instance to avoid sharing attached penal
penal = self._get_penal(weights=weights)
else:
penal = self.penal
mod_final = model_class(endog, x[:, idx_nonzero],
penal=penal,
pen_weight=self.pen_weight,
**self.init_kwds)
res_final = mod_final.fit(method=method,
start_params=start_params,
warn_convergence=False,
**fit_kwds)
# set exog_names for final model
xnames = ['var%4d' % ii for ii in idx_nonzero]
res_final.model.exog_names[k_keep:] = xnames[k_keep:]
res = ScreeningResults(self,
results_pen = res_pen,
results_final = res_final,
idx_nonzero = idx_nonzero,
idx_exog = idx_nonzero[k_keep:] - k_keep,
idx_excl = idx_excl,
history = history,
converged = converged,
iterations = it + 1 # it is 0-based
)
return res | screen and select variables (columns) in exog
Parameters
----------
exog : ndarray
candidate explanatory variables that are screened for inclusion in
the model
endog : ndarray (optional)
use a new endog in the screening model.
This is not tested yet, and might not work correctly
maxiter : int
number of screening iterations
method : str
optimization method to use in fit, needs to be only of the gradient
optimizers
disp : bool
display option for fit during optimization
Returns
-------
res_screen : instance of ScreeningResults
The attribute `results_final` contains the results instance
with the final model selection.
`idx_nonzero` contains the index of the selected exog in the full
exog, combined exog that are always kept plus exog_candidates.
see ScreeningResults for a full description | screen_exog | python | statsmodels/statsmodels | statsmodels/base/_screening.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_screening.py | BSD-3-Clause |
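A usage sketch of the surrounding screening class (assumed to be statsmodels' `VariableScreening`; the names, import paths and data here are assumptions for illustration).

import numpy as np
from statsmodels.base._penalized import PenalizedMixin
from statsmodels.base._screening import VariableScreening
from statsmodels.discrete.discrete_model import Poisson

class PoissonPenalized(PenalizedMixin, Poisson):
    pass

rng = np.random.default_rng(7)
n = 500
x_cand = rng.normal(size=(n, 100))                     # candidate columns to screen
y = rng.poisson(np.exp(0.1 + 0.5 * x_cand[:, 0] - 0.5 * x_cand[:, 1]))
x_keep = np.ones((n, 1))                               # the constant is always kept

screener = VariableScreening(PoissonPenalized(y, x_keep))
res_screen = screener.screen_exog(x_cand)
print(res_screen.idx_nonzero)                          # indices of the selected columns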
def screen_exog_iterator(self, exog_iterator):
"""
batched version of screen exog
This screens variables in a two step process:
In the first step screen_exog is used on each element of the
exog_iterator, and the batch winners are collected.
In the second step all batch winners are combined into a new array
of exog candidates and `screen_exog` is used to select a final
model.
Parameters
----------
exog_iterator : iterator over ndarrays
Returns
-------
res_screen_final : instance of ScreeningResults
This is the instance returned by the second round call to
`screen_exog`. Additional attributes are added to provide
more information about the batched selection process.
The index of final nonzero variables is
`idx_nonzero_batches` which is a 2-dimensional array with batch
index in the first column and variable index within batch in the
second column. They can be used jointly as index for the data
in the exog_iterator.
see ScreeningResults for a full description
"""
k_keep = self.k_keep
# res_batches = []
res_idx = []
exog_winner = []
exog_idx = []
for ex in exog_iterator:
res_screen = self.screen_exog(ex, maxiter=20)
# avoid storing res_screen, only for debugging
# res_batches.append(res_screen)
res_idx.append(res_screen.idx_nonzero)
exog_winner.append(ex[:, res_screen.idx_nonzero[k_keep:] - k_keep])
exog_idx.append(res_screen.idx_nonzero[k_keep:] - k_keep)
exog_winner = np.column_stack(exog_winner)
res_screen_final = self.screen_exog(exog_winner, maxiter=20)
exog_winner_names = ['var%d_%d' % (bidx, idx)
for bidx, batch in enumerate(exog_idx)
for idx in batch]
idx_full = [(bidx, idx)
for bidx, batch in enumerate(exog_idx)
for idx in batch]
ex_final_idx = res_screen_final.idx_nonzero[k_keep:] - k_keep
final_names = np.array(exog_winner_names)[ex_final_idx]
res_screen_final.idx_nonzero_batches = np.array(idx_full)[ex_final_idx]
res_screen_final.exog_final_names = final_names
history = {'idx_nonzero': res_idx,
'idx_exog': exog_idx}
res_screen_final.history_batches = history
return res_screen_final | batched version of screen exog
This screens variables in a two step process:
In the first step screen_exog is used on each element of the
exog_iterator, and the batch winners are collected.
In the second step all batch winners are combined into a new array
of exog candidates and `screen_exog` is used to select a final
model.
Parameters
----------
exog_iterator : iterator over ndarrays
Returns
-------
res_screen_final : instance of ScreeningResults
This is the instance returned by the second round call to
`screen_exog`. Additional attributes are added to provide
more information about the batched selection process.
The index of final nonzero variables is
`idx_nonzero_batches` which is a 2-dimensional array with batch
index in the first column and variable index within batch in the
second column. They can be used jointly as index for the data
in the exog_iterator.
see ScreeningResults for a full description | screen_exog_iterator | python | statsmodels/statsmodels | statsmodels/base/_screening.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_screening.py | BSD-3-Clause |
def _asarray_2d_null_rows(x):
"""
Makes sure input is an array and is 2d. Makes sure output is 2d. True
indicates a null in the rows of 2d x.
"""
# Have to have the asarrays because isnull does not account for array_like
# input
x = np.asarray(x)
if x.ndim == 1:
x = x[:, None]
return np.any(isnull(x), axis=1)[:, None] | Makes sure input is an array and is 2d. Makes sure output is 2d. True
indicates a null in the rows of 2d x. | _asarray_2d_null_rows | python | statsmodels/statsmodels | statsmodels/base/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/data.py | BSD-3-Clause |
def _nan_rows(*arrs):
"""
Returns a boolean array which is True where any of the rows in any
of the _2d_ arrays in arrs are NaNs. Inputs can be any mixture of Series,
DataFrames or array_like.
"""
if len(arrs) == 1:
arrs += ([[False]],)
def _nan_row_maybe_two_inputs(x, y):
# check for dtype bc dataframe has dtypes
x_is_boolean_array = hasattr(x, "dtype") and x.dtype == bool and x
return np.logical_or(
_asarray_2d_null_rows(x), (x_is_boolean_array | _asarray_2d_null_rows(y))
)
return reduce(_nan_row_maybe_two_inputs, arrs).squeeze() | Returns a boolean array which is True where any of the rows in any
of the _2d_ arrays in arrs are NaNs. Inputs can be any mixture of Series,
DataFrames or array_like. | _nan_rows | python | statsmodels/statsmodels | statsmodels/base/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/data.py | BSD-3-Clause |
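An illustration of the helper (called directly here only for demonstration): True flags rows where any input has a missing value.

import numpy as np
import pandas as pd

a = np.array([1.0, np.nan, 3.0])
b = pd.Series([1.0, 2.0, np.nan])
print(_nan_rows(a, b))   # [False  True  True]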
def handle_missing(cls, endog, exog, missing, **kwargs):
"""
This returns a dictionary with keys endog, exog and the keys of
kwargs. It preserves Nones.
"""
none_array_names = []
# patsy's already dropped NaNs in y/X
missing_idx = kwargs.pop("missing_idx", None)
if missing_idx is not None:
# y, X already handled by patsy. add back in later.
combined = ()
combined_names = []
if exog is None:
none_array_names += ["exog"]
elif exog is not None:
combined = (endog, exog)
combined_names = ["endog", "exog"]
else:
combined = (endog,)
combined_names = ["endog"]
none_array_names += ["exog"]
# deal with other arrays
combined_2d = ()
combined_2d_names = []
if len(kwargs):
for key, value_array in kwargs.items():
if value_array is None or np.ndim(value_array) == 0:
none_array_names += [key]
continue
# grab 1d arrays
if value_array.ndim == 1:
combined += (np.asarray(value_array),)
combined_names += [key]
elif value_array.squeeze().ndim == 1:
combined += (np.asarray(value_array),)
combined_names += [key]
# grab 2d arrays that are _assumed_ to be symmetric
elif value_array.ndim == 2:
combined_2d += (np.asarray(value_array),)
combined_2d_names += [key]
else:
raise ValueError(
"Arrays with more than 2 dimensions " "are not yet handled"
)
if missing_idx is not None:
nan_mask = missing_idx
updated_row_mask = None
if combined: # there were extra arrays not handled by patsy
combined_nans = _nan_rows(*combined)
if combined_nans.shape[0] != nan_mask.shape[0]:
raise ValueError(
"Shape mismatch between endog/exog "
"and extra arrays given to model."
)
# for going back and updated endog/exog
updated_row_mask = combined_nans[~nan_mask]
nan_mask |= combined_nans # for updating extra arrays only
if combined_2d:
combined_2d_nans = _nan_rows(combined_2d)
if combined_2d_nans.shape[0] != nan_mask.shape[0]:
raise ValueError(
"Shape mismatch between endog/exog "
"and extra 2d arrays given to model."
)
if updated_row_mask is not None:
updated_row_mask |= combined_2d_nans[~nan_mask]
else:
updated_row_mask = combined_2d_nans[~nan_mask]
nan_mask |= combined_2d_nans
else:
nan_mask = _nan_rows(*combined)
if combined_2d:
nan_mask = _nan_rows(*(nan_mask[:, None],) + combined_2d)
if not np.any(nan_mask): # no missing do not do anything
combined = dict(zip(combined_names, combined))
if combined_2d:
combined.update(dict(zip(combined_2d_names, combined_2d)))
if none_array_names:
combined.update({k: kwargs.get(k, None) for k in none_array_names})
if missing_idx is not None:
combined.update({"endog": endog})
if exog is not None:
combined.update({"exog": exog})
return combined, []
elif missing == "raise":
raise MissingDataError("NaNs were encountered in the data")
elif missing == "drop":
nan_mask = ~nan_mask
def drop_nans(x):
return cls._drop_nans(x, nan_mask)
def drop_nans_2d(x):
return cls._drop_nans_2d(x, nan_mask)
combined = dict(zip(combined_names, lmap(drop_nans, combined)))
if missing_idx is not None:
if updated_row_mask is not None:
updated_row_mask = ~updated_row_mask
# update endog/exog with this new information
endog = cls._drop_nans(endog, updated_row_mask)
if exog is not None:
exog = cls._drop_nans(exog, updated_row_mask)
combined.update({"endog": endog})
if exog is not None:
combined.update({"exog": exog})
if combined_2d:
combined.update(
dict(zip(combined_2d_names, lmap(drop_nans_2d, combined_2d)))
)
if none_array_names:
combined.update({k: kwargs.get(k, None) for k in none_array_names})
return combined, np.where(~nan_mask)[0].tolist()
else:
raise ValueError("missing option %s not understood" % missing) | This returns a dictionary with keys endog, exog and the keys of
kwargs. It preserves Nones. | handle_missing | python | statsmodels/statsmodels | statsmodels/base/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/data.py | BSD-3-Clause |
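Through the public API this is what `missing="drop"` does at model construction; a small sketch with made-up data:

import numpy as np
import statsmodels.api as sm

y = np.array([1.0, 2.0, np.nan, 4.0, 5.0])
x = sm.add_constant(np.array([1.0, np.nan, 3.0, 4.0, 5.0]))
res = sm.OLS(y, x, missing="drop").fit()
print(res.nobs)          # 3.0 -- only the complete rows are used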
def cov_names(self):
"""
Labels for covariance matrices
In multidimensional models, each dimension of a covariance matrix
differs from the number of param_names.
If not set, returns param_names
"""
# for handling names of covariance names in multidimensional models
if self._cov_names is not None:
return self._cov_names
return self.param_names | Labels for covariance matrices
    In multidimensional models, the dimension of the covariance matrix
    can differ from the number of param_names.
If not set, returns param_names | cov_names | python | statsmodels/statsmodels | statsmodels/base/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/data.py | BSD-3-Clause |
def handle_data_class_factory(endog, exog):
"""
    Given endog and exog, return the ModelData subclass that should handle them
"""
if data_util._is_using_ndarray_type(endog, exog):
klass = ModelData
elif data_util._is_using_pandas(endog, exog):
klass = PandasData
elif data_util._is_using_patsy(endog, exog):
klass = PatsyData
elif data_util._is_using_formulaic(endog, exog):
klass = FormulaicData
# keep this check last
elif data_util._is_using_ndarray(endog, exog):
klass = ModelData
else:
raise ValueError(
"unrecognized data structures: %s / %s" % (type(endog), type(exog))
)
    return klass | Given endog and exog, return the ModelData subclass that should handle them | handle_data_class_factory | python | statsmodels/statsmodels | statsmodels/base/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/data.py | BSD-3-Clause
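Usage is a plain type dispatch: the factory returns the handling class itself, not an instance. A small sketch, assuming numpy and pandas inputs:
import numpy as np
import pandas as pd
from statsmodels.base.data import handle_data_class_factory

y = np.arange(10.0)
handle_data_class_factory(y, None)               # ModelData for plain ndarrays
handle_data_class_factory(pd.Series(y), None)    # PandasData for pandas inputs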
def transform_boxcox(self, x, lmbda=None, method='guerrero', **kwargs):
"""
Performs a Box-Cox transformation on the data array x. If lmbda is None,
the indicated method is used to estimate a suitable lambda parameter.
Parameters
----------
x : array_like
lmbda : float
The lambda parameter for the Box-Cox transform. If None, a value
will be estimated by means of the specified method.
method : {'guerrero', 'loglik'}
The method to estimate the lambda parameter. Will only be used if
lmbda is None, and defaults to 'guerrero', detailed in Guerrero
(1993). 'loglik' maximizes the profile likelihood.
**kwargs
Options for the specified method.
* For 'guerrero', this entails window_length, the grouping
parameter, scale, the dispersion measure, and options, to be
passed to the optimizer.
* For 'loglik': options, to be passed to the optimizer.
Returns
-------
y : array_like
The transformed series.
lmbda : float
The lmbda parameter used to transform the series.
References
----------
Guerrero, Victor M. 1993. "Time-series analysis supported by power
transformations". `Journal of Forecasting`. 12 (1): 37-48.
Guerrero, Victor M. and Perera, Rafael. 2004. "Variance Stabilizing
Power Transformation for Time Series," `Journal of Modern Applied
Statistical Methods`. 3 (2): 357-369.
Box, G. E. P., and D. R. Cox. 1964. "An Analysis of Transformations".
`Journal of the Royal Statistical Society`. 26 (2): 211-252.
"""
x = np.asarray(x)
if np.any(x <= 0):
raise ValueError("Non-positive x.")
if lmbda is None:
lmbda = self._est_lambda(x,
method=method,
**kwargs)
    # if lambda is numerically close to zero, use the log transform
if np.isclose(lmbda, 0.):
y = np.log(x)
else:
y = (np.power(x, lmbda) - 1.) / lmbda
return y, lmbda | Performs a Box-Cox transformation on the data array x. If lmbda is None,
the indicated method is used to estimate a suitable lambda parameter.
Parameters
----------
x : array_like
lmbda : float
The lambda parameter for the Box-Cox transform. If None, a value
will be estimated by means of the specified method.
method : {'guerrero', 'loglik'}
The method to estimate the lambda parameter. Will only be used if
lmbda is None, and defaults to 'guerrero', detailed in Guerrero
(1993). 'loglik' maximizes the profile likelihood.
**kwargs
Options for the specified method.
* For 'guerrero', this entails window_length, the grouping
parameter, scale, the dispersion measure, and options, to be
passed to the optimizer.
* For 'loglik': options, to be passed to the optimizer.
Returns
-------
y : array_like
The transformed series.
lmbda : float
The lmbda parameter used to transform the series.
References
----------
Guerrero, Victor M. 1993. "Time-series analysis supported by power
transformations". `Journal of Forecasting`. 12 (1): 37-48.
Guerrero, Victor M. and Perera, Rafael. 2004. "Variance Stabilizing
Power Transformation for Time Series," `Journal of Modern Applied
Statistical Methods`. 3 (2): 357-369.
Box, G. E. P., and D. R. Cox. 1964. "An Analysis of Transformations".
`Journal of the Royal Statistical Society`. 26 (2): 211-252. | transform_boxcox | python | statsmodels/statsmodels | statsmodels/base/transform.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/transform.py | BSD-3-Clause |
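A usage sketch of the forward transform on synthetic data, assuming the enclosing mixin class is BoxCox in statsmodels.base.transform: with lmbda=None the Guerrero estimate is used, while lmbda=0 forces the log branch.
import numpy as np
from statsmodels.base.transform import BoxCox

rng = np.random.default_rng(12345)
x = np.exp(rng.normal(size=200))                 # strictly positive, as required

bc = BoxCox()
y, lmbda = bc.transform_boxcox(x)                # lambda estimated via 'guerrero'
y_log, _ = bc.transform_boxcox(x, lmbda=0.0)     # log branch
assert np.allclose(y_log, np.log(x))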
def untransform_boxcox(self, x, lmbda, method='naive'):
"""
Back-transforms the Box-Cox transformed data array, by means of the
indicated method. The provided argument lmbda should be the lambda
parameter that was used to initially transform the data.
Parameters
----------
x : array_like
The transformed series.
lmbda : float
The lambda parameter that was used to transform the series.
method : {'naive'}
Indicates the method to be used in the untransformation. Defaults
to 'naive', which reverses the transformation.
NOTE: 'naive' is implemented natively, while other methods may be
available in subclasses!
Returns
-------
y : array_like
The untransformed series.
"""
method = method.lower()
x = np.asarray(x)
if method == 'naive':
if np.isclose(lmbda, 0.):
y = np.exp(x)
else:
y = np.power(lmbda * x + 1, 1. / lmbda)
else:
raise ValueError(f"Method '{method}' not understood.")
return y | Back-transforms the Box-Cox transformed data array, by means of the
indicated method. The provided argument lmbda should be the lambda
parameter that was used to initially transform the data.
Parameters
----------
x : array_like
The transformed series.
lmbda : float
The lambda parameter that was used to transform the series.
method : {'naive'}
Indicates the method to be used in the untransformation. Defaults
to 'naive', which reverses the transformation.
NOTE: 'naive' is implemented natively, while other methods may be
available in subclasses!
Returns
-------
y : array_like
The untransformed series. | untransform_boxcox | python | statsmodels/statsmodels | statsmodels/base/transform.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/transform.py | BSD-3-Clause |
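Because 'naive' is the exact algebraic inverse of the forward transform, a round trip recovers the original series; a quick check with the same (assumed) BoxCox mixin:
import numpy as np
from statsmodels.base.transform import BoxCox

bc = BoxCox()
x = np.array([0.5, 1.0, 2.0, 4.0, 8.0])
y, lmbda = bc.transform_boxcox(x, lmbda=0.3)     # (x**0.3 - 1) / 0.3
x_back = bc.untransform_boxcox(y, lmbda)         # (0.3*y + 1) ** (1 / 0.3)
assert np.allclose(x_back, x)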
def _est_lambda(self, x, bounds=(-1, 2), method='guerrero', **kwargs):
"""
Computes an estimate for the lambda parameter in the Box-Cox
transformation using method.
Parameters
----------
x : array_like
The untransformed data.
bounds : tuple
        Numeric 2-tuple that indicates the solution space for the lambda
parameter. Default (-1, 2).
method : {'guerrero', 'loglik'}
The method by which to estimate lambda. Defaults to 'guerrero', but
the profile likelihood ('loglik') is also available.
**kwargs
Options for the specified method.
* For 'guerrero': window_length (int), the seasonality/grouping
parameter. Scale ({'mad', 'sd'}), the dispersion measure. Options
(dict), to be passed to the optimizer.
* For 'loglik': Options (dict), to be passed to the optimizer.
Returns
-------
lmbda : float
The lambda parameter.
"""
method = method.lower()
if len(bounds) != 2:
raise ValueError("Bounds of length {} not understood."
.format(len(bounds)))
elif bounds[0] >= bounds[1]:
raise ValueError("Lower bound exceeds upper bound.")
if method == 'guerrero':
lmbda = self._guerrero_cv(x, bounds=bounds, **kwargs)
elif method == 'loglik':
lmbda = self._loglik_boxcox(x, bounds=bounds, **kwargs)
else:
raise ValueError(f"Method '{method}' not understood.")
return lmbda | Computes an estimate for the lambda parameter in the Box-Cox
transformation using method.
Parameters
----------
x : array_like
The untransformed data.
bounds : tuple
        Numeric 2-tuple that indicates the solution space for the lambda
parameter. Default (-1, 2).
method : {'guerrero', 'loglik'}
The method by which to estimate lambda. Defaults to 'guerrero', but
the profile likelihood ('loglik') is also available.
**kwargs
Options for the specified method.
* For 'guerrero': window_length (int), the seasonality/grouping
parameter. Scale ({'mad', 'sd'}), the dispersion measure. Options
(dict), to be passed to the optimizer.
* For 'loglik': Options (dict), to be passed to the optimizer.
Returns
-------
lmbda : float
The lambda parameter. | _est_lambda | python | statsmodels/statsmodels | statsmodels/base/transform.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/transform.py | BSD-3-Clause |
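The two estimators need not agree on a given series; a short comparison sketch (this is a private helper, called directly here only for illustration, and window_length is a 'guerrero'-specific keyword):
import numpy as np
from statsmodels.base.transform import BoxCox

rng = np.random.default_rng(0)
x = rng.gamma(shape=2.0, scale=3.0, size=240)    # toy positive series

bc = BoxCox()
lmbda_guerrero = bc._est_lambda(x, method='guerrero', window_length=12)
lmbda_loglik = bc._est_lambda(x, method='loglik')
print(lmbda_guerrero, lmbda_loglik)              # both restricted to the default bounds (-1, 2)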
def _guerrero_cv(self, x, bounds, window_length=4, scale='sd',
options={'maxiter': 25}):
"""
        Computes lambda using Guerrero's coefficient of variation. If no
seasonality is present in the data, window_length is set to 4 (as
per Guerrero and Perera, (2004)).
NOTE: Seasonality-specific auxiliaries *should* provide their own
seasonality parameter.
Parameters
----------
x : array_like
bounds : tuple
        Numeric 2-tuple that indicates the solution space for the lambda
parameter.
window_length : int
Seasonality/grouping parameter. Default 4, as per Guerrero and
Perera (2004). NOTE: this indicates the length of the individual
groups, not the total number of groups!
scale : {'sd', 'mad'}
The dispersion measure to be used. 'sd' indicates the sample
standard deviation, but the more robust 'mad' is also available.
options : dict
The options (as a dict) to be passed to the optimizer.
"""
nobs = len(x)
groups = int(nobs / window_length)
# remove the first n < window_length observations from consideration.
grouped_data = np.reshape(x[nobs - (groups * window_length): nobs],
(groups, window_length))
mean = np.mean(grouped_data, 1)
scale = scale.lower()
if scale == 'sd':
dispersion = np.std(grouped_data, 1, ddof=1)
elif scale == 'mad':
dispersion = mad(grouped_data, axis=1)
else:
raise ValueError(f"Scale '{scale}' not understood.")
def optim(lmbda):
rat = np.divide(dispersion, np.power(mean, 1 - lmbda)) # eq 6, p 40
return np.std(rat, ddof=1) / np.mean(rat)
res = minimize_scalar(optim,
bounds=bounds,
method='bounded',
options=options)
    return res.x | Computes lambda using Guerrero's coefficient of variation. If no
seasonality is present in the data, window_length is set to 4 (as
per Guerrero and Perera, (2004)).
NOTE: Seasonality-specific auxiliaries *should* provide their own
seasonality parameter.
Parameters
----------
x : array_like
bounds : tuple
        Numeric 2-tuple that indicates the solution space for the lambda
parameter.
window_length : int
Seasonality/grouping parameter. Default 4, as per Guerrero and
Perera (2004). NOTE: this indicates the length of the individual
groups, not the total number of groups!
scale : {'sd', 'mad'}
The dispersion measure to be used. 'sd' indicates the sample
standard deviation, but the more robust 'mad' is also available.
options : dict
The options (as a dict) to be passed to the optimizer. | _guerrero_cv | python | statsmodels/statsmodels | statsmodels/base/transform.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/transform.py | BSD-3-Clause |
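Written out for a fixed lambda, the criterion is the coefficient of variation of the group-wise ratios dispersion / mean**(1 - lambda) (the eq. 6 referenced in the comment above); a standalone restatement on made-up data:
import numpy as np

def guerrero_cv(x, lmbda, window_length=4):
    nobs = len(x)
    groups = nobs // window_length
    # drop the first nobs - groups * window_length observations, as in the method above
    grouped = x[nobs - groups * window_length:].reshape(groups, window_length)
    mean = grouped.mean(axis=1)
    dispersion = grouped.std(axis=1, ddof=1)
    rat = dispersion / mean ** (1 - lmbda)       # eq. 6
    return rat.std(ddof=1) / rat.mean()          # coefficient of variation to be minimized

x = np.exp(np.linspace(0.0, 3.0, 48) + 0.1 * np.sin(np.arange(48)))
print(guerrero_cv(x, 0.0), guerrero_cv(x, 1.0))  # compare two candidate lambdas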
def _loglik_boxcox(self, x, bounds, options={'maxiter': 25}):
"""
Taken from the Stata manual on Box-Cox regressions, where this is the
special case of 'lhs only'. As an estimator for the variance, the
    maximum-likelihood (biased) sample variance is used.
Parameters
----------
x : array_like
options : dict
The options (as a dict) to be passed to the optimizer.
"""
sum_x = np.sum(np.log(x))
nobs = len(x)
def optim(lmbda):
y, lmbda = self.transform_boxcox(x, lmbda)
return (1 - lmbda) * sum_x + (nobs / 2.) * np.log(np.var(y))
res = minimize_scalar(optim,
bounds=bounds,
method='bounded',
options=options)
return res.x | Taken from the Stata manual on Box-Cox regressions, where this is the
special case of 'lhs only'. As an estimator for the variance, the
    maximum-likelihood (biased) sample variance is used.
Parameters
----------
x : array_like
options : dict
The options (as a dict) to be passed to the optimizer. | _loglik_boxcox | python | statsmodels/statsmodels | statsmodels/base/transform.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/transform.py | BSD-3-Clause |
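For reference, optim above is the negative Box-Cox profile log-likelihood up to an additive constant: with y the transformed series and var(y) the maximum-likelihood variance estimate, the profile log-likelihood is -(n/2)*log(var(y)) + (lambda - 1)*sum(log(x)), the last term being the Jacobian of the transformation. A tiny numeric restatement on toy data, assuming the BoxCox mixin as above:
import numpy as np
from statsmodels.base.transform import BoxCox

bc = BoxCox()
x = np.array([0.8, 1.3, 2.1, 3.4, 5.5, 8.9])
lmbda = 0.5
y, _ = bc.transform_boxcox(x, lmbda)
neg_profile_ll = (1 - lmbda) * np.log(x).sum() + (len(x) / 2.0) * np.log(np.var(y))
print(neg_profile_ll)     # the value optim(0.5) would return inside _loglik_boxcox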
def _fit(self, objective, gradient, start_params, fargs, kwargs,
hessian=None, method='newton', maxiter=100, full_output=True,
disp=True, callback=None, retall=False):
"""
Fit function for any model with an objective function.
Parameters
----------
objective : function
Objective function to be minimized.
gradient : function
The gradient of the objective function.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
hessian : str, optional
Method for computing the Hessian matrix, if applicable.
    method : str {'newton','nm','bfgs','lbfgs','powell','cg','ncg','basinhopping',
'minimize'}
Method can be 'newton' for Newton-Raphson, 'nm' for Nelder-Mead,
'bfgs' for Broyden-Fletcher-Goldfarb-Shanno, 'powell' for modified
Powell's method, 'cg' for conjugate gradient, 'ncg' for Newton-
conjugate gradient, 'basinhopping' for global basin-hopping
solver, if available or a generic 'minimize' which is a wrapper for
scipy.optimize.minimize. `method` determines which solver from
scipy.optimize is used. The explicit arguments in `fit` are passed
to the solver, with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
        basin-hopping solver supports.
maxiter : int
The maximum number of iterations to perform.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool
Set to True to print convergence messages.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
optim_settings : dict
A dictionary that contains the parameters passed to the solver.
Notes
-----
The 'basinhopping' solver ignores `maxiter`, `retall`, `full_output`
explicit arguments.
Optional arguments for the solvers (available in Results.mle_settings)::
'newton'
tol : float
Relative error in params acceptable for convergence.
'nm' -- Nelder Mead
xtol : float
Relative error in params acceptable for convergence
ftol : float
Relative error in loglike(params) acceptable for
convergence
maxfun : int
Maximum number of function evaluations to make.
'bfgs'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.inf is max, -np.inf is min)
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
'lbfgs'
m : int
The maximum number of variable metric corrections used to
define the limited memory matrix. (The limited memory BFGS
method does not store the full hessian but uses this many
terms in an approximation to it.)
pgtol : float
The iteration will stop when
``max{|proj g_i | i = 1, ..., n} <= pgtol`` where pg_i is
the i-th component of the projected gradient.
factr : float
The iteration stops when
``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
where eps is the machine precision, which is automatically
generated by the code. Typical values for factr are: 1e12
for low accuracy; 1e7 for moderate accuracy; 10.0 for
extremely high accuracy. See Notes for relationship to
ftol, which is exposed (instead of factr) by the
scipy.optimize.minimize interface to L-BFGS-B.
maxfun : int
                    Maximum number of function evaluations.
epsilon : float
Step size used when approx_grad is True, for numerically
calculating the gradient
approx_grad : bool
Whether to approximate the gradient numerically (in which
case func returns only the function value).
'cg'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.inf is max, -np.inf is min)
epsilon : float
If fprime is approximated, use this value for the step
size. Can be scalar or vector. Only relevant if
Likelihoodmodel.score is None.
'ncg'
fhess_p : callable f'(x,*args)
Function which computes the Hessian of f times an arbitrary
vector, p. Should only be supplied if
LikelihoodModel.hessian is None.
avextol : float
Stop when the average relative error in the minimizer
falls below this amount.
epsilon : float or ndarray
If fhess is approximated, use this value for the step size.
Only relevant if Likelihoodmodel.hessian is None.
'powell'
xtol : float
Line-search error tolerance
ftol : float
                    Relative error in loglike(params) acceptable for
convergence.
maxfun : int
Maximum number of function evaluations to make.
start_direc : ndarray
Initial direction set.
'basinhopping'
niter : int
The number of basin hopping iterations.
niter_success : int
Stop the run if the global minimum candidate remains the
same for this number of iterations.
T : float
The "temperature" parameter for the accept or reject
criterion. Higher "temperatures" mean that larger jumps
in function value will be accepted. For best results
`T` should be comparable to the separation (in function
value) between local minima.
stepsize : float
Initial step size for use in the random displacement.
interval : int
The interval for how often to update the `stepsize`.
minimizer : dict
Extra keyword arguments to be passed to the minimizer
`scipy.optimize.minimize()`, for example 'method' - the
minimization method (e.g. 'L-BFGS-B'), or 'tol' - the
tolerance for termination. Other arguments are mapped from
explicit argument of `fit`:
- `args` <- `fargs`
- `jac` <- `score`
- `hess` <- `hess`
'minimize'
min_method : str, optional
Name of minimization method to use.
Any method specific arguments can be passed directly.
For a list of methods and their arguments, see
documentation of `scipy.optimize.minimize`.
If no method is specified, then BFGS is used.
"""
# TODO: generalize the regularization stuff
# Extract kwargs specific to fit_regularized calling fit
extra_fit_funcs = kwargs.get('extra_fit_funcs', dict())
methods = ['newton', 'nm', 'bfgs', 'lbfgs', 'powell', 'cg', 'ncg',
'basinhopping', 'minimize']
methods += extra_fit_funcs.keys()
method = method.lower()
_check_method(method, methods)
fit_funcs = {
'newton': _fit_newton,
'nm': _fit_nm, # Nelder-Mead
'bfgs': _fit_bfgs,
'lbfgs': _fit_lbfgs,
'cg': _fit_cg,
'ncg': _fit_ncg,
'powell': _fit_powell,
'basinhopping': _fit_basinhopping,
'minimize': _fit_minimize # wrapper for scipy.optimize.minimize
}
# NOTE: fit_regularized checks the methods for these but it should be
# moved up probably
if extra_fit_funcs:
fit_funcs.update(extra_fit_funcs)
func = fit_funcs[method]
xopt, retvals = func(objective, gradient, start_params, fargs, kwargs,
disp=disp, maxiter=maxiter, callback=callback,
retall=retall, full_output=full_output,
hess=hessian)
optim_settings = {'optimizer': method, 'start_params': start_params,
'maxiter': maxiter, 'full_output': full_output,
'disp': disp, 'fargs': fargs, 'callback': callback,
'retall': retall, "extra_fit_funcs": extra_fit_funcs}
optim_settings.update(kwargs)
# set as attributes or return?
return xopt, retvals, optim_settings | Fit function for any model with an objective function.
Parameters
----------
objective : function
Objective function to be minimized.
gradient : function
The gradient of the objective function.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
hessian : str, optional
Method for computing the Hessian matrix, if applicable.
    method : str {'newton','nm','bfgs','lbfgs','powell','cg','ncg','basinhopping',
'minimize'}
Method can be 'newton' for Newton-Raphson, 'nm' for Nelder-Mead,
'bfgs' for Broyden-Fletcher-Goldfarb-Shanno, 'powell' for modified
Powell's method, 'cg' for conjugate gradient, 'ncg' for Newton-
conjugate gradient, 'basinhopping' for global basin-hopping
solver, if available or a generic 'minimize' which is a wrapper for
scipy.optimize.minimize. `method` determines which solver from
scipy.optimize is used. The explicit arguments in `fit` are passed
to the solver, with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
        basin-hopping solver supports.
maxiter : int
The maximum number of iterations to perform.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool
Set to True to print convergence messages.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
optim_settings : dict
A dictionary that contains the parameters passed to the solver.
Notes
-----
The 'basinhopping' solver ignores `maxiter`, `retall`, `full_output`
explicit arguments.
Optional arguments for the solvers (available in Results.mle_settings)::
'newton'
tol : float
Relative error in params acceptable for convergence.
'nm' -- Nelder Mead
xtol : float
Relative error in params acceptable for convergence
ftol : float
Relative error in loglike(params) acceptable for
convergence
maxfun : int
Maximum number of function evaluations to make.
'bfgs'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.inf is max, -np.inf is min)
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
'lbfgs'
m : int
The maximum number of variable metric corrections used to
define the limited memory matrix. (The limited memory BFGS
method does not store the full hessian but uses this many
terms in an approximation to it.)
pgtol : float
The iteration will stop when
``max{|proj g_i | i = 1, ..., n} <= pgtol`` where pg_i is
the i-th component of the projected gradient.
factr : float
The iteration stops when
``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
where eps is the machine precision, which is automatically
generated by the code. Typical values for factr are: 1e12
for low accuracy; 1e7 for moderate accuracy; 10.0 for
extremely high accuracy. See Notes for relationship to
ftol, which is exposed (instead of factr) by the
scipy.optimize.minimize interface to L-BFGS-B.
maxfun : int
                    Maximum number of function evaluations.
epsilon : float
Step size used when approx_grad is True, for numerically
calculating the gradient
approx_grad : bool
Whether to approximate the gradient numerically (in which
case func returns only the function value).
'cg'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.inf is max, -np.inf is min)
epsilon : float
If fprime is approximated, use this value for the step
size. Can be scalar or vector. Only relevant if
Likelihoodmodel.score is None.
'ncg'
fhess_p : callable f'(x,*args)
Function which computes the Hessian of f times an arbitrary
vector, p. Should only be supplied if
LikelihoodModel.hessian is None.
avextol : float
Stop when the average relative error in the minimizer
falls below this amount.
epsilon : float or ndarray
If fhess is approximated, use this value for the step size.
Only relevant if Likelihoodmodel.hessian is None.
'powell'
xtol : float
Line-search error tolerance
ftol : float
                    Relative error in loglike(params) acceptable for
convergence.
maxfun : int
Maximum number of function evaluations to make.
start_direc : ndarray
Initial direction set.
'basinhopping'
niter : int
The number of basin hopping iterations.
niter_success : int
Stop the run if the global minimum candidate remains the
same for this number of iterations.
T : float
The "temperature" parameter for the accept or reject
criterion. Higher "temperatures" mean that larger jumps
in function value will be accepted. For best results
`T` should be comparable to the separation (in function
value) between local minima.
stepsize : float
Initial step size for use in the random displacement.
interval : int
The interval for how often to update the `stepsize`.
minimizer : dict
Extra keyword arguments to be passed to the minimizer
`scipy.optimize.minimize()`, for example 'method' - the
minimization method (e.g. 'L-BFGS-B'), or 'tol' - the
tolerance for termination. Other arguments are mapped from
explicit argument of `fit`:
- `args` <- `fargs`
- `jac` <- `score`
- `hess` <- `hess`
'minimize'
min_method : str, optional
Name of minimization method to use.
Any method specific arguments can be passed directly.
For a list of methods and their arguments, see
documentation of `scipy.optimize.minimize`.
If no method is specified, then BFGS is used. | _fit | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
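The dispatcher is normally reached through a model's fit(), but it can be driven directly; a minimal sketch with a toy quadratic objective and the default Newton solver, assuming the enclosing class is Optimizer in statsmodels.base.optimizer (all other names below are illustrative, not library fixtures):
import numpy as np
from statsmodels.base.optimizer import Optimizer

target = np.array([1.0, -2.0])

def objective(params):
    return np.sum((params - target) ** 2)

def gradient(params):
    return 2.0 * (params - target)

def hessian(params):
    return 2.0 * np.eye(len(params))

xopt, retvals, settings = Optimizer()._fit(
    objective, gradient, start_params=np.zeros(2), fargs=(), kwargs={},
    hessian=hessian, method='newton', disp=False)
print(xopt, retvals['converged'])                # approx. [ 1. -2.] True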
def _fit_constrained(self, params):
"""
TODO: how to add constraints?
Something like
sm.add_constraint(Model, func)
or
model_instance.add_constraint(func)
model_instance.add_constraint("x1 + x2 = 2")
result = model_instance.fit()
"""
raise NotImplementedError | TODO: how to add constraints?
Something like
sm.add_constraint(Model, func)
or
model_instance.add_constraint(func)
model_instance.add_constraint("x1 + x2 = 2")
result = model_instance.fit() | _fit_constrained | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
def _fit_minimize(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
"""
Fit using scipy minimize, where kwarg `min_method` defines the algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
kwargs.setdefault('min_method', 'BFGS')
# prepare options dict for minimize
filter_opts = ['extra_fit_funcs', 'niter', 'min_method', 'tol', 'bounds', 'constraints']
options = {k: v for k, v in kwargs.items() if k not in filter_opts}
options['disp'] = disp
options['maxiter'] = maxiter
# Use Hessian/Jacobian only if they're required by the method
no_hess = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'COBYLA', 'SLSQP']
no_jac = ['Nelder-Mead', 'Powell', 'COBYLA']
if kwargs['min_method'] in no_hess:
hess = None
if kwargs['min_method'] in no_jac:
score = None
# Use bounds/constraints only if they're allowed by the method
has_bounds = ['L-BFGS-B', 'TNC', 'SLSQP', 'trust-constr']
# Added in SP 1.5
if not SP_LT_15:
has_bounds += ['Powell']
# Added in SP 1.7
if not SP_LT_17:
has_bounds += ['Nelder-Mead']
has_constraints = ['COBYLA', 'SLSQP', 'trust-constr']
if 'bounds' in kwargs.keys() and kwargs['min_method'] in has_bounds:
bounds = kwargs['bounds']
else:
bounds = None
if 'constraints' in kwargs.keys() and kwargs['min_method'] in has_constraints:
constraints = kwargs['constraints']
else:
constraints = ()
res = optimize.minimize(f, start_params, args=fargs, method=kwargs['min_method'],
jac=score, hess=hess, bounds=bounds, constraints=constraints,
callback=callback, options=options)
xopt = res.x
retvals = None
if full_output:
nit = getattr(res, 'nit', np.nan) # scipy 0.14 compat
retvals = {'fopt': res.fun, 'iterations': nit,
'fcalls': res.nfev, 'warnflag': res.status,
'converged': res.success}
if retall:
retvals.update({'allvecs': res.values()})
return xopt, retvals | Fit using scipy minimize, where kwarg `min_method` defines the algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_minimize | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
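To reach this wrapper, pass method='minimize' and put min_method (plus bounds or constraints, where the chosen scipy method supports them) into kwargs; a hedged sketch on the same kind of toy quadratic, again assuming the Optimizer class:
import numpy as np
from statsmodels.base.optimizer import Optimizer

def objective(params):
    return np.sum((params - np.array([1.0, -2.0])) ** 2)

def gradient(params):
    return 2.0 * (params - np.array([1.0, -2.0]))

xopt, retvals, _ = Optimizer()._fit(
    objective, gradient, start_params=np.zeros(2), fargs=(),
    kwargs={'min_method': 'L-BFGS-B',
            'bounds': [(0.0, None), (None, None)]},   # honored because L-BFGS-B supports bounds
    method='minimize', disp=False)
print(xopt)                                            # approximately [1.0, -2.0]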
def _fit_newton(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None, ridge_factor=1e-10):
"""
Fit using Newton-Raphson algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
ridge_factor : float
Regularization factor for Hessian matrix.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
check_kwargs(kwargs, ("tol", "ridge_factor"), "newton")
tol = kwargs.setdefault('tol', 1e-8)
ridge_factor = kwargs.setdefault('ridge_factor', 1e-10)
iterations = 0
oldparams = np.inf
newparams = np.asarray(start_params)
if retall:
history = [oldparams, newparams]
while (iterations < maxiter and np.any(np.abs(newparams -
oldparams) > tol)):
H = np.asarray(hess(newparams))
# regularize Hessian, not clear what ridge factor should be
# keyword option with absolute default 1e-10, see #1847
if not np.all(ridge_factor == 0):
H[np.diag_indices(H.shape[0])] += ridge_factor
oldparams = newparams
newparams = oldparams - np.linalg.solve(H, score(oldparams))
if retall:
history.append(newparams)
if callback is not None:
callback(newparams)
iterations += 1
fval = f(newparams, *fargs) # this is the negative likelihood
if iterations == maxiter:
warnflag = 1
if disp:
print("Warning: Maximum number of iterations has been "
"exceeded.")
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iterations)
else:
warnflag = 0
if disp:
print("Optimization terminated successfully.")
print(" Current function value: %f" % fval)
print(" Iterations %d" % iterations)
if full_output:
(xopt, fopt, niter,
gopt, hopt) = (newparams, f(newparams, *fargs),
iterations, score(newparams),
hess(newparams))
converged = not warnflag
retvals = {'fopt': fopt, 'iterations': niter, 'score': gopt,
'Hessian': hopt, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': history})
else:
xopt = newparams
retvals = None
return xopt, retvals | Fit using Newton-Raphson algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
ridge_factor : float
Regularization factor for Hessian matrix.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_newton | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
def _fit_bfgs(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
"""
    Fit using Broyden-Fletcher-Goldfarb-Shanno algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
check_kwargs(kwargs, ("gtol", "norm", "epsilon"), "bfgs")
gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)
norm = kwargs.setdefault('norm', np.inf)
epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
retvals = optimize.fmin_bfgs(f, start_params, score, args=fargs,
gtol=gtol, norm=norm, epsilon=epsilon,
maxiter=maxiter, full_output=full_output,
disp=disp, retall=retall, callback=callback)
if full_output:
if not retall:
xopt, fopt, gopt, Hinv, fcalls, gcalls, warnflag = retvals
else:
(xopt, fopt, gopt, Hinv, fcalls,
gcalls, warnflag, allvecs) = retvals
converged = not warnflag
retvals = {'fopt': fopt, 'gopt': gopt, 'Hinv': Hinv,
'fcalls': fcalls, 'gcalls': gcalls, 'warnflag':
warnflag, 'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
    return xopt, retvals | Fit using Broyden-Fletcher-Goldfarb-Shanno algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_bfgs | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
def _fit_lbfgs(f, score, start_params, fargs, kwargs, disp=True, maxiter=100,
callback=None, retall=False, full_output=True, hess=None):
"""
    Fit using Limited-memory Broyden-Fletcher-Goldfarb-Shanno algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
Notes
-----
Within the mle part of statsmodels, the log likelihood function and
its gradient with respect to the parameters do not have notationally
consistent sign.
"""
check_kwargs(
kwargs,
("m", "pgtol", "factr", "maxfun", "epsilon", "approx_grad", "bounds", "loglike_and_score", "iprint"),
"lbfgs"
)
# Use unconstrained optimization by default.
bounds = kwargs.setdefault('bounds', [(None, None)] * len(start_params))
kwargs.setdefault('iprint', 0)
# Pass the following keyword argument names through to fmin_l_bfgs_b
# if they are present in kwargs, otherwise use the fmin_l_bfgs_b
# default values.
names = ('m', 'pgtol', 'factr', 'maxfun', 'epsilon', 'approx_grad')
extra_kwargs = {x: kwargs[x] for x in names if x in kwargs}
# Extract values for the options related to the gradient.
approx_grad = kwargs.get('approx_grad', False)
loglike_and_score = kwargs.get('loglike_and_score', None)
epsilon = kwargs.get('epsilon', None)
# The approx_grad flag has superpowers nullifying the score function arg.
if approx_grad:
score = None
# Choose among three options for dealing with the gradient (the gradient
# of a log likelihood function with respect to its parameters
# is more specifically called the score in statistics terminology).
# The first option is to use the finite-differences
# approximation that is built into the fmin_l_bfgs_b optimizer.
# The second option is to use the provided score function.
# The third option is to use the score component of a provided
# function that simultaneously evaluates the log likelihood and score.
if epsilon and not approx_grad:
raise ValueError('a finite-differences epsilon was provided '
'even though we are not using approx_grad')
if approx_grad and loglike_and_score:
raise ValueError('gradient approximation was requested '
'even though an analytic loglike_and_score function '
'was given')
if loglike_and_score:
def func(p, *a):
return tuple(-x for x in loglike_and_score(p, *a))
elif score:
func = f
extra_kwargs['fprime'] = score
elif approx_grad:
func = f
retvals = optimize.fmin_l_bfgs_b(func, start_params, maxiter=maxiter,
callback=callback, args=fargs,
bounds=bounds, disp=disp,
**extra_kwargs)
if full_output:
xopt, fopt, d = retvals
# The warnflag is
# 0 if converged
# 1 if too many function evaluations or too many iterations
# 2 if stopped for another reason, given in d['task']
warnflag = d['warnflag']
converged = (warnflag == 0)
gopt = d['grad']
fcalls = d['funcalls']
iterations = d['nit']
retvals = {'fopt': fopt, 'gopt': gopt, 'fcalls': fcalls,
'warnflag': warnflag, 'converged': converged,
'iterations': iterations}
else:
xopt = retvals[0]
retvals = None
    return xopt, retvals | Fit using Limited-memory Broyden-Fletcher-Goldfarb-Shanno algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
Notes
-----
Within the mle part of statsmodels, the log likelihood function and
its gradient with respect to the parameters do not have notationally
consistent sign. | _fit_lbfgs | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
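Of the three gradient options described above, approx_grad=True is the fully derivative-free route; a sketch with a toy objective and no analytic score, again assuming the Optimizer class:
import numpy as np
from statsmodels.base.optimizer import Optimizer

def objective(params):
    # Rosenbrock-type toy function; the gradient is approximated numerically
    return (1.0 - params[0]) ** 2 + 100.0 * (params[1] - params[0] ** 2) ** 2

xopt, retvals, _ = Optimizer()._fit(
    objective, None, start_params=np.zeros(2), fargs=(),
    kwargs={'approx_grad': True, 'pgtol': 1e-8},
    method='lbfgs', maxiter=500, disp=False)
print(xopt, retvals['converged'])                # close to [1.0, 1.0]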
def _fit_nm(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
"""
Fit using Nelder-Mead algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
check_kwargs(kwargs, ("xtol", "ftol", "maxfun"), "nm")
xtol = kwargs.setdefault('xtol', 0.0001)
ftol = kwargs.setdefault('ftol', 0.0001)
maxfun = kwargs.setdefault('maxfun', None)
retvals = optimize.fmin(f, start_params, args=fargs, xtol=xtol,
ftol=ftol, maxiter=maxiter, maxfun=maxfun,
full_output=full_output, disp=disp, retall=retall,
callback=callback)
if full_output:
if not retall:
xopt, fopt, niter, fcalls, warnflag = retvals
else:
xopt, fopt, niter, fcalls, warnflag, allvecs = retvals
converged = not warnflag
retvals = {'fopt': fopt, 'iterations': niter,
'fcalls': fcalls, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals | Fit using Nelder-Mead algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_nm | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
def _fit_cg(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
"""
Fit using Conjugate Gradient algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
check_kwargs(kwargs, ("gtol", "norm", "epsilon"), "cg")
gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)
norm = kwargs.setdefault('norm', np.inf)
epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
retvals = optimize.fmin_cg(f, start_params, score, gtol=gtol, norm=norm,
epsilon=epsilon, maxiter=maxiter,
full_output=full_output, disp=disp,
retall=retall, callback=callback)
if full_output:
if not retall:
xopt, fopt, fcalls, gcalls, warnflag = retvals
else:
xopt, fopt, fcalls, gcalls, warnflag, allvecs = retvals
converged = not warnflag
retvals = {'fopt': fopt, 'fcalls': fcalls, 'gcalls': gcalls,
'warnflag': warnflag, 'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals | Fit using Conjugate Gradient algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_cg | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
def _fit_ncg(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
"""
Fit using Newton Conjugate Gradient algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
check_kwargs(kwargs, ("fhess_p", "avextol", "epsilon"), "ncg")
fhess_p = kwargs.setdefault('fhess_p', None)
avextol = kwargs.setdefault('avextol', 1.0000000000000001e-05)
epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
retvals = optimize.fmin_ncg(f, start_params, score, fhess_p=fhess_p,
fhess=hess, args=fargs, avextol=avextol,
epsilon=epsilon, maxiter=maxiter,
full_output=full_output, disp=disp,
retall=retall, callback=callback)
if full_output:
if not retall:
xopt, fopt, fcalls, gcalls, hcalls, warnflag = retvals
else:
xopt, fopt, fcalls, gcalls, hcalls, warnflag, allvecs = \
retvals
converged = not warnflag
retvals = {'fopt': fopt, 'fcalls': fcalls, 'gcalls': gcalls,
'hcalls': hcalls, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals | Fit using Newton Conjugate Gradient algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_ncg | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
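A minimal, hypothetical sketch of the scipy call this wrapper forwards to: a convex quadratic stands in for the negative log likelihood, and the gradient and Hessian callables play the role of the `score`/`hess` arguments above. The unpacking matches the `full_output` branch of `_fit_ncg`; all data here are invented.

import numpy as np
from scipy import optimize

# Toy "negative log likelihood": convex quadratic with minimum at (1, -2).
A = np.array([[2.0, 0.3], [0.3, 1.0]])
xstar = np.array([1.0, -2.0])

f = lambda x: 0.5 * (x - xstar) @ A @ (x - xstar)   # objective
score = lambda x: A @ (x - xstar)                   # gradient
hess = lambda x: A                                  # constant Hessian

xopt, fopt, fcalls, gcalls, hcalls, warnflag = optimize.fmin_ncg(
    f, np.zeros(2), score, fhess=hess, avextol=1e-5,
    maxiter=100, full_output=True, disp=False)
print(xopt, warnflag)   # xopt ~ (1, -2); warnflag == 0 means converged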
def _fit_powell(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
"""
Fit using Powell's conjugate direction algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
check_kwargs(kwargs, ("xtol", "ftol", "maxfun", "start_direc"), "powell")
xtol = kwargs.setdefault('xtol', 0.0001)
ftol = kwargs.setdefault('ftol', 0.0001)
maxfun = kwargs.setdefault('maxfun', None)
start_direc = kwargs.setdefault('start_direc', None)
retvals = optimize.fmin_powell(f, start_params, args=fargs, xtol=xtol,
ftol=ftol, maxiter=maxiter, maxfun=maxfun,
full_output=full_output, disp=disp,
retall=retall, callback=callback,
direc=start_direc)
if full_output:
if not retall:
xopt, fopt, direc, niter, fcalls, warnflag = retvals
else:
xopt, fopt, direc, niter, fcalls, warnflag, allvecs = \
retvals
converged = not warnflag
retvals = {'fopt': fopt, 'direc': direc, 'iterations': niter,
'fcalls': fcalls, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals | Fit using Powell's conjugate direction algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_powell | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
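A usage sketch through the public fit interface with simulated data; `method='powell'` routes through the wrapper above, and `xtol`, `ftol`, `maxfun` are forwarded as the solver keywords it reads. The model and coefficients are illustrative only.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = sm.add_constant(rng.normal(size=(500, 2)))
y = (x @ np.array([0.5, 1.0, -1.0]) + rng.normal(size=500) > 0).astype(float)

# method='powell' dispatches to _fit_powell; solver keywords are passed on.
res = sm.Logit(y, x).fit(method='powell', xtol=1e-6, ftol=1e-6,
                         maxfun=5000, disp=False)
print(res.params)
print(res.mle_retvals['converged'])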
def _fit_basinhopping(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
"""
Fit using Basin-hopping algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
"""
check_kwargs(
kwargs,
("niter", "niter_success", "T", "stepsize", "interval", "minimizer", "seed"),
"basinhopping"
)
kwargs = {k: v for k, v in kwargs.items()}
niter = kwargs.setdefault('niter', 100)
niter_success = kwargs.setdefault('niter_success', None)
T = kwargs.setdefault('T', 1.0)
stepsize = kwargs.setdefault('stepsize', 0.5)
interval = kwargs.setdefault('interval', 50)
seed = kwargs.get("seed")
minimizer_kwargs = kwargs.get('minimizer', {})
minimizer_kwargs['args'] = fargs
minimizer_kwargs['jac'] = score
method = minimizer_kwargs.get('method', None)
if method and method != 'L-BFGS-B': # l_bfgs_b does not take a hessian
minimizer_kwargs['hess'] = hess
retvals = optimize.basinhopping(f, start_params,
minimizer_kwargs=minimizer_kwargs,
niter=niter, niter_success=niter_success,
T=T, stepsize=stepsize, disp=disp,
callback=callback, interval=interval,
seed=seed)
xopt = retvals.x
if full_output:
retvals = {
'fopt': retvals.fun,
'iterations': retvals.nit,
'fcalls': retvals.nfev,
'converged': 'completed successfully' in retvals.message[0]
}
else:
retvals = None
return xopt, retvals | Fit using Basin-hopping algorithm.
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
fargs : tuple
Extra arguments passed to the objective function, i.e.
objective(x,*args)
kwargs : dict[str, Any]
Extra keyword arguments passed to the objective function, i.e.
objective(x,**kwargs)
disp : bool
Set to True to print convergence messages.
maxiter : int
The maximum number of iterations to perform.
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
hess : str, optional
Method for computing the Hessian matrix, if applicable.
Returns
-------
xopt : ndarray
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None. | _fit_basinhopping | python | statsmodels/statsmodels | statsmodels/base/optimizer.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/optimizer.py | BSD-3-Clause |
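A small sketch of the underlying scipy.optimize.basinhopping call with the same keywords the wrapper reads (`niter`, `T`, `stepsize`, `interval`, `seed`, and a `minimizer_kwargs` dict carrying the gradient). The one-dimensional multi-modal objective is made up for illustration.

import numpy as np
from scipy import optimize

# Multi-modal toy objective standing in for a negative log likelihood.
f = lambda x: np.cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0]
score = lambda x: np.array([-14.5 * np.sin(14.5 * x[0] - 0.3) + 2.0 * x[0] + 0.2])

minimizer_kwargs = {"method": "L-BFGS-B", "jac": score}
ret = optimize.basinhopping(f, x0=[1.0], minimizer_kwargs=minimizer_kwargs,
                            niter=100, T=1.0, stepsize=0.5, interval=50,
                            seed=12345)
print(ret.x, ret.fun)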
def normalize_cov_type(cov_type):
"""
Normalize the cov_type string to a canonical version
Parameters
----------
cov_type : str
Returns
-------
normalized_cov_type : str
"""
if cov_type == 'nw-panel':
cov_type = 'hac-panel'
if cov_type == 'nw-groupsum':
cov_type = 'hac-groupsum'
return cov_type | Normalize the cov_type string to a canonical version
Parameters
----------
cov_type : str
Returns
-------
normalized_cov_type : str | normalize_cov_type | python | statsmodels/statsmodels | statsmodels/base/covtype.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/covtype.py | BSD-3-Clause |
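Usage follows directly from the function above; assuming it is imported from statsmodels.base.covtype, the legacy names map onto the HAC spellings and anything else passes through unchanged.

from statsmodels.base.covtype import normalize_cov_type

print(normalize_cov_type('nw-panel'))      # -> 'hac-panel'
print(normalize_cov_type('nw-groupsum'))   # -> 'hac-groupsum'
print(normalize_cov_type('HC1'))           # -> 'HC1' (unchanged)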
def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwds):
"""create new results instance with robust covariance as default
Parameters
----------
cov_type : str
the type of robust sandwich estimator to use. see Notes below
use_t : bool
If true, then the t distribution is used for inference.
If false, then the normal distribution is used.
kwds : depends on cov_type
Required or optional arguments for robust covariance calculation.
see Notes below
Returns
-------
results : results instance
This method creates a new results instance with the requested
robust covariance as the default covariance of the parameters.
Inferential statistics like p-values and hypothesis tests will be
based on this covariance matrix.
Notes
-----
Warning: Some of the options and defaults in cov_kwds may be changed in a
future version.
The covariance keywords provide an option 'scaling_factor' to adjust the
scaling of the covariance matrix, that is the covariance is multiplied by
this factor if it is given and is not `None`. This allows the user to
adjust the scaling of the covariance matrix to match other statistical
packages.
For example, `scaling_factor=(nobs - 1.) / (nobs - k_params)` provides a
correction so that the robust covariance matrices match those of Stata in
some models like GLM and discrete Models.
The following covariance types and required or optional arguments are
currently available:
- 'HC0', 'HC1', 'HC2', 'HC3': heteroscedasticity robust covariance
- no keyword arguments
- 'HAC': heteroskedasticity-autocorrelation robust covariance
``maxlags`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
kernels currently available kernels are ['bartlett', 'uniform'],
default is Bartlett
``use_correction``: bool, optional
If true, use small sample correction
- 'cluster': clustered covariance estimator
``groups`` : array_like[int], required :
Integer-valued index of clusters or groups.
``use_correction``: bool, optional
If True the sandwich covariance is calculated with a small
sample correction.
If False the sandwich covariance is calculated without
small sample correction.
``df_correction``: bool, optional
If True (default), then the degrees of freedom for the
inferential statistics and hypothesis tests, such as
pvalues, f_pvalue, conf_int, and t_test and f_test, are
based on the number of groups minus one instead of the
total number of observations minus the number of explanatory
variables. `df_resid` of the results instance is also
adjusted. When `use_t` is also True, then pvalues are
computed using the Student's t distribution using the
corrected values. These may differ substantially from
            p-values based on the normal distribution if the number of groups is
small.
If False, then `df_resid` of the results instance is not
adjusted.
- 'hac-groupsum': Driscoll and Kraay, heteroscedasticity and
autocorrelation robust covariance for panel data
# TODO: more options needed here
``time`` : array_like, required
index of time periods
``maxlags`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
The available kernels are ['bartlett', 'uniform']. The default is
Bartlett.
``use_correction`` : {False, 'hac', 'cluster'}, optional
            If False the sandwich covariance is calculated without small
sample correction. If `use_correction = 'cluster'` (default),
then the same small sample correction as in the case of
`covtype='cluster'` is used.
``df_correction`` : bool, optional
The adjustment to df_resid, see cov_type 'cluster' above
- 'hac-panel': heteroscedasticity and autocorrelation robust standard
errors in panel data. The data needs to be sorted in this case, the
time series for each panel unit or cluster need to be stacked. The
membership to a time series of an individual or group can be either
specified by group indicators or by increasing time periods. One of
``groups`` or ``time`` is required. # TODO: we need more options here
``groups`` : array_like[int]
indicator for groups
``time`` : array_like[int]
index of time periods
``maxlags`` : int, required
number of lags to use
``kernel`` : {callable, str}, optional
Available kernels are ['bartlett', 'uniform'], default
is Bartlett
``use_correction`` : {False, 'hac', 'cluster'}, optional
If False the sandwich covariance is calculated without
small sample correction.
``df_correction`` : bool, optional
Adjustment to df_resid, see cov_type 'cluster' above
**Reminder**: ``use_correction`` in "hac-groupsum" and "hac-panel" is
not bool, needs to be in {False, 'hac', 'cluster'}.
.. todo:: Currently there is no check for extra or misspelled keywords,
except in the case of cov_type `HCx`
"""
import statsmodels.stats.sandwich_covariance as sw
cov_type = normalize_cov_type(cov_type)
if 'kernel' in kwds:
kwds['weights_func'] = kwds.pop('kernel')
if 'weights_func' in kwds and not callable(kwds['weights_func']):
kwds['weights_func'] = sw.kernel_dict[kwds['weights_func']]
# pop because HCx raises if any kwds
sc_factor = kwds.pop('scaling_factor', None)
# TODO: make separate function that returns a robust cov plus info
use_self = kwds.pop('use_self', False)
if use_self:
res = self
else:
# this does not work for most models, use raw instance instead from fit
res = self.__class__(self.model, self.params,
normalized_cov_params=self.normalized_cov_params,
scale=self.scale)
res.cov_type = cov_type
# use_t might already be defined by the class, and already set
if use_t is None:
use_t = self.use_t
res.cov_kwds = {'use_t':use_t} # store for information
res.use_t = use_t
adjust_df = False
if cov_type in ['cluster', 'hac-panel', 'hac-groupsum']:
df_correction = kwds.get('df_correction', None)
# TODO: check also use_correction, do I need all combinations?
if df_correction is not False: # i.e. in [None, True]:
            # user did not explicitly set it to False
adjust_df = True
res.cov_kwds['adjust_df'] = adjust_df
# verify and set kwds, and calculate cov
# TODO: this should be outsourced in a function so we can reuse it in
# other models
# TODO: make it DRYer repeated code for checking kwds
if cov_type.upper() in ('HC0', 'HC1', 'HC2', 'HC3'):
if kwds:
raise ValueError('heteroscedasticity robust covariance '
'does not use keywords')
res.cov_kwds['description'] = descriptions[cov_type.upper()]
res.cov_params_default = getattr(self, 'cov_' + cov_type.upper(), None)
if res.cov_params_default is None:
# results classes that do not have cov_HCx attribute
res.cov_params_default = sw.cov_white_simple(self,
use_correction=False)
elif cov_type.lower() == 'hac':
maxlags = kwds['maxlags'] # required?, default in cov_hac_simple
res.cov_kwds['maxlags'] = maxlags
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
use_correction = kwds.get('use_correction', False)
res.cov_kwds['use_correction'] = use_correction
res.cov_kwds['description'] = descriptions['HAC'].format(
maxlags=maxlags, correction=['without', 'with'][use_correction])
res.cov_params_default = sw.cov_hac_simple(self, nlags=maxlags,
weights_func=weights_func,
use_correction=use_correction)
elif cov_type.lower() == 'cluster':
#cluster robust standard errors, one- or two-way
groups = kwds['groups']
if not hasattr(groups, 'shape'):
groups = np.asarray(groups).T
if groups.ndim >= 2:
groups = groups.squeeze()
res.cov_kwds['groups'] = groups
use_correction = kwds.get('use_correction', True)
res.cov_kwds['use_correction'] = use_correction
if groups.ndim == 1:
if adjust_df:
# need to find number of groups
# duplicate work
self.n_groups = n_groups = len(np.unique(groups))
res.cov_params_default = sw.cov_cluster(self, groups,
use_correction=use_correction)
elif groups.ndim == 2:
if hasattr(groups, 'values'):
groups = groups.values
if adjust_df:
# need to find number of groups
# duplicate work
n_groups0 = len(np.unique(groups[:,0]))
n_groups1 = len(np.unique(groups[:, 1]))
self.n_groups = (n_groups0, n_groups1)
n_groups = min(n_groups0, n_groups1) # use for adjust_df
# Note: sw.cov_cluster_2groups has 3 returns
res.cov_params_default = sw.cov_cluster_2groups(self, groups,
use_correction=use_correction)[0]
else:
raise ValueError('only two groups are supported')
res.cov_kwds['description'] = descriptions['cluster']
elif cov_type.lower() == 'hac-panel':
#cluster robust standard errors
res.cov_kwds['time'] = time = kwds.get('time', None)
res.cov_kwds['groups'] = groups = kwds.get('groups', None)
#TODO: nlags is currently required
#nlags = kwds.get('nlags', True)
#res.cov_kwds['nlags'] = nlags
#TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'hac')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
# TODO: clumsy time index in cov_nw_panel
if groups is not None:
groups = np.asarray(groups)
tt = (np.nonzero(groups[:-1] != groups[1:])[0] + 1).tolist()
nobs_ = len(groups)
elif time is not None:
# TODO: clumsy time index in cov_nw_panel
time = np.asarray(time)
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1).tolist()
nobs_ = len(time)
else:
raise ValueError('either time or groups needs to be given')
groupidx = lzip([0] + tt, tt + [nobs_])
self.n_groups = n_groups = len(groupidx)
res.cov_params_default = sw.cov_nw_panel(self, maxlags, groupidx,
weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = descriptions['HAC-Panel']
elif cov_type.lower() == 'hac-groupsum':
# Driscoll-Kraay standard errors
res.cov_kwds['time'] = time = kwds['time']
#TODO: nlags is currently required
#nlags = kwds.get('nlags', True)
#res.cov_kwds['nlags'] = nlags
#TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'cluster')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if adjust_df:
# need to find number of groups
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1)
self.n_groups = n_groups = len(tt) + 1
res.cov_params_default = sw.cov_nw_groupsum(self, maxlags, time,
weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = descriptions['HAC-Groupsum']
else:
raise ValueError('cov_type not recognized. See docstring for ' +
'available options and spelling')
# generic optional factor to scale covariance
res.cov_kwds['scaling_factor'] = sc_factor
if sc_factor is not None:
res.cov_params_default *= sc_factor
if adjust_df:
# Note: df_resid is used for scale and others, add new attribute
res.df_resid_inference = n_groups - 1
return res | create new results instance with robust covariance as default
Parameters
----------
cov_type : str
the type of robust sandwich estimator to use. see Notes below
use_t : bool
If true, then the t distribution is used for inference.
If false, then the normal distribution is used.
kwds : depends on cov_type
Required or optional arguments for robust covariance calculation.
see Notes below
Returns
-------
results : results instance
This method creates a new results instance with the requested
robust covariance as the default covariance of the parameters.
Inferential statistics like p-values and hypothesis tests will be
based on this covariance matrix.
Notes
-----
Warning: Some of the options and defaults in cov_kwds may be changed in a
future version.
The covariance keywords provide an option 'scaling_factor' to adjust the
scaling of the covariance matrix, that is the covariance is multiplied by
this factor if it is given and is not `None`. This allows the user to
adjust the scaling of the covariance matrix to match other statistical
packages.
For example, `scaling_factor=(nobs - 1.) / (nobs - k_params)` provides a
correction so that the robust covariance matrices match those of Stata in
some models like GLM and discrete Models.
The following covariance types and required or optional arguments are
currently available:
- 'HC0', 'HC1', 'HC2', 'HC3': heteroscedasticity robust covariance
- no keyword arguments
- 'HAC': heteroskedasticity-autocorrelation robust covariance
``maxlags`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
kernels currently available kernels are ['bartlett', 'uniform'],
default is Bartlett
``use_correction``: bool, optional
If true, use small sample correction
- 'cluster': clustered covariance estimator
``groups`` : array_like[int], required :
Integer-valued index of clusters or groups.
``use_correction``: bool, optional
If True the sandwich covariance is calculated with a small
sample correction.
If False the sandwich covariance is calculated without
small sample correction.
``df_correction``: bool, optional
If True (default), then the degrees of freedom for the
inferential statistics and hypothesis tests, such as
pvalues, f_pvalue, conf_int, and t_test and f_test, are
based on the number of groups minus one instead of the
total number of observations minus the number of explanatory
variables. `df_resid` of the results instance is also
adjusted. When `use_t` is also True, then pvalues are
computed using the Student's t distribution using the
corrected values. These may differ substantially from
            p-values based on the normal distribution if the number of groups is
small.
If False, then `df_resid` of the results instance is not
adjusted.
- 'hac-groupsum': Driscoll and Kraay, heteroscedasticity and
autocorrelation robust covariance for panel data
# TODO: more options needed here
``time`` : array_like, required
index of time periods
``maxlags`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
The available kernels are ['bartlett', 'uniform']. The default is
Bartlett.
``use_correction`` : {False, 'hac', 'cluster'}, optional
            If False the sandwich covariance is calculated without small
sample correction. If `use_correction = 'cluster'` (default),
then the same small sample correction as in the case of
`covtype='cluster'` is used.
``df_correction`` : bool, optional
The adjustment to df_resid, see cov_type 'cluster' above
- 'hac-panel': heteroscedasticity and autocorrelation robust standard
errors in panel data. The data needs to be sorted in this case, the
time series for each panel unit or cluster need to be stacked. The
membership to a time series of an individual or group can be either
specified by group indicators or by increasing time periods. One of
``groups`` or ``time`` is required. # TODO: we need more options here
``groups`` : array_like[int]
indicator for groups
``time`` : array_like[int]
index of time periods
``maxlags`` : int, required
number of lags to use
``kernel`` : {callable, str}, optional
Available kernels are ['bartlett', 'uniform'], default
is Bartlett
``use_correction`` : {False, 'hac', 'cluster'}, optional
If False the sandwich covariance is calculated without
small sample correction.
``df_correction`` : bool, optional
Adjustment to df_resid, see cov_type 'cluster' above
**Reminder**: ``use_correction`` in "hac-groupsum" and "hac-panel" is
not bool, needs to be in {False, 'hac', 'cluster'}.
.. todo:: Currently there is no check for extra or misspelled keywords,
except in the case of cov_type `HCx` | get_robustcov_results | python | statsmodels/statsmodels | statsmodels/base/covtype.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/covtype.py | BSD-3-Clause |
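A usage sketch with simulated data: an OLS fit, then heteroscedasticity-robust and one-way cluster-robust covariances requested through this method. The data, group structure, and coefficients are made up.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
groups = np.repeat(np.arange(50), 10)              # 50 clusters of 10 obs each
x = sm.add_constant(rng.normal(size=(500, 2)))
y = x @ np.array([1.0, 0.5, -0.3]) + rng.normal(size=500)

ols_res = sm.OLS(y, x).fit()

# Heteroscedasticity-robust covariance: HCx types take no extra keywords.
res_hc = ols_res.get_robustcov_results(cov_type='HC1')

# Cluster-robust covariance; df_correction=True (the default) bases
# inference on n_groups - 1 degrees of freedom.
res_cl = ols_res.get_robustcov_results(cov_type='cluster', groups=groups)

print(res_hc.bse)
print(res_cl.bse)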
def _lm_robust(score, constraint_matrix, score_deriv_inv, cov_score,
cov_params=None):
'''general formula for score/LM test
    Generalized score or Lagrange multiplier test for implicit constraints
    `r(params) = 0`, with gradient `R = d r / d params`.
    Linear constraints are given by `R params - q = 0`.
It is assumed that all arrays are evaluated at the constrained estimates.
Parameters
----------
score : ndarray, 1-D
derivative of objective function at estimated parameters
of constrained model
constraint_matrix R : ndarray
Linear restriction matrix or Jacobian of nonlinear constraints
score_deriv_inv, Ainv : ndarray, symmetric, square
inverse of second derivative of objective function
TODO: could be inverse of OPG or any other estimator if information
matrix equality holds
cov_score B : ndarray, symmetric, square
covariance matrix of the score. This is the inner part of a sandwich
estimator.
cov_params V : ndarray, symmetric, square
covariance of full parameter vector evaluated at constrained parameter
estimate. This can be specified instead of cov_score B.
Returns
-------
lm_stat : float
score/lagrange multiplier statistic
p-value : float
p-value of the LM test based on chisquare distribution
Notes
-----
'''
# shorthand alias
R, Ainv, B, V = constraint_matrix, score_deriv_inv, cov_score, cov_params
k_constraints = np.linalg.matrix_rank(R)
tmp = R.dot(Ainv)
wscore = tmp.dot(score) # C Ainv score
if B is None and V is None:
# only Ainv is given, so we assume information matrix identity holds
# computational short cut, should be same if Ainv == inv(B)
lm_stat = score.dot(Ainv.dot(score))
else:
# information matrix identity does not hold
if V is None:
inner = tmp.dot(B).dot(tmp.T)
else:
inner = R.dot(V).dot(R.T)
#lm_stat2 = wscore.dot(np.linalg.pinv(inner).dot(wscore))
# Let's assume inner is invertible, TODO: check if usecase for pinv exists
lm_stat = wscore.dot(np.linalg.solve(inner, wscore))
pval = stats.chi2.sf(lm_stat, k_constraints)
return lm_stat, pval, k_constraints | general formula for score/LM test
    Generalized score or Lagrange multiplier test for implicit constraints
    `r(params) = 0`, with gradient `R = d r / d params`.
    Linear constraints are given by `R params - q = 0`.
It is assumed that all arrays are evaluated at the constrained estimates.
Parameters
----------
score : ndarray, 1-D
derivative of objective function at estimated parameters
of constrained model
constraint_matrix R : ndarray
Linear restriction matrix or Jacobian of nonlinear constraints
score_deriv_inv, Ainv : ndarray, symmetric, square
inverse of second derivative of objective function
TODO: could be inverse of OPG or any other estimator if information
matrix equality holds
cov_score B : ndarray, symmetric, square
covariance matrix of the score. This is the inner part of a sandwich
estimator.
cov_params V : ndarray, symmetric, square
covariance of full parameter vector evaluated at constrained parameter
estimate. This can be specified instead of cov_score B.
Returns
-------
lm_stat : float
score/lagrange multiplier statistic
p-value : float
p-value of the LM test based on chisquare distribution
Notes
----- | _lm_robust | python | statsmodels/statsmodels | statsmodels/base/_parameter_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_parameter_inference.py | BSD-3-Clause |
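A numeric sketch of the sandwich quadratic form computed above, using made-up arrays just to show the shapes (three parameters, two linear constraints); it reproduces the `inner = R Ainv B Ainv' R'` branch of the function.

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
k_params, k_constraints = 3, 2

score = rng.normal(size=k_params)                  # score at constrained estimate
R = np.eye(k_params)[:k_constraints]               # test the first two parameters
M = rng.normal(size=(k_params, k_params))
Ainv = np.linalg.inv(M @ M.T + np.eye(k_params))   # stand-in for inverse Hessian
B = M @ M.T + np.eye(k_params)                     # stand-in for cov of the score

wscore = R @ Ainv @ score
inner = R @ Ainv @ B @ Ainv.T @ R.T                # robust middle matrix
lm_stat = wscore @ np.linalg.solve(inner, wscore)
pval = stats.chi2.sf(lm_stat, k_constraints)
print(lm_stat, pval)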
def score_test(self, exog_extra=None, params_constrained=None,
hypothesis='joint', cov_type=None, cov_kwds=None,
k_constraints=None, r_matrix=None, scale=None, observed=True):
"""score test for restrictions or for omitted variables
Null Hypothesis : constraints are satisfied
Alternative Hypothesis : at least one of the constraints does not hold
    This allows specifying restricted and unrestricted model properties in
three different ways
- fit_constrained result: model contains score and hessian function for
the full, unrestricted model, but the parameter estimate in the results
instance is for the restricted model. This is the case if the model
was estimated with fit_constrained.
- restricted model with variable addition: If exog_extra is not None, then
it is assumed that the current model is a model with zero restrictions
and the unrestricted model is given by adding exog_extra as additional
explanatory variables.
- unrestricted model with restricted parameters explicitly provided. If
params_constrained is not None, then the model is assumed to be for the
unrestricted model, but the provided parameters are for the restricted
model.
TODO: This case will currently only work for `nonrobust` cov_type,
otherwise we will also need the restriction matrix provided by the user.
Parameters
----------
exog_extra : None or array_like
Explanatory variables that are jointly tested for inclusion in the
model, i.e. omitted variables.
params_constrained : array_like
estimated parameter of the restricted model. This can be the
        parameter estimate for the current model when testing for omitted
variables.
hypothesis : str, 'joint' (default) or 'separate'
        If hypothesis is 'joint', then the chisquare test results for the
        joint hypothesis that all constraints hold are returned.
        If hypothesis is 'separate', then z-test results for each constraint
        are returned.
This is currently only implemented for cov_type="nonrobust".
cov_type : str
Warning: only partially implemented so far, currently only "nonrobust"
and "HC0" are supported.
If cov_type is None, then the cov_type specified in fit for the Wald
tests is used.
If the cov_type argument is not None, then it will be used instead of
the Wald cov_type given in fit.
k_constraints : int or None
Number of constraints that were used in the estimation of params
restricted relative to the number of exog in the model.
This must be provided if no exog_extra are given. If exog_extra is
not None, then k_constraints is assumed to be zero if it is None.
observed : bool
If True, then the observed Hessian is used in calculating the
covariance matrix of the score. If false then the expected
information matrix is used. This currently only applies to GLM where
EIM is available.
Warning: This option might still change.
Returns
-------
chi2_stat : float
chisquare statistic for the score test
p-value : float
P-value of the score test based on the chisquare distribution.
df : int
Degrees of freedom used in the p-value calculation. This is equal
to the number of constraints.
Notes
-----
Status: experimental, several options are not implemented yet or are not
    verified yet. Currently available options might also still change.
cov_type is 'nonrobust':
The covariance matrix for the score is based on the Hessian, i.e.
observed information matrix or optionally on the expected information
matrix.
cov_type is 'HC0'
The covariance matrix of the score is the simple empirical covariance of
score_obs without degrees of freedom correction.
"""
# TODO: we are computing unnecessary things for cov_type nonrobust
if hasattr(self, "_results"):
# use numpy if we have wrapper, not relevant if method
self = self._results
model = self.model
nobs = model.endog.shape[0] # model.nobs
# discrete Poisson does not have nobs
if params_constrained is None:
params_constrained = self.params
cov_type = cov_type if cov_type is not None else self.cov_type
if observed is False:
hess_kwd = {'observed': False}
else:
hess_kwd = {}
if exog_extra is None:
if hasattr(self, 'constraints'):
if isinstance(self.constraints, tuple):
r_matrix = self.constraints[0]
else:
r_matrix = self.constraints.coefs
k_constraints = r_matrix.shape[0]
else:
if k_constraints is None:
                raise ValueError('if exog_extra is None, then k_constraints '
                                 'needs to be given')
# we need to use results scale as additional parameter
if scale is not None:
# we need to use results scale as additional parameter, gh #7840
score_kwd = {'scale': scale}
hess_kwd['scale'] = scale
else:
score_kwd = {}
# duplicate computation of score, might not be needed
score = model.score(params_constrained, **score_kwd)
score_obs = model.score_obs(params_constrained, **score_kwd)
hessian = model.hessian(params_constrained, **hess_kwd)
else:
if cov_type == 'V':
raise ValueError('if exog_extra is not None, then cov_type cannot '
'be V')
if hasattr(self, 'constraints'):
            raise NotImplementedError('if exog_extra is not None, then self '
                                      'should not be a constrained fit result')
if isinstance(exog_extra, tuple):
sh = _scorehess_extra(self, params_constrained, *exog_extra,
hess_kwds=hess_kwd)
score_obs, hessian, k_constraints, r_matrix = sh
score = score_obs.sum(0)
else:
exog_extra = np.asarray(exog_extra)
k_constraints = 0
ex = np.column_stack((model.exog, exog_extra))
# this uses shape not matrix rank to determine k_constraints
# requires nonsingular (no added perfect collinearity)
k_constraints += ex.shape[1] - model.exog.shape[1]
# TODO use diag instead of full np.eye
r_matrix = np.eye(len(self.params) + k_constraints
)[-k_constraints:]
score_factor = model.score_factor(params_constrained)
if score_factor.ndim == 1:
score_obs = (score_factor[:, None] * ex)
else:
sf = score_factor
score_obs = np.column_stack((sf[:, :1] * ex, sf[:, 1:]))
score = score_obs.sum(0)
hessian_factor = model.hessian_factor(params_constrained,
**hess_kwd)
# see #4714
from statsmodels.genmod.generalized_linear_model import GLM
if isinstance(model, GLM):
hessian_factor *= -1
hessian = np.dot(ex.T * hessian_factor, ex)
if cov_type == 'nonrobust':
cov_score_test = -hessian
elif cov_type.upper() == 'HC0':
hinv = -np.linalg.inv(hessian)
cov_score = nobs * np.cov(score_obs.T)
# temporary to try out
lm = _lm_robust(score, r_matrix, hinv, cov_score, cov_params=None)
return lm
# alternative is to use only the center, but it is singular
# https://github.com/statsmodels/statsmodels/pull/2096#issuecomment-393646205
# cov_score_test_inv = cov_lm_robust(score, r_matrix, hinv,
# cov_score, cov_params=None)
elif cov_type.upper() == 'V':
# TODO: this does not work, V in fit_constrained results is singular
# we need cov_params without the zeros in it
hinv = -np.linalg.inv(hessian)
cov_score = nobs * np.cov(score_obs.T)
V = self.cov_params_default
# temporary to try out
        # _lm_robust returns (stat, pval, k_constraints); unpack so the
        # return below yields scalars
        chi2stat, pval, _ = _lm_robust(score, r_matrix, hinv, cov_score,
                                       cov_params=V)
return chi2stat, pval
else:
msg = 'Only cov_type "nonrobust" and "HC0" are available.'
raise NotImplementedError(msg)
if hypothesis == 'joint':
chi2stat = score.dot(np.linalg.solve(cov_score_test, score[:, None]))
pval = stats.chi2.sf(chi2stat, k_constraints)
# return a stats results instance instead? Contrast?
return chi2stat, pval, k_constraints
elif hypothesis == 'separate':
diff = score
bse = np.sqrt(np.diag(cov_score_test))
stat = diff / bse
pval = stats.norm.sf(np.abs(stat))*2
return stat, pval
else:
raise NotImplementedError('only hypothesis "joint" is available') | score test for restrictions or for omitted variables
Null Hypothesis : constraints are satisfied
Alternative Hypothesis : at least one of the constraints does not hold
    This allows specifying restricted and unrestricted model properties in
three different ways
- fit_constrained result: model contains score and hessian function for
the full, unrestricted model, but the parameter estimate in the results
instance is for the restricted model. This is the case if the model
was estimated with fit_constrained.
- restricted model with variable addition: If exog_extra is not None, then
it is assumed that the current model is a model with zero restrictions
and the unrestricted model is given by adding exog_extra as additional
explanatory variables.
- unrestricted model with restricted parameters explicitly provided. If
params_constrained is not None, then the model is assumed to be for the
unrestricted model, but the provided parameters are for the restricted
model.
TODO: This case will currently only work for `nonrobust` cov_type,
otherwise we will also need the restriction matrix provided by the user.
Parameters
----------
exog_extra : None or array_like
Explanatory variables that are jointly tested for inclusion in the
model, i.e. omitted variables.
params_constrained : array_like
estimated parameter of the restricted model. This can be the
        parameter estimate for the current model when testing for omitted
variables.
hypothesis : str, 'joint' (default) or 'separate'
        If hypothesis is 'joint', then the chisquare test results for the
        joint hypothesis that all constraints hold are returned.
        If hypothesis is 'separate', then z-test results for each constraint
        are returned.
This is currently only implemented for cov_type="nonrobust".
cov_type : str
Warning: only partially implemented so far, currently only "nonrobust"
and "HC0" are supported.
If cov_type is None, then the cov_type specified in fit for the Wald
tests is used.
If the cov_type argument is not None, then it will be used instead of
the Wald cov_type given in fit.
k_constraints : int or None
Number of constraints that were used in the estimation of params
restricted relative to the number of exog in the model.
This must be provided if no exog_extra are given. If exog_extra is
not None, then k_constraints is assumed to be zero if it is None.
observed : bool
If True, then the observed Hessian is used in calculating the
covariance matrix of the score. If false then the expected
information matrix is used. This currently only applies to GLM where
EIM is available.
Warning: This option might still change.
Returns
-------
chi2_stat : float
chisquare statistic for the score test
p-value : float
P-value of the score test based on the chisquare distribution.
df : int
Degrees of freedom used in the p-value calculation. This is equal
to the number of constraints.
Notes
-----
Status: experimental, several options are not implemented yet or are not
    verified yet. Currently available options might also still change.
cov_type is 'nonrobust':
The covariance matrix for the score is based on the Hessian, i.e.
observed information matrix or optionally on the expected information
matrix.
cov_type is 'HC0'
The covariance matrix of the score is the simple empirical covariance of
score_obs without degrees of freedom correction. | score_test | python | statsmodels/statsmodels | statsmodels/base/_parameter_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_parameter_inference.py | BSD-3-Clause |
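A sketch of the variable-addition use with a simulated Poisson GLM: the fit below is the restricted (zero-coefficient) model and `x_extra` holds the candidate omitted regressors. Recent statsmodels versions expose this as a results method; otherwise the module-level function above can be called as `score_test(res, exog_extra=x_extra)`, since its first argument is the results instance.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = sm.add_constant(rng.normal(size=(500, 1)))
x_extra = rng.normal(size=(500, 2))                 # candidate omitted variables
y = rng.poisson(np.exp(x @ np.array([0.2, 0.5])))

res = sm.GLM(y, x, family=sm.families.Poisson()).fit()

# LM test that the coefficients on x_extra are jointly zero, evaluated at
# the restricted fit above.
chi2, pval, df = res.score_test(exog_extra=x_extra)
print(chi2, pval, df)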
def _scorehess_extra(self, params=None, exog_extra=None,
exog2_extra=None, hess_kwds=None):
"""Experimental helper function for variable addition score test.
This uses score and hessian factor at the params which should be the
params of the restricted model.
"""
if hess_kwds is None:
hess_kwds = {}
# this corresponds to a model methods, so we need only the model
model = self.model
# as long as we have results instance, we can take params from it
if params is None:
params = self.params
# get original exog from model, currently only if exactly 2
exog_o1, exog_o2 = model._get_exogs()
if exog_o2 is None:
# if extra params is scalar, as in NB, GPP
exog_o2 = np.ones((exog_o1.shape[0], 1))
k_mean = exog_o1.shape[1]
k_prec = exog_o2.shape[1]
if exog_extra is not None:
exog = np.column_stack((exog_o1, exog_extra))
else:
exog = exog_o1
if exog2_extra is not None:
exog2 = np.column_stack((exog_o2, exog2_extra))
else:
exog2 = exog_o2
k_mean_new = exog.shape[1]
k_prec_new = exog2.shape[1]
k_cm = k_mean_new - k_mean
k_cp = k_prec_new - k_prec
k_constraints = k_cm + k_cp
index_mean = np.arange(k_mean, k_mean_new)
index_prec = np.arange(k_mean_new + k_prec, k_mean_new + k_prec_new)
r_matrix = np.zeros((k_constraints, len(params) + k_constraints))
# print(exog.shape, exog2.shape)
# print(r_matrix.shape, k_cm, k_cp, k_mean_new, k_prec_new)
# print(index_mean, index_prec)
r_matrix[:k_cm, index_mean] = np.eye(k_cm)
r_matrix[k_cm: k_cm + k_cp, index_prec] = np.eye(k_cp)
if hasattr(model, "score_hessian_factor"):
sf, hf = model.score_hessian_factor(params, return_hessian=True,
**hess_kwds)
else:
sf = model.score_factor(params)
hf = model.hessian_factor(params, **hess_kwds)
sf1, sf2 = sf
hf11, hf12, hf22 = hf
# elementwise product for each row (observation)
d1 = sf1[:, None] * exog
d2 = sf2[:, None] * exog2
score_obs = np.column_stack((d1, d2))
# elementwise product for each row (observation)
d11 = (exog.T * hf11).dot(exog)
d12 = (exog.T * hf12).dot(exog2)
d22 = (exog2.T * hf22).dot(exog2)
hessian = np.block([[d11, d12], [d12.T, d22]])
return score_obs, hessian, k_constraints, r_matrix | Experimental helper function for variable addition score test.
This uses score and hessian factor at the params which should be the
params of the restricted model. | _scorehess_extra | python | statsmodels/statsmodels | statsmodels/base/_parameter_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_parameter_inference.py | BSD-3-Clause |
def tic(results):
"""Takeuchi information criterion for misspecified models
"""
imr = getattr(results, "im_ratio", im_ratio(results))
tic = - 2 * results.llf + 2 * np.trace(imr)
return tic | Takeuchi information criterion for misspecified models | tic | python | statsmodels/statsmodels | statsmodels/base/_parameter_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_parameter_inference.py | BSD-3-Clause |
def gbic(results, gbicp=False):
"""generalized BIC for misspecified models
References
----------
Lv, Jinchi, and Jun S. Liu. 2014. "Model Selection Principles in
Misspecified Models." Journal of the Royal Statistical Society.
Series B (Statistical Methodology) 76 (1): 141–67.
"""
self = getattr(results, "_results", results)
k_params = self.df_model + 1
nobs = k_params + self.df_resid
imr = getattr(results, "im_ratio", im_ratio(results))
imr_logdet = np.linalg.slogdet(imr)[1]
gbic = -2 * self.llf + k_params * np.log(nobs) - imr_logdet # LL equ. (20)
gbicp = gbic + np.trace(imr) # LL equ. (23)
return gbic, gbicp | generalized BIC for misspecified models
References
----------
Lv, Jinchi, and Jun S. Liu. 2014. "Model Selection Principles in
Misspecified Models." Journal of the Royal Statistical Society.
Series B (Statistical Methodology) 76 (1): 141–67. | gbic | python | statsmodels/statsmodels | statsmodels/base/_parameter_inference.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_parameter_inference.py | BSD-3-Clause |
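A sketch computing both misspecification-robust criteria on a simulated Poisson GLM. `tic` and `gbic` live in the private module statsmodels.base._parameter_inference shown here, and both rely on `im_ratio`, which assumes the model provides score_obs and a Hessian (GLM does).

import numpy as np
import statsmodels.api as sm
from statsmodels.base._parameter_inference import gbic, tic

rng = np.random.default_rng(0)
x = sm.add_constant(rng.normal(size=(200, 2)))
y = rng.poisson(np.exp(x @ np.array([0.1, 0.3, -0.2])))

res = sm.GLM(y, x, family=sm.families.Poisson()).fit()
print(tic(res))    # Takeuchi information criterion
print(gbic(res))   # (GBIC, GBIC_p) per Lv and Liu (2014)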
def _est_regularized_naive(mod, pnum, partitions, fit_kwds=None):
"""estimates the regularized fitted parameters.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
pnum : scalar
Index of current partition
partitions : scalar
Total number of partitions
fit_kwds : dict-like or None
Keyword arguments to be given to fit_regularized
Returns
-------
An array of the parameters for the regularized fit
"""
if fit_kwds is None:
raise ValueError("_est_regularized_naive currently " +
"requires that fit_kwds not be None.")
return mod.fit_regularized(**fit_kwds).params | estimates the regularized fitted parameters.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
pnum : scalar
Index of current partition
partitions : scalar
Total number of partitions
fit_kwds : dict-like or None
Keyword arguments to be given to fit_regularized
Returns
-------
An array of the parameters for the regularized fit | _est_regularized_naive | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def _est_unregularized_naive(mod, pnum, partitions, fit_kwds=None):
"""estimates the unregularized fitted parameters.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
pnum : scalar
Index of current partition
partitions : scalar
Total number of partitions
fit_kwds : dict-like or None
Keyword arguments to be given to fit
Returns
-------
An array of the parameters for the fit
"""
if fit_kwds is None:
raise ValueError("_est_unregularized_naive currently " +
"requires that fit_kwds not be None.")
return mod.fit(**fit_kwds).params | estimates the unregularized fitted parameters.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
pnum : scalar
Index of current partition
partitions : scalar
Total number of partitions
fit_kwds : dict-like or None
Keyword arguments to be given to fit
Returns
-------
An array of the parameters for the fit | _est_unregularized_naive | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def _join_naive(params_l, threshold=0):
"""joins the results from each run of _est_<type>_naive
and returns the mean estimate of the coefficients
Parameters
----------
params_l : list
A list of arrays of coefficients.
threshold : scalar
The threshold at which the coefficients will be cut.
"""
p = len(params_l[0])
partitions = len(params_l)
params_mn = np.zeros(p)
for params in params_l:
params_mn += params
params_mn /= partitions
params_mn[np.abs(params_mn) < threshold] = 0
return params_mn | joins the results from each run of _est_<type>_naive
and returns the mean estimate of the coefficients
Parameters
----------
params_l : list
A list of arrays of coefficients.
threshold : scalar
The threshold at which the coefficients will be cut. | _join_naive | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
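The joining step in isolation, with made-up per-partition estimates; the import path is the private module shown here, so the underscore name is intentional.

import numpy as np
from statsmodels.base.distributed_estimation import _join_naive

params_l = [np.array([0.9, 0.02, -1.1]),
            np.array([1.1, -0.03, -0.9]),
            np.array([1.0, 0.01, -1.0])]

# Average across partitions, then zero out coefficients below the threshold.
print(_join_naive(params_l, threshold=0.1))   # -> approx [1.0, 0.0, -1.0]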
def _calc_grad(mod, params, alpha, L1_wt, score_kwds):
"""calculates the log-likelihood gradient for the debiasing
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
params : array_like
The estimated coefficients for the current partition.
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is
a ridge fit, if 1 it is a lasso fit.
score_kwds : dict-like or None
Keyword arguments for the score function.
Returns
-------
An array-like object of the same dimension as params
Notes
-----
In general:
gradient l_k(params)
where k corresponds to the index of the partition
For OLS:
    X^T(y - X params)
"""
grad = -mod.score(np.asarray(params), **score_kwds)
grad += alpha * (1 - L1_wt)
return grad | calculates the log-likelihood gradient for the debiasing
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
params : array_like
The estimated coefficients for the current partition.
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is
a ridge fit, if 1 it is a lasso fit.
score_kwds : dict-like or None
Keyword arguments for the score function.
Returns
-------
An array-like object of the same dimension as params
Notes
-----
In general:
gradient l_k(params)
where k corresponds to the index of the partition
For OLS:
X^T(y - X^T params) | _calc_grad | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
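A pure-NumPy check of the OLS special case quoted in the docstring: the log-likelihood gradient X^T(y - Xb) is verified against a central finite difference of the sum-of-squares objective. Data and coefficients are simulated.

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
beta = np.array([1.0, -0.5, 0.2])
y = X @ beta + rng.normal(size=50)

b = np.zeros(3)                             # evaluate away from the optimum
grad_ll = X.T @ (y - X @ b)                 # OLS log-likelihood gradient (up to scale)

# The gradient of 0.5 * ||y - Xb||^2 is the negative of grad_ll.
f = lambda bb: 0.5 * np.sum((y - X @ bb) ** 2)
eps = 1e-6
fd = np.array([(f(b + eps * np.eye(3)[j]) - f(b - eps * np.eye(3)[j])) / (2 * eps)
               for j in range(3)])
print(np.allclose(grad_ll, -fd))            # True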
def _calc_wdesign_mat(mod, params, hess_kwds):
"""calculates the weighted design matrix necessary to generate
the approximate inverse covariance matrix
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
params : array_like
The estimated coefficients for the current partition.
hess_kwds : dict-like or None
Keyword arguments for the hessian function.
Returns
-------
An array-like object, updated design matrix, same dimension
as mod.exog
"""
rhess = np.sqrt(mod.hessian_factor(np.asarray(params), **hess_kwds))
return rhess[:, None] * mod.exog | calculates the weighted design matrix necessary to generate
the approximate inverse covariance matrix
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
params : array_like
The estimated coefficients for the current partition.
hess_kwds : dict-like or None
Keyword arguments for the hessian function.
Returns
-------
An array-like object, updated design matrix, same dimension
as mod.exog | _calc_wdesign_mat | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def _est_regularized_debiased(mod, mnum, partitions, fit_kwds=None,
score_kwds=None, hess_kwds=None):
"""estimates the regularized fitted parameters, is the default
estimation_method for class DistributedModel.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
mnum : scalar
Index of current partition.
partitions : scalar
Total number of partitions.
fit_kwds : dict-like or None
Keyword arguments to be given to fit_regularized
score_kwds : dict-like or None
Keyword arguments for the score function.
hess_kwds : dict-like or None
Keyword arguments for the Hessian function.
Returns
-------
A tuple of parameters for regularized fit
An array-like object of the fitted parameters, params
An array-like object for the gradient
A list of array like objects for nodewise_row
A list of array like objects for nodewise_weight
"""
score_kwds = {} if score_kwds is None else score_kwds
hess_kwds = {} if hess_kwds is None else hess_kwds
if fit_kwds is None:
raise ValueError("_est_regularized_debiased currently " +
"requires that fit_kwds not be None.")
else:
alpha = fit_kwds["alpha"]
if "L1_wt" in fit_kwds:
L1_wt = fit_kwds["L1_wt"]
else:
L1_wt = 1
nobs, p = mod.exog.shape
p_part = int(np.ceil((1. * p) / partitions))
params = mod.fit_regularized(**fit_kwds).params
grad = _calc_grad(mod, params, alpha, L1_wt, score_kwds) / nobs
wexog = _calc_wdesign_mat(mod, params, hess_kwds)
nodewise_row_l = []
nodewise_weight_l = []
for idx in range(mnum * p_part, min((mnum + 1) * p_part, p)):
nodewise_row = _calc_nodewise_row(wexog, idx, alpha)
nodewise_row_l.append(nodewise_row)
nodewise_weight = _calc_nodewise_weight(wexog, nodewise_row, idx,
alpha)
nodewise_weight_l.append(nodewise_weight)
return params, grad, nodewise_row_l, nodewise_weight_l | estimates the regularized fitted parameters, is the default
estimation_method for class DistributedModel.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
mnum : scalar
Index of current partition.
partitions : scalar
Total number of partitions.
fit_kwds : dict-like or None
Keyword arguments to be given to fit_regularized
score_kwds : dict-like or None
Keyword arguments for the score function.
hess_kwds : dict-like or None
Keyword arguments for the Hessian function.
Returns
-------
A tuple of parameters for regularized fit
An array-like object of the fitted parameters, params
An array-like object for the gradient
A list of array like objects for nodewise_row
A list of array like objects for nodewise_weight | _est_regularized_debiased | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def _join_debiased(results_l, threshold=0):
"""joins the results from each run of _est_regularized_debiased
and returns the debiased estimate of the coefficients
Parameters
----------
results_l : list
A list of tuples each one containing the params, grad,
nodewise_row and nodewise_weight values for each partition.
threshold : scalar
The threshold at which the coefficients will be cut.
"""
p = len(results_l[0][0])
partitions = len(results_l)
params_mn = np.zeros(p)
grad_mn = np.zeros(p)
nodewise_row_l = []
nodewise_weight_l = []
for r in results_l:
params_mn += r[0]
grad_mn += r[1]
nodewise_row_l.extend(r[2])
nodewise_weight_l.extend(r[3])
nodewise_row_l = np.array(nodewise_row_l)
nodewise_weight_l = np.array(nodewise_weight_l)
params_mn /= partitions
grad_mn *= -1. / partitions
approx_inv_cov = _calc_approx_inv_cov(nodewise_row_l, nodewise_weight_l)
debiased_params = params_mn + approx_inv_cov.dot(grad_mn)
debiased_params[np.abs(debiased_params) < threshold] = 0
return debiased_params | joins the results from each run of _est_regularized_debiased
and returns the debiased estimate of the coefficients
Parameters
----------
results_l : list
A list of tuples each one containing the params, grad,
nodewise_row and nodewise_weight values for each partition.
threshold : scalar
The threshold at which the coefficients will be cut. | _join_debiased | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
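A sketch of the full debiased pipeline these helpers implement, driven through the DistributedModel class defined in this module. The data generator, partition sizes, and penalty weight are illustrative; the defaults plug in _est_regularized_debiased and _join_debiased on an OLS model, and the exact fit signature should be checked against the installed version.

import numpy as np
import statsmodels.api as sm
from statsmodels.base.distributed_estimation import DistributedModel

rng = np.random.default_rng(0)
m, n_per, p = 4, 250, 10                    # 4 partitions of 250 rows each
beta = np.r_[1.0, -1.0, np.zeros(p - 2)]

def data_gen():
    # yields one (endog, exog) pair per partition
    for _ in range(m):
        X = rng.normal(size=(n_per, p))
        y = X @ beta + rng.normal(size=n_per)
        yield y, X

# Defaults use the debiased estimation and join methods defined above.
dist_mod = DistributedModel(m, model_class=sm.OLS)
dist_res = dist_mod.fit(data_gen(), fit_kwds={"alpha": 0.2})
print(dist_res.params)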