code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def bse_fe(self):
"""
Returns the standard errors of the fixed effect regression
coefficients.
"""
p = self.model.exog.shape[1]
return np.sqrt(np.diag(self.cov_params())[0:p]) | Returns the standard errors of the fixed effect regression
coefficients. | bse_fe | python | statsmodels/statsmodels | statsmodels/regression/mixed_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py | BSD-3-Clause |
def bse_re(self):
"""
Returns the standard errors of the variance parameters.
The first `k_re * (k_re + 1) / 2` elements of the returned array
are the standard errors of the lower triangle of `cov_re`.
The remaining elements are the standard errors of the variance
components.
Note that the sampling distribution of variance parameters is
strongly skewed unless the sample size is large, so these
standard errors may not give meaningful confidence intervals
or p-values if used in the usual way.
"""
p = self.model.exog.shape[1]
return np.sqrt(self.scale * np.diag(self.cov_params())[p:]) | Returns the standard errors of the variance parameters.
The first `k_re * (k_re + 1) / 2` elements of the returned array
are the standard errors of the lower triangle of `cov_re`.
The remaining elements are the standard errors of the variance
components.
Note that the sampling distribution of variance parameters is
strongly skewed unless the sample size is large, so these
standard errors may not give meaningful confidence intervals
or p-values if used in the usual way. | bse_re | python | statsmodels/statsmodels | statsmodels/regression/mixed_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py | BSD-3-Clause |
def random_effects(self):
"""
The conditional means of random effects given the data.
Returns
-------
random_effects : dict
A dictionary mapping the distinct `group` values to the
conditional means of the random effects for the group
given the data.
"""
try:
cov_re_inv = np.linalg.inv(self.cov_re)
except np.linalg.LinAlgError:
raise ValueError("Cannot predict random effects from " +
"singular covariance structure.")
vcomp = self.vcomp
k_re = self.k_re
ranef_dict = {}
for group_ix, group in enumerate(self.model.group_labels):
endog = self.model.endog_li[group_ix]
exog = self.model.exog_li[group_ix]
ex_r = self.model._aex_r[group_ix]
ex2_r = self.model._aex_r2[group_ix]
vc_var = self.model._expand_vcomp(vcomp, group_ix)
# Get the residuals relative to fixed effects
resid = endog
if self.k_fe > 0:
expval = np.dot(exog, self.fe_params)
resid = resid - expval
solver = _smw_solver(self.scale, ex_r, ex2_r, cov_re_inv,
1 / vc_var)
vir = solver(resid)
xtvir = _dot(ex_r.T, vir)
xtvir[0:k_re] = np.dot(self.cov_re, xtvir[0:k_re])
xtvir[k_re:] *= vc_var
ranef_dict[group] = pd.Series(
xtvir, index=self._expand_re_names(group_ix))
return ranef_dict | The conditional means of random effects given the data.
Returns
-------
random_effects : dict
A dictionary mapping the distinct `group` values to the
conditional means of the random effects for the group
given the data. | random_effects | python | statsmodels/statsmodels | statsmodels/regression/mixed_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py | BSD-3-Clause |
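A minimal usage sketch, not taken from the source: it fits a random-intercept model on synthetic data (all variable names are illustrative) and reads back the accessors documented above.

```python
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

# Synthetic data: 50 groups of 5 observations, one random intercept per group.
rng = np.random.default_rng(0)
g = np.repeat(np.arange(50), 5)
x = rng.normal(size=g.size)
y = 1.0 + 2.0 * x + rng.normal(size=50)[g] + rng.normal(size=g.size)
data = pd.DataFrame({"y": y, "x": x, "g": g})

result = smf.mixedlm("y ~ x", data, groups=data["g"]).fit()
print(result.bse_fe)           # standard errors of the fixed-effect coefficients
print(result.bse_re)           # standard errors of the variance parameters
ranef = result.random_effects  # dict: group label -> Series of conditional means
```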
def random_effects_cov(self):
"""
Returns the conditional covariance matrix of the random
effects for each group given the data.
Returns
-------
random_effects_cov : dict
A dictionary mapping the distinct values of the `group`
variable to the conditional covariance matrix of the
random effects given the data.
"""
try:
cov_re_inv = np.linalg.inv(self.cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
vcomp = self.vcomp
ranef_dict = {}
for group_ix in range(self.model.n_groups):
ex_r = self.model._aex_r[group_ix]
ex2_r = self.model._aex_r2[group_ix]
label = self.model.group_labels[group_ix]
vc_var = self.model._expand_vcomp(vcomp, group_ix)
solver = _smw_solver(self.scale, ex_r, ex2_r, cov_re_inv,
1 / vc_var)
n = ex_r.shape[0]
m = self.cov_re.shape[0]
mat1 = np.empty((n, m + len(vc_var)))
mat1[:, 0:m] = np.dot(ex_r[:, 0:m], self.cov_re)
mat1[:, m:] = np.dot(ex_r[:, m:], np.diag(vc_var))
mat2 = solver(mat1)
mat2 = np.dot(mat1.T, mat2)
v = -mat2
v[0:m, 0:m] += self.cov_re
ix = np.arange(m, v.shape[0])
v[ix, ix] += vc_var
na = self._expand_re_names(group_ix)
v = pd.DataFrame(v, index=na, columns=na)
ranef_dict[label] = v
return ranef_dict | Returns the conditional covariance matrix of the random
effects for each group given the data.
Returns
-------
random_effects_cov : dict
A dictionary mapping the distinct values of the `group`
variable to the conditional covariance matrix of the
random effects given the data. | random_effects_cov | python | statsmodels/statsmodels | statsmodels/regression/mixed_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py | BSD-3-Clause |
def t_test(self, r_matrix, use_t=None):
"""
Compute a t-test for each linear hypothesis of the form Rb = q
Parameters
----------
r_matrix : array_like
If an array is given, a p x k 2d array or length k 1d
array specifying the linear restrictions. It is assumed
that the linear combination is equal to zero.
scale : float, optional
An optional `scale` to use. Default is the scale specified
by the model fit.
use_t : bool, optional
If use_t is None, then the default of the model is used.
If use_t is True, then the p-values are based on the t
distribution.
If use_t is False, then the p-values are based on the normal
distribution.
Returns
-------
res : ContrastResults instance
The results for the test are attributes of this results instance.
The available results have the same elements as the parameter table
in `summary()`.
"""
if r_matrix.shape[1] != self.k_fe:
raise ValueError("r_matrix for t-test should have %d columns"
% self.k_fe)
d = self.k_re2 + self.k_vc
z0 = np.zeros((r_matrix.shape[0], d))
r_matrix = np.concatenate((r_matrix, z0), axis=1)
tst_rslt = super().t_test(r_matrix, use_t=use_t)
return tst_rslt | Compute a t-test for each linear hypothesis of the form Rb = q
Parameters
----------
r_matrix : array_like
If an array is given, a p x k 2d array or length k 1d
array specifying the linear restrictions. It is assumed
that the linear combination is equal to zero.
scale : float, optional
An optional `scale` to use. Default is the scale specified
by the model fit.
use_t : bool, optional
If use_t is None, then the default of the model is used.
If use_t is True, then the p-values are based on the t
distribution.
If use_t is False, then the p-values are based on the normal
distribution.
Returns
-------
res : ContrastResults instance
The results for the test are attributes of this results instance.
The available results have the same elements as the parameter table
in `summary()`. | t_test | python | statsmodels/statsmodels | statsmodels/regression/mixed_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py | BSD-3-Clause |
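Continuing the MixedLM sketch above (hypothetical data), a common use is to test every fixed-effect coefficient against zero; the identity matrix has the required `k_fe` columns, and the zero block for the variance parameters is appended internally.

```python
import numpy as np

r_matrix = np.eye(len(result.fe_params))  # one restriction per fixed effect
print(result.t_test(r_matrix))
```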
def summary(self, yname=None, xname_fe=None, xname_re=None,
title=None, alpha=.05):
"""
Summarize the mixed model regression results.
Parameters
----------
yname : str, optional
Default is `y`
xname_fe : list[str], optional
Fixed effects covariate names
xname_re : list[str], optional
Random effects covariate names
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
info = {}
info["Model:"] = "MixedLM"
if yname is None:
yname = self.model.endog_names
param_names = self.model.data.param_names[:]
k_fe_params = len(self.fe_params)
k_re_params = len(param_names) - len(self.fe_params)
if xname_fe is not None:
if len(xname_fe) != k_fe_params:
msg = "xname_fe should be a list of length %d" % k_fe_params
raise ValueError(msg)
param_names[:k_fe_params] = xname_fe
if xname_re is not None:
if len(xname_re) != k_re_params:
msg = "xname_re should be a list of length %d" % k_re_params
raise ValueError(msg)
param_names[k_fe_params:] = xname_re
info["No. Observations:"] = str(self.model.n_totobs)
info["No. Groups:"] = str(self.model.n_groups)
gs = np.array([len(x) for x in self.model.endog_li])
info["Min. group size:"] = "%.0f" % min(gs)
info["Max. group size:"] = "%.0f" % max(gs)
info["Mean group size:"] = "%.1f" % np.mean(gs)
info["Dependent Variable:"] = yname
info["Method:"] = self.method
info["Scale:"] = self.scale
info["Log-Likelihood:"] = self.llf
info["Converged:"] = "Yes" if self.converged else "No"
smry.add_dict(info)
smry.add_title("Mixed Linear Model Regression Results")
float_fmt = "%.3f"
sdf = np.nan * np.ones((self.k_fe + self.k_re2 + self.k_vc, 6))
# Coefficient estimates
sdf[0:self.k_fe, 0] = self.fe_params
# Standard errors
sdf[0:self.k_fe, 1] = np.sqrt(np.diag(self.cov_params()[0:self.k_fe]))
# Z-scores
sdf[0:self.k_fe, 2] = sdf[0:self.k_fe, 0] / sdf[0:self.k_fe, 1]
# p-values
sdf[0:self.k_fe, 3] = 2 * norm.cdf(-np.abs(sdf[0:self.k_fe, 2]))
# Confidence intervals
qm = -norm.ppf(alpha / 2)
sdf[0:self.k_fe, 4] = sdf[0:self.k_fe, 0] - qm * sdf[0:self.k_fe, 1]
sdf[0:self.k_fe, 5] = sdf[0:self.k_fe, 0] + qm * sdf[0:self.k_fe, 1]
# All random effects variances and covariances
jj = self.k_fe
for i in range(self.k_re):
for j in range(i + 1):
sdf[jj, 0] = self.cov_re[i, j]
sdf[jj, 1] = np.sqrt(self.scale) * self.bse[jj]
jj += 1
# Variance components
for i in range(self.k_vc):
sdf[jj, 0] = self.vcomp[i]
sdf[jj, 1] = np.sqrt(self.scale) * self.bse[jj]
jj += 1
sdf = pd.DataFrame(index=param_names, data=sdf)
sdf.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
for col in sdf.columns:
sdf[col] = [float_fmt % x if np.isfinite(x) else ""
for x in sdf[col]]
smry.add_df(sdf, align='r')
return smry | Summarize the mixed model regression results.
Parameters
----------
yname : str, optional
Default is `y`
xname_fe : list[str], optional
Fixed effects covariate names
xname_re : list[str], optional
Random effects covariate names
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results | summary | python | statsmodels/statsmodels | statsmodels/regression/mixed_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py | BSD-3-Clause |
def aic(self):
"""Akaike information criterion"""
if self.reml:
return np.nan
if self.freepat is not None:
df = self.freepat.get_packed(use_sqrt=False, has_fe=True).sum() + 1
else:
df = self.params.size + 1
return -2 * (self.llf - df) | Akaike information criterion | aic | python | statsmodels/statsmodels | statsmodels/regression/mixed_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py | BSD-3-Clause |
def bic(self):
"""Bayesian information criterion"""
if self.reml:
return np.nan
if self.freepat is not None:
df = self.freepat.get_packed(use_sqrt=False, has_fe=True).sum() + 1
else:
df = self.params.size + 1
return -2 * self.llf + np.log(self.nobs) * df | Bayesian information criterion | bic | python | statsmodels/statsmodels | statsmodels/regression/mixed_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py | BSD-3-Clause |
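Both criteria are NaN for REML fits (the default), so comparing models by AIC/BIC requires refitting by maximum likelihood. Continuing the earlier MixedLM sketch:

```python
result_ml = smf.mixedlm("y ~ x", data, groups=data["g"]).fit(reml=False)
print(result_ml.aic, result_ml.bic)
```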
def profile_re(self, re_ix, vtype, num_low=5, dist_low=1., num_high=5,
dist_high=1., **fit_kwargs):
"""
Profile-likelihood inference for variance parameters.
Parameters
----------
re_ix : int
If vtype is `re`, this value is the index of the variance
parameter for which to construct a profile likelihood. If
`vtype` is 'vc' then `re_ix` is the name of the variance
parameter to be profiled.
vtype : str
Either 're' or 'vc', depending on whether the profile
analysis is for a random effect or a variance component.
num_low : int
The number of points at which to calculate the likelihood
below the MLE of the parameter of interest.
dist_low : float
The distance below the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
num_high : int
The number of points at which to calculate the likelihood
above the MLE of the parameter of interest.
dist_high : float
The distance above the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
**fit_kwargs
Additional keyword arguments passed to fit.
Returns
-------
An array with two columns. The first column contains the
values to which the parameter of interest is constrained. The
second column contains the corresponding likelihood values.
Notes
-----
Only variance parameters can be profiled.
"""
pmodel = self.model
k_fe = pmodel.k_fe
k_re = pmodel.k_re
k_vc = pmodel.k_vc
endog, exog = pmodel.endog, pmodel.exog
# Need to permute the columns of the random effects design
# matrix so that the profiled variable is in the first column.
if vtype == 're':
ix = np.arange(k_re)
ix[0] = re_ix
ix[re_ix] = 0
exog_re = pmodel.exog_re.copy()[:, ix]
# Permute the covariance structure to match the permuted
# design matrix.
params = self.params_object.copy()
cov_re_unscaled = params.cov_re
cov_re_unscaled = cov_re_unscaled[np.ix_(ix, ix)]
params.cov_re = cov_re_unscaled
ru0 = cov_re_unscaled[0, 0]
# Convert dist_low and dist_high to the profile
# parameterization
cov_re = self.scale * cov_re_unscaled
low = (cov_re[0, 0] - dist_low) / self.scale
high = (cov_re[0, 0] + dist_high) / self.scale
elif vtype == 'vc':
re_ix = self.model.exog_vc.names.index(re_ix)
params = self.params_object.copy()
vcomp = self.vcomp
low = (vcomp[re_ix] - dist_low) / self.scale
high = (vcomp[re_ix] + dist_high) / self.scale
ru0 = vcomp[re_ix] / self.scale
# Define the sequence of values to which the parameter of
# interest will be constrained.
if low <= 0:
raise ValueError("dist_low is too large and would result in a "
"negative variance. Try a smaller value.")
left = np.linspace(low, ru0, num_low + 1)
right = np.linspace(ru0, high, num_high+1)[1:]
rvalues = np.concatenate((left, right))
# Indicators of which parameters are free and fixed.
free = MixedLMParams(k_fe, k_re, k_vc)
if self.freepat is None:
free.fe_params = np.ones(k_fe)
vcomp = np.ones(k_vc)
mat = np.ones((k_re, k_re))
else:
# If a freepat already has been specified, we add the
# constraint to it.
free.fe_params = self.freepat.fe_params
vcomp = self.freepat.vcomp
mat = self.freepat.cov_re
if vtype == 're':
mat = mat[np.ix_(ix, ix)]
if vtype == 're':
mat[0, 0] = 0
else:
vcomp[re_ix] = 0
free.cov_re = mat
free.vcomp = vcomp
klass = self.model.__class__
init_kwargs = pmodel._get_init_kwds()
if vtype == 're':
init_kwargs['exog_re'] = exog_re
likev = []
for x in rvalues:
model = klass(endog, exog, **init_kwargs)
if vtype == 're':
cov_re = params.cov_re.copy()
cov_re[0, 0] = x
params.cov_re = cov_re
else:
params.vcomp[re_ix] = x
# TODO should use fit_kwargs
rslt = model.fit(start_params=params, free=free,
reml=self.reml, cov_pen=self.cov_pen,
**fit_kwargs)._results
likev.append([x * rslt.scale, rslt.llf])
likev = np.asarray(likev)
return likev | Profile-likelihood inference for variance parameters.
Parameters
----------
re_ix : int
If vtype is `re`, this value is the index of the variance
parameter for which to construct a profile likelihood. If
`vtype` is 'vc' then `re_ix` is the name of the variance
parameter to be profiled.
vtype : str
Either 're' or 'vc', depending on whether the profile
analysis is for a random effect or a variance component.
num_low : int
The number of points at which to calculate the likelihood
below the MLE of the parameter of interest.
dist_low : float
The distance below the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
num_high : int
The number of points at which to calculate the likelihood
above the MLE of the parameter of interest.
dist_high : float
The distance above the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
**fit_kwargs
Additional keyword arguments passed to fit.
Returns
-------
An array with two columns. The first column contains the
values to which the parameter of interest is constrained. The
second column contains the corresponding likelihood values.
Notes
-----
Only variance parameters can be profiled. | profile_re | python | statsmodels/statsmodels | statsmodels/regression/mixed_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py | BSD-3-Clause |
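A hypothetical call continuing the MixedLM sketch: profile the random-intercept variance (index 0). The distances are offsets from the estimated variance and must keep it positive; the values below are illustrative only.

```python
prof = result.profile_re(0, "re", num_low=5, dist_low=0.1,
                         num_high=5, dist_high=0.1)
# prof[:, 0]: constrained variance values; prof[:, 1]: profile log-likelihoods
```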
def fit(self, method='pinv'):
"""
Minimal implementation of WLS optimized for performance.
Parameters
----------
method : str, optional
Method to use to estimate parameters. "pinv", "qr" or "lstsq"
* "pinv" uses the Moore-Penrose pseudoinverse
to solve the least squares problem.
* "qr" uses the QR factorization.
* "lstsq" uses the least squares implementation in numpy.linalg
Returns
-------
results : namedtuple
Named tuple containing the fewest terms needed to implement
iterative estimation in models. Currently
* params : Estimated parameters
* fittedvalues : Fit values using original data
* resid : Residuals using original data
* model : namedtuple with one field, weights
* scale : scale computed using weighted residuals
Notes
-----
Does not perform any checks on the input data
See Also
--------
statsmodels.regression.linear_model.WLS
"""
if method == 'pinv':
pinv_wexog = np.linalg.pinv(self.wexog)
params = pinv_wexog.dot(self.wendog)
elif method == 'qr':
Q, R = np.linalg.qr(self.wexog)
params = np.linalg.solve(R, np.dot(Q.T, self.wendog))
else:
params, _, _, _ = np.linalg.lstsq(self.wexog, self.wendog,
rcond=-1)
return self.results(params) | Minimal implementation of WLS optimized for performance.
Parameters
----------
method : str, optional
Method to use to estimate parameters. "pinv", "qr" or "lstsq"
* "pinv" uses the Moore-Penrose pseudoinverse
to solve the least squares problem.
* "qr" uses the QR factorization.
* "lstsq" uses the least squares implementation in numpy.linalg
Returns
-------
results : namedtuple
Named tuple containing the fewest terms needed to implement
iterative estimation in models. Currently
* params : Estimated parameters
* fittedvalues : Fit values using original data
* resid : Residuals using original data
* model : namedtuple with one field, weights
* scale : scale computed using weighted residuals
Notes
-----
Does not perform any checks on the input data
See Also
--------
statsmodels.regression.linear_model.WLS | fit | python | statsmodels/statsmodels | statsmodels/regression/_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/_tools.py | BSD-3-Clause |
def results(self, params):
"""
Construct results
params : ndarray
Model parameters
Notes
-----
Allows results to be constructed either from existing parameters or
when estimated using ``fit``
"""
fitted_values = self.exog.dot(params)
resid = self.endog - fitted_values
wresid = self.wendog - self.wexog.dot(params)
df_resid = self.wexog.shape[0] - self.wexog.shape[1]
scale = np.dot(wresid, wresid) / df_resid
return Bunch(params=params, fittedvalues=fitted_values, resid=resid,
model=self, scale=scale) | Construct results
params : ndarray
Model parameters
Notes
-----
Allows results to be constructed either from existing parameters or
when estimated using ``fit`` | results | python | statsmodels/statsmodels | statsmodels/regression/_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/_tools.py | BSD-3-Clause |
def whiten(self, data):
"""
QuantReg model whitener does nothing: returns data.
"""
return data | QuantReg model whitener does nothing: returns data. | whiten | python | statsmodels/statsmodels | statsmodels/regression/quantile_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/quantile_regression.py | BSD-3-Clause |
def fit(self, q=.5, vcov='robust', kernel='epa', bandwidth='hsheather',
max_iter=1000, p_tol=1e-6, **kwargs):
"""
Solve by Iterative Weighted Least Squares
Parameters
----------
q : float
Quantile must be strictly between 0 and 1
vcov : str, method used to calculate the variance-covariance matrix
of the parameters. Default is ``robust``:
- robust : heteroskedasticity robust standard errors (as suggested
in Greene 6th edition)
- iid : iid errors (as in Stata 12)
kernel : str, kernel to use in the kernel density estimation for the
asymptotic covariance matrix:
- epa: Epanechnikov
- cos: Cosine
- gau: Gaussian
- par: Parzen
bandwidth : str, Bandwidth selection method in kernel density
estimation for asymptotic covariance estimate (full
references in QuantReg docstring):
- hsheather: Hall-Sheather (1988)
- bofinger: Bofinger (1975)
- chamberlain: Chamberlain (1994)
"""
if q <= 0 or q >= 1:
raise Exception('q must be strictly between 0 and 1')
kern_names = ['biw', 'cos', 'epa', 'gau', 'par']
if kernel not in kern_names:
raise Exception("kernel must be one of " + ', '.join(kern_names))
else:
kernel = kernels[kernel]
if bandwidth == 'hsheather':
bandwidth = hall_sheather
elif bandwidth == 'bofinger':
bandwidth = bofinger
elif bandwidth == 'chamberlain':
bandwidth = chamberlain
else:
raise Exception("bandwidth must be in 'hsheather', 'bofinger', 'chamberlain'")
endog = self.endog
exog = self.exog
nobs = self.nobs
exog_rank = np.linalg.matrix_rank(self.exog)
self.rank = exog_rank
self.df_model = float(self.rank - self.k_constant)
self.df_resid = self.nobs - self.rank
n_iter = 0
xstar = exog
beta = np.ones(exog.shape[1])
# TODO: better start, initial beta is used only for convergence check
# Note the following does not work yet,
# the iteration loop always starts with OLS as initial beta
# if start_params is not None:
# if len(start_params) != rank:
# raise ValueError('start_params has wrong length')
# beta = start_params
# else:
# # start with OLS
# beta = np.dot(np.linalg.pinv(exog), endog)
diff = 10
cycle = False
history = dict(params = [], mse=[])
while n_iter < max_iter and diff > p_tol and not cycle:
n_iter += 1
beta0 = beta
xtx = np.dot(xstar.T, exog)
xty = np.dot(xstar.T, endog)
beta = np.dot(pinv(xtx), xty)
resid = endog - np.dot(exog, beta)
mask = np.abs(resid) < .000001
resid[mask] = ((resid[mask] >= 0) * 2 - 1) * .000001
resid = np.where(resid < 0, q * resid, (1-q) * resid)
resid = np.abs(resid)
xstar = exog / resid[:, np.newaxis]
diff = np.max(np.abs(beta - beta0))
history['params'].append(beta)
history['mse'].append(np.mean(resid*resid))
if (n_iter >= 300) and (n_iter % 100 == 0):
# check for convergence circle, should not happen
for ii in range(2, 10):
if np.all(beta == history['params'][-ii]):
cycle = True
warnings.warn("Convergence cycle detected", ConvergenceWarning)
break
if n_iter == max_iter:
warnings.warn("Maximum number of iterations (" + str(max_iter) +
") reached.", IterationLimitWarning)
e = endog - np.dot(exog, beta)
# Greene (2008, p.407) writes that Stata 6 uses this bandwidth:
# h = 0.9 * np.std(e) / (nobs**0.2)
# Instead, we calculate bandwidth as in Stata 12
iqre = stats.scoreatpercentile(e, 75) - stats.scoreatpercentile(e, 25)
h = bandwidth(nobs, q)
h = min(np.std(endog),
iqre / 1.34) * (norm.ppf(q + h) - norm.ppf(q - h))
fhat0 = 1. / (nobs * h) * np.sum(kernel(e / h))
if vcov == 'robust':
d = np.where(e > 0, (q/fhat0)**2, ((1-q)/fhat0)**2)
xtxi = pinv(np.dot(exog.T, exog))
xtdx = np.dot(exog.T * d[np.newaxis, :], exog)
vcov = xtxi @ xtdx @ xtxi
elif vcov == 'iid':
vcov = (1. / fhat0)**2 * q * (1 - q) * pinv(np.dot(exog.T, exog))
else:
raise Exception("vcov must be 'robust' or 'iid'")
lfit = QuantRegResults(self, beta, normalized_cov_params=vcov)
lfit.q = q
lfit.iterations = n_iter
lfit.sparsity = 1. / fhat0
lfit.bandwidth = h
lfit.history = history
return RegressionResultsWrapper(lfit) | Solve by Iterative Weighted Least Squares
Parameters
----------
q : float
Quantile must be strictly between 0 and 1
vcov : str, method used to calculate the variance-covariance matrix
of the parameters. Default is ``robust``:
- robust : heteroskedasticity robust standard errors (as suggested
in Greene 6th edition)
- iid : iid errors (as in Stata 12)
kernel : str, kernel to use in the kernel density estimation for the
asymptotic covariance matrix:
- epa: Epanechnikov
- cos: Cosine
- gau: Gaussian
- par: Parzen
bandwidth : str, Bandwidth selection method in kernel density
estimation for asymptotic covariance estimate (full
references in QuantReg docstring):
- hsheather: Hall-Sheather (1988)
- bofinger: Bofinger (1975)
- chamberlain: Chamberlain (1994) | fit | python | statsmodels/statsmodels | statsmodels/regression/quantile_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/quantile_regression.py | BSD-3-Clause |
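A minimal sketch on synthetic data (names illustrative, not from the source): median regression with the default covariance, kernel, and bandwidth choices spelled out.

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = rng.normal(size=200)
y = 1.0 + 0.5 * x + rng.standard_t(df=3, size=200)   # heavy-tailed errors
res = sm.QuantReg(y, sm.add_constant(x)).fit(q=0.5, vcov="robust",
                                             kernel="epa",
                                             bandwidth="hsheather")
print(res.params, res.bse)
```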
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
"""
eigvals = self.eigenvals
condno = self.condition_number
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Least Squares']),
('Date:', None),
('Time:', None)
]
top_right = [('Pseudo R-squared:', ["%#8.4g" % self.prsquared]),
('Bandwidth:', ["%#8.4g" % self.bandwidth]),
('Sparsity:', ["%#8.4g" % self.sparsity]),
('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None)
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
# create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
# add warnings/notes, added to text format only
etext = []
if eigvals[-1] < 1e-10:
wstr = "The smallest eigenvalue is %6.3g. This might indicate "
wstr += "that there are\n"
wstr += "strong multicollinearity problems or that the design "
wstr += "matrix is singular."
wstr = wstr % eigvals[-1]
etext.append(wstr)
elif condno > 1000: # TODO: what is recommended
wstr = "The condition number is large, %6.3g. This might "
wstr += "indicate that there are\n"
wstr += "strong multicollinearity or other numerical "
wstr += "problems."
wstr = wstr % condno
etext.append(wstr)
if etext:
smry.add_extra_txt(etext)
return smry | Summarize the Regression Results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results | summary | python | statsmodels/statsmodels | statsmodels/regression/quantile_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/quantile_regression.py | BSD-3-Clause |
def fit(self):
"""
Fits the model by application of the Kalman filter
Returns
-------
RecursiveLSResults
"""
smoother_results = self.smooth(return_ssm=True)
with self.ssm.fixed_scale(smoother_results.scale):
res = self.smooth()
return res | Fits the model by application of the Kalman filter
Returns
-------
RecursiveLSResults | fit | python | statsmodels/statsmodels | statsmodels/regression/recursive_ls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/recursive_ls.py | BSD-3-Clause |
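A minimal sketch on synthetic data (hypothetical names): construct the model, run `fit`, and inspect the recursively estimated coefficients.

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = rng.normal(size=100)
y = 2.0 + 1.5 * x + rng.normal(size=100)
res = sm.RecursiveLS(y, sm.add_constant(x)).fit()
print(res.params)                    # final-period coefficient estimates
coeffs = res.recursive_coefficients  # Bunch with filtered/smoothed paths
```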
def update(self, params, **kwargs):
"""
Update the parameters of the model
Updates the representation matrices to fill in the new parameter
values.
Parameters
----------
params : array_like
Array of new parameters.
transformed : bool, optional
Whether or not `params` is already transformed. If set to False,
`transform_params` is called. Default is True.
Returns
-------
params : array_like
Array of parameters.
"""
pass | Update the parameters of the model
Updates the representation matrices to fill in the new parameter
values.
Parameters
----------
params : array_like
Array of new parameters.
transformed : bool, optional
Whether or not `params` is already transformed. If set to False,
`transform_params` is called. Default is True.
Returns
-------
params : array_like
Array of parameters. | update | python | statsmodels/statsmodels | statsmodels/regression/recursive_ls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/recursive_ls.py | BSD-3-Clause |
def recursive_coefficients(self):
"""
Estimates of regression coefficients, recursively estimated
Returns
-------
out: Bunch
Has the following attributes:
- `filtered`: a time series array with the filtered estimate of
the component
- `filtered_cov`: a time series array with the filtered estimate of
the variance/covariance of the component
- `smoothed`: a time series array with the smoothed estimate of
the component
- `smoothed_cov`: a time series array with the smoothed estimate of
the variance/covariance of the component
- `offset`: an integer giving the offset in the state vector where
this component begins
"""
out = None
spec = self.specification
start = offset = 0
end = offset + spec.k_exog
out = Bunch(
filtered=self.filtered_state[start:end],
filtered_cov=self.filtered_state_cov[start:end, start:end],
smoothed=None, smoothed_cov=None,
offset=offset
)
if self.smoothed_state is not None:
out.smoothed = self.smoothed_state[start:end]
if self.smoothed_state_cov is not None:
out.smoothed_cov = (
self.smoothed_state_cov[start:end, start:end])
return out | Estimates of regression coefficients, recursively estimated
Returns
-------
out: Bunch
Has the following attributes:
- `filtered`: a time series array with the filtered estimate of
the component
- `filtered_cov`: a time series array with the filtered estimate of
the variance/covariance of the component
- `smoothed`: a time series array with the smoothed estimate of
the component
- `smoothed_cov`: a time series array with the smoothed estimate of
the variance/covariance of the component
- `offset`: an integer giving the offset in the state vector where
this component begins | recursive_coefficients | python | statsmodels/statsmodels | statsmodels/regression/recursive_ls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/recursive_ls.py | BSD-3-Clause |
def llf_recursive_obs(self):
"""
(float) Loglikelihood at observation, computed from recursive residuals
"""
from scipy.stats import norm
return np.log(norm.pdf(self.resid_recursive, loc=0,
scale=self.scale**0.5)) | (float) Loglikelihood at observation, computed from recursive residuals | llf_recursive_obs | python | statsmodels/statsmodels | statsmodels/regression/recursive_ls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/recursive_ls.py | BSD-3-Clause |
def llf_recursive(self):
"""
(float) Loglikelihood defined by recursive residuals, equivalent to OLS
"""
return np.sum(self.llf_recursive_obs) | (float) Loglikelihood defined by recursive residuals, equivalent to OLS | llf_recursive | python | statsmodels/statsmodels | statsmodels/regression/recursive_ls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/recursive_ls.py | BSD-3-Clause |
def ssr(self):
"""ssr"""
d = max(self.nobs_diffuse, self.loglikelihood_burn)
return (self.nobs - d) * self.filter_results.obs_cov[0, 0, 0] | ssr | ssr | python | statsmodels/statsmodels | statsmodels/regression/recursive_ls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/recursive_ls.py | BSD-3-Clause |
def centered_tss(self):
"""Centered tss"""
return np.sum((self.filter_results.endog[0] -
np.mean(self.filter_results.endog))**2) | Centered tss | centered_tss | python | statsmodels/statsmodels | statsmodels/regression/recursive_ls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/recursive_ls.py | BSD-3-Clause |
def uncentered_tss(self):
"""uncentered tss"""
return np.sum((self.filter_results.endog[0])**2) | uncentered tss | uncentered_tss | python | statsmodels/statsmodels | statsmodels/regression/recursive_ls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/recursive_ls.py | BSD-3-Clause |
def ess(self):
"""ess"""
if self.k_constant:
return self.centered_tss - self.ssr
else:
return self.uncentered_tss - self.ssr | ess | ess | python | statsmodels/statsmodels | statsmodels/regression/recursive_ls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/recursive_ls.py | BSD-3-Clause |
def rsquared(self):
"""rsquared"""
if self.k_constant:
return 1 - self.ssr / self.centered_tss
else:
return 1 - self.ssr / self.uncentered_tss | rsquared | rsquared | python | statsmodels/statsmodels | statsmodels/regression/recursive_ls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/recursive_ls.py | BSD-3-Clause |
def mse_model(self):
"""mse_model"""
return self.ess / self.df_model | mse_model | mse_model | python | statsmodels/statsmodels | statsmodels/regression/recursive_ls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/recursive_ls.py | BSD-3-Clause |
def mse_resid(self):
"""mse_resid"""
return self.ssr / self.df_resid | mse_resid | mse_resid | python | statsmodels/statsmodels | statsmodels/regression/recursive_ls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/recursive_ls.py | BSD-3-Clause |
def mse_total(self):
"""mse_total"""
if self.k_constant:
return self.centered_tss / (self.df_resid + self.df_model)
else:
return self.uncentered_tss / (self.df_resid + self.df_model) | mse_total | mse_total | python | statsmodels/statsmodels | statsmodels/regression/recursive_ls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/recursive_ls.py | BSD-3-Clause |
def _cusum_significance_bounds(self, alpha, ddof=0, points=None):
"""
Parameters
----------
alpha : float, optional
The significance bound is alpha %.
ddof : int, optional
The number of periods additional to `k_exog` to exclude in
constructing the bounds. Default is zero. This is usually used
only for testing purposes.
points : iterable, optional
The points at which to evaluate the significance bounds. Default is
two points, beginning and end of the sample.
Notes
-----
Comparing against the cusum6 package for Stata, this does not produce
exactly the same confidence bands (which are produced in cusum6 by
lw, uw) because they burn the first k_exog + 1 periods instead of the
first k_exog. If this change is performed
(so that `tmp = (self.nobs - d - 1)**0.5`), then the output here
matches cusum6.
The cusum6 behavior does not seem to be consistent with
Brown et al. (1975); it is likely they did that because they needed
three initial observations to get the initial OLS estimates, whereas
we do not need to do that.
"""
# Get the constant associated with the significance level
if alpha == 0.01:
scalar = 1.143
elif alpha == 0.05:
scalar = 0.948
elif alpha == 0.10:
scalar = 0.950
else:
raise ValueError('Invalid significance level.')
# Get the points for the significance bound lines
d = max(self.nobs_diffuse, self.loglikelihood_burn)
tmp = (self.nobs - d - ddof)**0.5
def upper_line(x):
return scalar * tmp + 2 * scalar * (x - d) / tmp
if points is None:
points = np.array([d, self.nobs])
return -upper_line(points), upper_line(points) | Parameters
----------
alpha : float, optional
The significance bound is alpha %.
ddof : int, optional
The number of periods additional to `k_exog` to exclude in
constructing the bounds. Default is zero. This is usually used
only for testing purposes.
points : iterable, optional
The points at which to evaluate the significance bounds. Default is
two points, beginning and end of the sample.
Notes
-----
Comparing against the cusum6 package for Stata, this does not produce
exactly the same confidence bands (which are produced in cusum6 by
lw, uw) because they burn the first k_exog + 1 periods instead of the
first k_exog. If this change is performed
(so that `tmp = (self.nobs - d - 1)**0.5`), then the output here
matches cusum6.
The cusum6 behavior does not seem to be consistent with
Brown et al. (1975); it is likely they did that because they needed
three initial observations to get the initial OLS estimates, whereas
we do not need to do that. | _cusum_significance_bounds | python | statsmodels/statsmodels | statsmodels/regression/recursive_ls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/recursive_ls.py | BSD-3-Clause |
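Continuing the RecursiveLS sketch above (a hedged example, assuming the earlier `res`): the CUSUM statistic built from the recursive residuals can be inspected directly or plotted, and `plot_cusum` draws significance bounds of this form.

```python
print(res.cusum[:5])              # standardized cumulative recursive residuals
fig = res.plot_cusum(alpha=0.05)  # plot with 5% significance bounds
```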
def _cusum_squares_significance_bounds(self, alpha, points=None):
"""
Notes
-----
Comparing against the cusum6 package for Stata, this does not produce
exactly the same confidence bands (which are produced in cusum6 by
lww, uww) because they use a different method for computing the
critical value; in particular, they use tabled values from
Table C, pp. 364-365 of "The Econometric Analysis of Time Series"
Harvey, (1990), and use the value given to 99 observations for any
larger number of observations. In contrast, we use the approximating
critical values suggested in Edgerton and Wells (1994), which allow
computing relatively good approximations for any number of
observations.
"""
# Get the approximate critical value associated with the significance
# level
d = max(self.nobs_diffuse, self.loglikelihood_burn)
n = 0.5 * (self.nobs - d) - 1
try:
ix = [0.1, 0.05, 0.025, 0.01, 0.005].index(alpha / 2)
except ValueError:
raise ValueError('Invalid significance level.')
scalars = _cusum_squares_scalars[:, ix]
crit = scalars[0] / n**0.5 + scalars[1] / n + scalars[2] / n**1.5
# Get the points for the significance bound lines
if points is None:
points = np.array([d, self.nobs])
line = (points - d) / (self.nobs - d)
return line - crit, line + crit | Notes
-----
Comparing against the cusum6 package for Stata, this does not produce
exactly the same confidence bands (which are produced in cusum6 by
lww, uww) because they use a different method for computing the
critical value; in particular, they use tabled values from
Table C, pp. 364-365 of "The Econometric Analysis of Time Series"
Harvey, (1990), and use the value given to 99 observations for any
larger number of observations. In contrast, we use the approximating
critical values suggested in Edgerton and Wells (1994), which allow
computing relatively good approximations for any number of
observations. | _cusum_squares_significance_bounds | python | statsmodels/statsmodels | statsmodels/regression/recursive_ls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/recursive_ls.py | BSD-3-Clause |
def conf_int(self, obs=False, alpha=0.05):
"""
Returns the confidence interval of the value, `effect` of the
constraint.
This is currently only available for t and z tests.
Parameters
----------
alpha : float, optional
The significance level for the confidence interval.
ie., The default `alpha` = .05 returns a 95% confidence interval.
Returns
-------
ci : ndarray, (k_constraints, 2)
The array has the lower and the upper limit of the confidence
interval in the columns.
"""
se = self.se_obs if obs else self.se_mean
q = self.dist.ppf(1 - alpha / 2., *self.dist_args)
lower = self.predicted_mean - q * se
upper = self.predicted_mean + q * se
return np.column_stack((lower, upper)) | Returns the confidence interval of the value, `effect` of the
constraint.
This is currently only available for t and z tests.
Parameters
----------
alpha : float, optional
The significance level for the confidence interval.
ie., The default `alpha` = .05 returns a 95% confidence interval.
Returns
-------
ci : ndarray, (k_constraints, 2)
The array has the lower and the upper limit of the confidence
interval in the columns. | conf_int | python | statsmodels/statsmodels | statsmodels/regression/_prediction.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/_prediction.py | BSD-3-Clause |
def get_prediction(self, exog=None, transform=True, weights=None,
row_labels=None, pred_kwds=None):
"""
Compute prediction results.
Parameters
----------
exog : array_like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
weights : array_like, optional
Weights interpreted as in WLS, used for the variance of the predicted
residual.
row_labels : list
A list of row labels to use. If not provided, the row labels are read
from `exog` if available.
**kwargs
Some models can take additional keyword arguments, see the predict
method of the model for the details.
Returns
-------
linear_model.PredictionResults
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and summary
tables for the prediction of the mean and of new observations.
"""
# prepare exog and row_labels, based on base Results.predict
if transform and hasattr(self.model, 'formula') and exog is not None:
if isinstance(exog, pd.Series):
# GH-6509
exog = pd.DataFrame(exog)
exog = FormulaManager().get_matrices(self.model.data.model_spec, exog)
if exog is not None:
if row_labels is None:
row_labels = getattr(exog, 'index', None)
if callable(row_labels):
row_labels = None
exog = np.asarray(exog)
if exog.ndim == 1:
# Params informs whether a row or column vector
if self.params.shape[0] > 1:
exog = exog[None, :]
else:
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
else:
exog = self.model.exog
if weights is None:
weights = getattr(self.model, 'weights', None)
if row_labels is None:
row_labels = getattr(self.model.data, 'row_labels', None)
# need to handle other arrays, TODO: is delegating to model possible ?
if weights is not None:
weights = np.asarray(weights)
if (weights.size > 1 and
(weights.ndim != 1 or weights.shape[0] == exog.shape[1])):
raise ValueError('weights has wrong shape')
if pred_kwds is None:
pred_kwds = {}
predicted_mean = self.model.predict(self.params, exog, **pred_kwds)
covb = self.cov_params()
var_pred_mean = (exog * np.dot(covb, exog.T).T).sum(1)
var_resid = self.scale # self.mse_resid / weights
# TODO: check that we have correct scale, Refactor scale #???
# special case for now:
if self.cov_type == 'fixed scale':
var_resid = self.cov_kwds['scale']
if weights is not None:
var_resid /= weights
dist = ['norm', 't'][self.use_t]
return PredictionResults(predicted_mean, var_pred_mean, var_resid,
df=self.df_resid, dist=dist,
row_labels=row_labels) | Compute prediction results.
Parameters
----------
exog : array_like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
weights : array_like, optional
Weights interpreted as in WLS, used for the variance of the predicted
residual.
row_labels : list
A list of row labels to use. If not provided, the row labels are read
from `exog` if available.
**kwargs
Some models can take additional keyword arguments, see the predict
method of the model for the details.
Returns
-------
linear_model.PredictionResults
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and summary
tables for the prediction of the mean and of new observations. | get_prediction | python | statsmodels/statsmodels | statsmodels/regression/_prediction.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/_prediction.py | BSD-3-Clause |
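A minimal OLS sketch (synthetic data, illustrative names): `conf_int()` gives the interval for the mean prediction, `conf_int(obs=True)` the wider interval for a new observation.

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = rng.normal(size=100)
y = 3.0 - 1.0 * x + rng.normal(size=100)
ols_res = sm.OLS(y, sm.add_constant(x)).fit()

x_new = sm.add_constant(np.linspace(-2, 2, 5))
pred = ols_res.get_prediction(x_new)
print(pred.conf_int(alpha=0.05))   # CI for the mean
print(pred.conf_int(obs=True))     # CI for a new observation
print(pred.summary_frame())
```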
def _get_sigma(sigma, nobs):
"""
Returns sigma (matrix, nobs by nobs) for GLS and the inverse of its
Cholesky decomposition. Handles dimensions and checks integrity.
If sigma is None, returns None, None. Otherwise returns sigma,
cholsigmainv.
"""
if sigma is None:
return None, None
sigma = np.asarray(sigma).squeeze()
if sigma.ndim == 0:
sigma = np.repeat(sigma, nobs)
if sigma.ndim == 1:
if sigma.shape != (nobs,):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv = 1/np.sqrt(sigma)
else:
if sigma.shape != (nobs, nobs):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv, info = dtrtri(cholesky(sigma, lower=True),
lower=True, overwrite_c=True)
if info > 0:
raise np.linalg.LinAlgError('Cholesky decomposition of sigma '
'yields a singular matrix')
elif info < 0:
raise ValueError('Invalid input to dtrtri (info = %d)' % info)
return sigma, cholsigmainv | Returns sigma (matrix, nobs by nobs) for GLS and the inverse of its
Cholesky decomposition. Handles dimensions and checks integrity.
If sigma is None, returns None, None. Otherwise returns sigma,
cholsigmainv. | _get_sigma | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
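As the checks above imply, `sigma` may be a scalar, a length-`nobs` vector (interpreted as a diagonal covariance), or a full `nobs` x `nobs` matrix. A minimal sketch with an AR(1)-style covariance (synthetic data, illustrative names):

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
nobs = 50
x = rng.normal(size=nobs)
y = 1.0 + 2.0 * x + rng.normal(size=nobs)

rho = 0.5
idx = np.arange(nobs)
sigma = rho ** np.abs(np.subtract.outer(idx, idx))  # AR(1)-style covariance
res = sm.GLS(y, sm.add_constant(x), sigma=sigma).fit()
print(res.params)
```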
def initialize(self):
"""Initialize model components."""
self.wexog = self.whiten(self.exog)
self.wendog = self.whiten(self.endog)
# overwrite nobs from class Model:
self.nobs = float(self.wexog.shape[0])
self._df_model = None
self._df_resid = None
self.rank = None | Initialize model components. | initialize | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def df_model(self):
"""
The model degree of freedom.
The dof is defined as the rank of the regressor matrix minus 1 if a
constant is included.
"""
if self._df_model is None:
if self.rank is None:
self.rank = np.linalg.matrix_rank(self.exog)
self._df_model = float(self.rank - self.k_constant)
return self._df_model | The model degree of freedom.
The dof is defined as the rank of the regressor matrix minus 1 if a
constant is included. | df_model | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def df_resid(self):
"""
The residual degree of freedom.
The dof is defined as the number of observations minus the rank of
the regressor matrix.
"""
if self._df_resid is None:
if self.rank is None:
self.rank = np.linalg.matrix_rank(self.exog)
self._df_resid = self.nobs - self.rank
return self._df_resid | The residual degree of freedom.
The dof is defined as the number of observations minus the rank of
the regressor matrix. | df_resid | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def whiten(self, x):
"""
Whiten method that must be overwritten by individual models.
Parameters
----------
x : array_like
Data to be whitened.
"""
raise NotImplementedError("Subclasses must implement.") | Whiten method that must be overwritten by individual models.
Parameters
----------
x : array_like
Data to be whitened. | whiten | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def fit(
self,
method: Literal["pinv", "qr"] = "pinv",
cov_type: Literal[
"nonrobust",
"fixed scale",
"HC0",
"HC1",
"HC2",
"HC3",
"HAC",
"hac-panel",
"hac-groupsum",
"cluster",
] = "nonrobust",
cov_kwds=None,
use_t: bool | None = None,
**kwargs
):
"""
Full fit of the model.
The results include an estimate of covariance matrix, (whitened)
residuals and an estimate of scale.
Parameters
----------
method : str, optional
Can be "pinv", "qr". "pinv" uses the Moore-Penrose pseudoinverse
to solve the least squares problem. "qr" uses the QR
factorization.
cov_type : str, optional
See `regression.linear_model.RegressionResults` for a description
of the available covariance estimators.
cov_kwds : list or None, optional
See `linear_model.RegressionResults.get_robustcov_results` for a
description required keywords for alternative covariance
estimators.
use_t : bool, optional
Flag indicating to use the Student's t distribution when computing
p-values. Default behavior depends on cov_type. See
`linear_model.RegressionResults.get_robustcov_results` for
implementation details.
**kwargs
Additional keyword arguments that contain information used when
constructing a model using the formula interface.
Returns
-------
RegressionResults
The model estimation results.
See Also
--------
RegressionResults
The results container.
RegressionResults.get_robustcov_results
A method to change the covariance estimator used when fitting the
model.
Notes
-----
The fit method uses the pseudoinverse of the design/exogenous variables
to solve the least squares minimization.
"""
if method == "pinv":
if not (hasattr(self, 'pinv_wexog') and
hasattr(self, 'normalized_cov_params') and
hasattr(self, 'rank')):
self.pinv_wexog, singular_values = pinv_extended(self.wexog)
self.normalized_cov_params = np.dot(
self.pinv_wexog, np.transpose(self.pinv_wexog))
# Cache these singular values for use later.
self.wexog_singular_values = singular_values
self.rank = np.linalg.matrix_rank(np.diag(singular_values))
beta = np.dot(self.pinv_wexog, self.wendog)
elif method == "qr":
if not (hasattr(self, 'exog_Q') and
hasattr(self, 'exog_R') and
hasattr(self, 'normalized_cov_params') and
hasattr(self, 'rank')):
Q, R = np.linalg.qr(self.wexog)
self.exog_Q, self.exog_R = Q, R
self.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))
# Cache singular values from R.
self.wexog_singular_values = np.linalg.svd(R, 0, 0)
self.rank = np.linalg.matrix_rank(R)
else:
Q, R = self.exog_Q, self.exog_R
# Needed for some covariance estimators, see GH #8157
self.pinv_wexog = np.linalg.pinv(self.wexog)
# used in ANOVA
self.effects = effects = np.dot(Q.T, self.wendog)
beta = np.linalg.solve(R, effects)
else:
raise ValueError('method has to be "pinv" or "qr"')
if self._df_model is None:
self._df_model = float(self.rank - self.k_constant)
if self._df_resid is None:
self.df_resid = self.nobs - self.rank
if isinstance(self, OLS):
lfit = OLSResults(
self, beta,
normalized_cov_params=self.normalized_cov_params,
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t)
else:
lfit = RegressionResults(
self, beta,
normalized_cov_params=self.normalized_cov_params,
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t,
**kwargs)
return RegressionResultsWrapper(lfit) | Full fit of the model.
The results include an estimate of covariance matrix, (whitened)
residuals and an estimate of scale.
Parameters
----------
method : str, optional
Can be "pinv", "qr". "pinv" uses the Moore-Penrose pseudoinverse
to solve the least squares problem. "qr" uses the QR
factorization.
cov_type : str, optional
See `regression.linear_model.RegressionResults` for a description
of the available covariance estimators.
cov_kwds : list or None, optional
See `linear_model.RegressionResults.get_robustcov_results` for a
description required keywords for alternative covariance
estimators.
use_t : bool, optional
Flag indicating to use the Student's t distribution when computing
p-values. Default behavior depends on cov_type. See
`linear_model.RegressionResults.get_robustcov_results` for
implementation details.
**kwargs
Additional keyword arguments that contain information used when
constructing a model using the formula interface.
Returns
-------
RegressionResults
The model estimation results.
See Also
--------
RegressionResults
The results container.
RegressionResults.get_robustcov_results
A method to change the covariance estimator used when fitting the
model.
Notes
-----
The fit method uses the pseudoinverse of the design/exogenous variables
to solve the least squares minimization. | fit | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
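A minimal sketch (synthetic data, illustrative names) showing the QR solution path combined with a robust covariance estimator; "pinv" is the default.

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = rng.normal(size=(200, 2))
y = x @ np.array([1.0, -0.5]) + rng.normal(size=200)
res = sm.OLS(y, sm.add_constant(x)).fit(method="qr", cov_type="HC3")
print(res.bse)   # HC3 (heteroskedasticity-robust) standard errors
```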
def predict(self, params, exog=None):
"""
Return linear predicted values from a design matrix.
Parameters
----------
params : array_like
Parameters of a linear model.
exog : array_like, optional
Design / exogenous data. Model exog is used if None.
Returns
-------
array_like
An array of fitted values.
Notes
-----
If the model has not yet been fit, params is not optional.
"""
# JP: this does not look correct for GLMAR
# SS: it needs its own predict method
if exog is None:
exog = self.exog
return np.dot(exog, params) | Return linear predicted values from a design matrix.
Parameters
----------
params : array_like
Parameters of a linear model.
exog : array_like, optional
Design / exogenous data. Model exog is used if None.
Returns
-------
array_like
An array of fitted values.
Notes
-----
If the model has not yet been fit, params is not optional. | predict | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def get_distribution(self, params, scale, exog=None, dist_class=None):
"""
Construct a random number generator for the predictive distribution.
Parameters
----------
params : array_like
The model parameters (regression coefficients).
scale : scalar
The variance parameter.
exog : array_like
The predictor variable matrix.
dist_class : class
A random number generator class. Must take 'loc' and 'scale'
as arguments and return a random number generator implementing
an ``rvs`` method for simulating random values. Defaults to normal.
Returns
-------
gen
Frozen random number generator object with mean and variance
determined by the fitted linear model. Use the ``rvs`` method
to generate random values.
Notes
-----
Due to the behavior of ``scipy.stats.distributions objects``,
the returned random number generator must be called with
``gen.rvs(n)`` where ``n`` is the number of observations in
the data set used to fit the model. If any other value is
used for ``n``, misleading results will be produced.
"""
fit = self.predict(params, exog)
if dist_class is None:
from scipy.stats.distributions import norm
dist_class = norm
gen = dist_class(loc=fit, scale=np.sqrt(scale))
return gen | Construct a random number generator for the predictive distribution.
Parameters
----------
params : array_like
The model parameters (regression coefficients).
scale : scalar
The variance parameter.
exog : array_like
The predictor variable matrix.
dist_class : class
A random number generator class. Must take 'loc' and 'scale'
as arguments and return a random number generator implementing
an ``rvs`` method for simulating random values. Defaults to normal.
Returns
-------
gen
Frozen random number generator object with mean and variance
determined by the fitted linear model. Use the ``rvs`` method
to generate random values.
Notes
-----
Due to the behavior of ``scipy.stats.distributions`` objects,
the returned random number generator must be called with
``gen.rvs(n)`` where ``n`` is the number of observations in
the data set used to fit the model. If any other value is
used for ``n``, misleading results will be produced. | get_distribution | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
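# Illustrative sketch (not from the source; data made up): draw simulated
# responses from the predictive distribution of a fitted OLS model.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(1)
x = sm.add_constant(rng.normal(size=(100, 2)))
y = x @ np.array([1.0, 2.0, -1.0]) + rng.normal(size=100)
res = sm.OLS(y, x).fit()
gen = res.model.get_distribution(res.params, scale=res.scale)
simulated = gen.rvs(len(y))  # n must equal the number of observations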
def whiten(self, x):
"""
GLS whiten method.
Parameters
----------
x : array_like
Data to be whitened.
Returns
-------
ndarray
The value np.dot(cholsigmainv,X).
See Also
--------
GLS : Fit a linear model using Generalized Least Squares.
"""
x = np.asarray(x)
if self.sigma is None or self.sigma.shape == ():
return x
elif self.sigma.ndim == 1:
if x.ndim == 1:
return x * self.cholsigmainv
else:
return x * self.cholsigmainv[:, None]
else:
return np.dot(self.cholsigmainv, x) | GLS whiten method.
Parameters
----------
x : array_like
Data to be whitened.
Returns
-------
ndarray
The value np.dot(cholsigmainv,X).
See Also
--------
GLS : Fit a linear model using Generalized Least Squares. | whiten | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
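# Illustrative sketch (assumed data): with a 1-d sigma the GLS whitener scales
# each row by the corresponding element of cholsigmainv, as in the code above.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(2)
x = sm.add_constant(rng.normal(size=(5, 1)))
y = rng.normal(size=5)
sigma = np.array([1.0, 2.0, 1.0, 0.5, 1.5])  # heteroskedastic error variances
mod = sm.GLS(y, x, sigma=sigma)
wx = mod.whiten(x)
assert np.allclose(wx, x * mod.cholsigmainv[:, None])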
def hessian_factor(self, params, scale=None, observed=True):
"""
Compute weights for calculating Hessian.
Parameters
----------
params : ndarray
The parameter at which Hessian is evaluated.
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
ndarray
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`.
"""
if self.sigma is None or self.sigma.shape == ():
return np.ones(self.exog.shape[0])
elif self.sigma.ndim == 1:
return self.cholsigmainv
else:
return np.diag(self.cholsigmainv) | Compute weights for calculating Hessian.
Parameters
----------
params : ndarray
The parameter at which Hessian is evaluated.
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
ndarray
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`. | hessian_factor | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def whiten(self, x):
"""
Whitener for WLS model, multiplies each column by sqrt(self.weights).
Parameters
----------
x : array_like
Data to be whitened.
Returns
-------
array_like
The whitened values sqrt(weights)*X.
"""
x = np.asarray(x)
if x.ndim == 1:
return x * np.sqrt(self.weights)
elif x.ndim == 2:
return np.sqrt(self.weights)[:, None] * x | Whitener for WLS model, multiplies each column by sqrt(self.weights).
Parameters
----------
x : array_like
Data to be whitened.
Returns
-------
array_like
The whitened values sqrt(weights)*X. | whiten | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def hessian_factor(self, params, scale=None, observed=True):
"""
Compute the weights for calculating the Hessian.
Parameters
----------
params : ndarray
The parameter at which Hessian is evaluated.
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
ndarray
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`.
"""
return self.weights | Compute the weights for calculating the Hessian.
Parameters
----------
params : ndarray
The parameter at which Hessian is evaluated.
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
ndarray
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`. | hessian_factor | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def loglike(self, params, scale=None):
"""
The likelihood function for the OLS model.
Parameters
----------
params : array_like
The coefficients with which to estimate the log-likelihood.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
float
The likelihood function evaluated at params.
"""
nobs2 = self.nobs / 2.0
nobs = float(self.nobs)
resid = self.endog - np.dot(self.exog, params)
if hasattr(self, 'offset'):
resid -= self.offset
ssr = np.sum(resid**2)
if scale is None:
# profile log likelihood
llf = -nobs2*np.log(2*np.pi) - nobs2*np.log(ssr / nobs) - nobs2
else:
# log-likelihood
llf = -nobs2 * np.log(2 * np.pi * scale) - ssr / (2*scale)
return llf | The likelihood function for the OLS model.
Parameters
----------
params : array_like
The coefficients with which to estimate the log-likelihood.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
float
The likelihood function evaluated at params. | loglike | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
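# Illustrative check (assumed data): the profile log-likelihood evaluated at
# the OLS estimate should equal the llf reported on the results object.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(3)
x = sm.add_constant(rng.normal(size=(50, 2)))
y = x @ np.array([0.5, 1.0, -0.5]) + rng.normal(size=50)
model = sm.OLS(y, x)
res = model.fit()
assert np.isclose(model.loglike(res.params), res.llf)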
def whiten(self, x):
"""
OLS model whitener does nothing.
Parameters
----------
x : array_like
Data to be whitened.
Returns
-------
array_like
The input array unmodified.
See Also
--------
OLS : Fit a linear model using Ordinary Least Squares.
"""
return x | OLS model whitener does nothing.
Parameters
----------
x : array_like
Data to be whitened.
Returns
-------
array_like
The input array unmodified.
See Also
--------
OLS : Fit a linear model using Ordinary Least Squares. | whiten | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def score(self, params, scale=None):
"""
Evaluate the score function at a given point.
The score corresponds to the profile (concentrated)
log-likelihood in which the scale parameter has been profiled
out.
Parameters
----------
params : array_like
The parameter vector at which the score function is
computed.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
ndarray
The score vector.
"""
if not hasattr(self, "_wexog_xprod"):
self._setup_score_hess()
xtxb = np.dot(self._wexog_xprod, params)
sdr = -self._wexog_x_wendog + xtxb
if scale is None:
ssr = self._wendog_xprod - 2 * np.dot(self._wexog_x_wendog.T,
params)
ssr += np.dot(params, xtxb)
return -self.nobs * sdr / ssr
else:
return -sdr / scale | Evaluate the score function at a given point.
The score corresponds to the profile (concentrated)
log-likelihood in which the scale parameter has been profiled
out.
Parameters
----------
params : array_like
The parameter vector at which the score function is
computed.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
ndarray
The score vector. | score | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
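# Illustrative check (assumed data): at the OLS estimate the profile score is
# numerically zero, since the estimate minimizes the sum of squared residuals.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(4)
x = sm.add_constant(rng.normal(size=(70, 2)))
y = x @ np.array([1.0, -0.3, 0.6]) + rng.normal(size=70)
model = sm.OLS(y, x)
res = model.fit()
assert np.allclose(model.score(res.params), 0.0, atol=1e-6)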
def hessian(self, params, scale=None):
"""
Evaluate the Hessian function at a given point.
Parameters
----------
params : array_like
The parameter vector at which the Hessian is computed.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
ndarray
The Hessian matrix.
"""
if not hasattr(self, "_wexog_xprod"):
self._setup_score_hess()
xtxb = np.dot(self._wexog_xprod, params)
if scale is None:
ssr = self._wendog_xprod - 2 * np.dot(self._wexog_x_wendog.T,
params)
ssr += np.dot(params, xtxb)
ssrp = -2*self._wexog_x_wendog + 2*xtxb
hm = self._wexog_xprod / ssr - np.outer(ssrp, ssrp) / ssr**2
return -self.nobs * hm / 2
else:
return -self._wexog_xprod / scale | Evaluate the Hessian function at a given point.
Parameters
----------
params : array_like
The parameter vector at which the Hessian is computed.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
ndarray
The Hessian matrix. | hessian | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def hessian_factor(self, params, scale=None, observed=True):
"""
Calculate the weights for the Hessian.
Parameters
----------
params : ndarray
The parameter at which Hessian is evaluated.
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
ndarray
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`.
"""
return np.ones(self.exog.shape[0]) | Calculate the weights for the Hessian.
Parameters
----------
params : ndarray
The parameter at which Hessian is evaluated.
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
ndarray
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`. | hessian_factor | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def _fit_ridge(self, alpha):
"""
Fit a linear model using ridge regression.
Parameters
----------
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
Notes
-----
Equivalent to fit_regularized with L1_wt = 0 (but implemented
more efficiently).
"""
u, s, vt = np.linalg.svd(self.exog, 0)
v = vt.T
q = np.dot(u.T, self.endog) * s
s2 = s * s
if np.isscalar(alpha):
sd = s2 + alpha * self.nobs
params = q / sd
params = np.dot(v, params)
else:
alpha = np.asarray(alpha)
vtav = self.nobs * np.dot(vt, alpha[:, None] * v)
d = np.diag(vtav) + s2
np.fill_diagonal(vtav, d)
r = np.linalg.solve(vtav, q)
params = np.dot(v, r)
from statsmodels.base.elastic_net import RegularizedResults
return RegularizedResults(self, params) | Fit a linear model using ridge regression.
Parameters
----------
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
Notes
-----
Equivalent to fit_regularized with L1_wt = 0 (but implemented
more efficiently). | _fit_ridge | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
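# Illustrative sketch (assumed data): the ridge path above is reached through
# fit_regularized with L1_wt=0; alpha here is an arbitrary example value.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(5)
x = sm.add_constant(rng.normal(size=(200, 5)))
y = x @ np.r_[1.0, rng.normal(size=5)] + rng.normal(size=200)
ridge = sm.OLS(y, x).fit_regularized(alpha=0.1, L1_wt=0.0)
print(ridge.params)  # penalized (shrunken) coefficients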
def iterative_fit(self, maxiter=3, rtol=1e-4, **kwargs):
"""
Perform an iterative two-stage procedure to estimate a GLS model.
The model is assumed to have AR(p) errors; the AR(p) parameters and
regression coefficients are estimated iteratively.
Parameters
----------
maxiter : int, optional
The number of iterations.
rtol : float, optional
Relative tolerance between estimated coefficients to stop the
estimation. Stops if max(abs(last - current) / abs(last)) < rtol.
**kwargs
Additional keyword arguments passed to `fit`.
Returns
-------
RegressionResults
The results computed using an iterative fit.
"""
# TODO: update this after going through example.
converged = False
i = -1 # need to initialize for maxiter < 1 (skip loop)
history = {'params': [], 'rho': [self.rho]}
for i in range(maxiter - 1):
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
self.initialize()
results = self.fit()
history['params'].append(results.params)
if i == 0:
last = results.params
else:
diff = np.max(np.abs(last - results.params) / np.abs(last))
if diff < rtol:
converged = True
break
last = results.params
self.rho, _ = yule_walker(results.resid,
order=self.order, df=None)
history['rho'].append(self.rho)
# why not another call to self.initialize
# Use kwarg to insert history
if not converged and maxiter > 0:
# maxiter <= 0 just does OLS
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
self.initialize()
# if converged then this is a duplicate fit, because we did not
# update rho
results = self.fit(history=history, **kwargs)
results.iter = i + 1
# add last fit to history, not if duplicate fit
if not converged:
results.history['params'].append(results.params)
results.iter += 1
results.converged = converged
return results | Perform an iterative two-stage procedure to estimate a GLS model.
The model is assumed to have AR(p) errors; the AR(p) parameters and
regression coefficients are estimated iteratively.
Parameters
----------
maxiter : int, optional
The number of iterations.
rtol : float, optional
Relative tolerance between estimated coefficients to stop the
estimation. Stops if max(abs(last - current) / abs(last)) < rtol.
**kwargs
Additional keyword arguments passed to `fit`.
Returns
-------
RegressionResults
The results computed using an iterative fit. | iterative_fit | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
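# Illustrative sketch (assumed data): two-stage GLSAR estimation with AR(1)
# errors; rho=1 requests a single autoregressive lag.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(6)
n = 200
x = sm.add_constant(np.arange(n, dtype=float))
e = np.zeros(n)
for t in range(1, n):          # simulate AR(1) errors with rho = 0.6
    e[t] = 0.6 * e[t - 1] + rng.normal()
y = x @ np.array([1.0, 0.05]) + e
model = sm.GLSAR(y, x, rho=1)
res = model.iterative_fit(maxiter=6)
print(res.params, model.rho, res.converged)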
def whiten(self, x):
"""
Whiten a series of columns according to an AR(p) covariance structure.
Whitening using this method drops the initial p observations.
Parameters
----------
x : array_like
The data to be whitened.
Returns
-------
ndarray
The whitened data.
"""
# TODO: notation for AR process
x = np.asarray(x, np.float64)
_x = x.copy()
# the following loops over the first axis, works for 1d and nd
for i in range(self.order):
_x[(i + 1):] = _x[(i + 1):] - self.rho[i] * x[0:-(i + 1)]
return _x[self.order:] | Whiten a series of columns according to an AR(p) covariance structure.
Whitening using this method drops the initial p observations.
Parameters
----------
x : array_like
The data to be whitened.
Returns
-------
ndarray
The whitened data. | whiten | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def yule_walker(x, order=1, method="adjusted", df=None, inv=False,
demean=True):
"""
Estimate AR(p) parameters from a sequence using the Yule-Walker equations.
Adjusted or maximum-likelihood estimator (mle)
Parameters
----------
x : array_like
A 1d array.
order : int, optional
The order of the autoregressive process. Default is 1.
method : str, optional
Method can be 'adjusted' or 'mle' and this determines
denominator in estimate of autocorrelation function (ACF) at
lag k. If 'mle', the denominator is n=X.shape[0], if 'adjusted'
the denominator is n-k. The default is adjusted.
df : int, optional
Specifies the degrees of freedom. If `df` is supplied, then it
is assumed the X has `df` degrees of freedom rather than `n`.
Default is None.
inv : bool
If inv is True the inverse of R is also returned. Default is
False.
demean : bool
If True, the mean is subtracted from `X` before estimation.
Returns
-------
rho : ndarray
AR(p) coefficients computed using the Yule-Walker method.
sigma : float
The estimate of the residual standard deviation.
See Also
--------
burg : Burg's AR estimator.
Notes
-----
See https://en.wikipedia.org/wiki/Autoregressive_moving_average_model for
further details.
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.datasets.sunspots import load
>>> data = load()
>>> rho, sigma = sm.regression.yule_walker(data.endog, order=4,
... method="mle")
>>> rho
array([ 1.28310031, -0.45240924, -0.20770299, 0.04794365])
>>> sigma
16.808022730464351
"""
# TODO: define R better, look back at notes and technical notes on YW.
# First link here is useful
# http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm
method = string_like(
method, "method", options=("adjusted", "unbiased", "mle")
)
if method == "unbiased":
warnings.warn(
"unbiased is deprecated in factor of adjusted to reflect that the "
"term is adjusting the sample size used in the autocovariance "
"calculation rather than estimating an unbiased autocovariance. "
"After release 0.13, using 'unbiased' will raise.",
FutureWarning,
)
method = "adjusted"
if method not in ("adjusted", "mle"):
raise ValueError("ACF estimation method must be 'adjusted' or 'MLE'")
# TODO: Require??
x = np.array(x, dtype=np.float64)
if demean:
if not x.flags.writeable:
x = np.require(x, requirements="W")
x -= x.mean()
n = df or x.shape[0]
# this handles df_resid ie., n - p
adj_needed = method == "adjusted"
if x.ndim > 1 and x.shape[1] != 1:
raise ValueError("expecting a vector to estimate AR parameters")
r = np.zeros(order+1, np.float64)
r[0] = (x ** 2).sum() / n
for k in range(1, order+1):
r[k] = (x[0:-k] * x[k:]).sum() / (n - k * adj_needed)
R = toeplitz(r[:-1])
try:
rho = np.linalg.solve(R, r[1:])
except np.linalg.LinAlgError as err:
if 'Singular matrix' in str(err):
warnings.warn("Matrix is singular. Using pinv.", ValueWarning)
rho = np.linalg.pinv(R) @ r[1:]
else:
raise
sigmasq = r[0] - (r[1:]*rho).sum()
if not np.isnan(sigmasq) and sigmasq > 0:
sigma = np.sqrt(sigmasq)
else:
sigma = np.nan
if inv:
return rho, sigma, np.linalg.inv(R)
else:
return rho, sigma | Estimate AR(p) parameters from a sequence using the Yule-Walker equations.
Adjusted or maximum-likelihood estimator (mle)
Parameters
----------
x : array_like
A 1d array.
order : int, optional
The order of the autoregressive process. Default is 1.
method : str, optional
Method can be 'adjusted' or 'mle' and this determines
denominator in estimate of autocorrelation function (ACF) at
lag k. If 'mle', the denominator is n=X.shape[0], if 'adjusted'
the denominator is n-k. The default is adjusted.
df : int, optional
Specifies the degrees of freedom. If `df` is supplied, then it
is assumed the X has `df` degrees of freedom rather than `n`.
Default is None.
inv : bool
If inv is True the inverse of R is also returned. Default is
False.
demean : bool
If True, the mean is subtracted from `X` before estimation.
Returns
-------
rho : ndarray
AR(p) coefficients computed using the Yule-Walker method.
sigma : float
The estimate of the residual standard deviation.
See Also
--------
burg : Burg's AR estimator.
Notes
-----
See https://en.wikipedia.org/wiki/Autoregressive_moving_average_model for
further details.
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.datasets.sunspots import load
>>> data = load()
>>> rho, sigma = sm.regression.yule_walker(data.endog, order=4,
... method="mle")
>>> rho
array([ 1.28310031, -0.45240924, -0.20770299, 0.04794365])
>>> sigma
16.808022730464351 | yule_walker | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def burg(endog, order=1, demean=True):
"""
Compute Burg's AR(p) parameter estimator.
Parameters
----------
endog : array_like
The endogenous variable.
order : int, optional
Order of the AR. Default is 1.
demean : bool, optional
Flag indicating to subtract the mean from endog before estimation.
Returns
-------
rho : ndarray
The AR(p) coefficients computed using Burg's algorithm.
sigma2 : float
The estimate of the residual variance.
See Also
--------
yule_walker : Estimate AR parameters using the Yule-Walker method.
Notes
-----
The AR model estimated includes a constant that is estimated using the sample
mean (see [1]_). This value is not reported.
References
----------
.. [1] Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series
and forecasting. Springer.
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.datasets.sunspots import load
>>> data = load()
>>> rho, sigma2 = sm.regression.linear_model.burg(data.endog, order=4)
>>> rho
array([ 1.30934186, -0.48086633, -0.20185982, 0.05501941])
>>> sigma2
271.2467306963966
"""
# Avoid circular imports
from statsmodels.tsa.stattools import levinson_durbin_pacf, pacf_burg
endog = np.squeeze(np.asarray(endog))
if endog.ndim != 1:
raise ValueError('endog must be 1-d or squeezable to 1-d.')
order = int(order)
if order < 1:
raise ValueError('order must be an integer of at least 1')
if demean:
endog = endog - endog.mean()
pacf, sigma = pacf_burg(endog, order, demean=demean)
ar, _ = levinson_durbin_pacf(pacf)
return ar, sigma[-1] | Compute Burg's AR(p) parameter estimator.
Parameters
----------
endog : array_like
The endogenous variable.
order : int, optional
Order of the AR. Default is 1.
demean : bool, optional
Flag indicating to subtract the mean from endog before estimation.
Returns
-------
rho : ndarray
The AR(p) coefficients computed using Burg's algorithm.
sigma2 : float
The estimate of the residual variance.
See Also
--------
yule_walker : Estimate AR parameters using the Yule-Walker method.
Notes
-----
AR model estimated includes a constant that is estimated using the sample
mean (see [1]_). This value is not reported.
References
----------
.. [1] Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series
and forecasting. Springer.
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.datasets.sunspots import load
>>> data = load()
>>> rho, sigma2 = sm.regression.linear_model.burg(data.endog, order=4)
>>> rho
array([ 1.30934186, -0.48086633, -0.20185982, 0.05501941])
>>> sigma2
271.2467306963966 | burg | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def conf_int(self, alpha=.05, cols=None):
"""
Compute the confidence interval of the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval. The default
`alpha` = .05 returns a 95% confidence interval.
cols : array_like, optional
Columns to include in returned confidence intervals.
Returns
-------
array_like
The confidence intervals.
Notes
-----
The confidence interval is based on Student's t-distribution.
"""
# keep method for docstring for now
ci = super().conf_int(alpha=alpha, cols=cols)
return ci | Compute the confidence interval of the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval. The default
`alpha` = .05 returns a 95% confidence interval.
cols : array_like, optional
Columns to include in returned confidence intervals.
Returns
-------
array_like
The confidence intervals.
Notes
-----
The confidence interval is based on Student's t-distribution. | conf_int | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def nobs(self):
"""Number of observations n."""
return float(self.model.wexog.shape[0]) | Number of observations n. | nobs | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def fittedvalues(self):
"""The predicted values for the original (unwhitened) design."""
return self.model.predict(self.params, self.model.exog) | The predicted values for the original (unwhitened) design. | fittedvalues | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def wresid(self):
"""
The residuals of the transformed/whitened regressand and regressor(s).
"""
return self.model.wendog - self.model.predict(
self.params, self.model.wexog) | The residuals of the transformed/whitened regressand and regressor(s). | wresid | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def resid(self):
"""The residuals of the model."""
return self.model.endog - self.model.predict(
self.params, self.model.exog) | The residuals of the model. | resid | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def scale(self):
"""
A scale factor for the covariance matrix.
The default value is ssr/(n-p). Note that the square root of `scale`
is often called the standard error of the regression.
"""
wresid = self.wresid
return np.dot(wresid, wresid) / self.df_resid | A scale factor for the covariance matrix.
The default value is ssr/(n-p). Note that the square root of `scale`
is often called the standard error of the regression. | scale | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def ssr(self):
"""Sum of squared (whitened) residuals."""
wresid = self.wresid
return np.dot(wresid, wresid) | Sum of squared (whitened) residuals. | ssr | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def centered_tss(self):
"""The total (weighted) sum of squares centered about the mean."""
model = self.model
weights = getattr(model, 'weights', None)
sigma = getattr(model, 'sigma', None)
if weights is not None:
mean = np.average(model.endog, weights=weights)
return np.sum(weights * (model.endog - mean)**2)
elif sigma is not None:
# Exactly matches WLS when sigma is diagonal
iota = np.ones_like(model.endog)
iota = model.whiten(iota)
mean = model.wendog.dot(iota) / iota.dot(iota)
err = model.endog - mean
err = model.whiten(err)
return np.sum(err**2)
else:
centered_endog = model.wendog - model.wendog.mean()
return np.dot(centered_endog, centered_endog) | The total (weighted) sum of squares centered about the mean. | centered_tss | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def uncentered_tss(self):
"""
Uncentered sum of squares.
The sum of the squared values of the (whitened) endogenous response
variable.
"""
wendog = self.model.wendog
return np.dot(wendog, wendog) | Uncentered sum of squares.
The sum of the squared values of the (whitened) endogenous response
variable. | uncentered_tss | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def ess(self):
"""
The explained sum of squares.
If a constant is present, the centered total sum of squares minus the
sum of squared residuals. If there is no constant, the uncentered total
sum of squares minus the sum of squared residuals is used.
"""
if self.k_constant:
return self.centered_tss - self.ssr
else:
return self.uncentered_tss - self.ssr | The explained sum of squares.
If a constant is present, the centered total sum of squares minus the
sum of squared residuals. If there is no constant, the uncentered total
sum of squares is used. | ess | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def rsquared(self):
"""
R-squared of the model.
This is defined here as 1 - `ssr`/`centered_tss` if the constant is
included in the model and 1 - `ssr`/`uncentered_tss` if the constant is
omitted.
"""
if self.k_constant:
return 1 - self.ssr/self.centered_tss
else:
return 1 - self.ssr/self.uncentered_tss | R-squared of the model.
This is defined here as 1 - `ssr`/`centered_tss` if the constant is
included in the model and 1 - `ssr`/`uncentered_tss` if the constant is
omitted. | rsquared | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
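# Illustrative check (assumed data): with a constant in the model, rsquared
# equals 1 - ssr / centered_tss, matching the definition above.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(7)
x = sm.add_constant(rng.normal(size=(60, 3)))
y = x @ np.array([1.0, 0.3, -0.7, 0.2]) + rng.normal(size=60)
res = sm.OLS(y, x).fit()
assert np.isclose(res.rsquared, 1 - res.ssr / res.centered_tss)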
def rsquared_adj(self):
"""
Adjusted R-squared.
This is defined here as 1 - (`nobs`-1)/`df_resid` * (1-`rsquared`)
if a constant is included and 1 - `nobs`/`df_resid` * (1-`rsquared`) if
no constant is included.
"""
return 1 - (np.divide(self.nobs - self.k_constant, self.df_resid)
* (1 - self.rsquared)) | Adjusted R-squared.
This is defined here as 1 - (`nobs`-1)/`df_resid` * (1-`rsquared`)
if a constant is included and 1 - `nobs`/`df_resid` * (1-`rsquared`) if
no constant is included. | rsquared_adj | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def mse_model(self):
"""
Mean squared error of the model.
The explained sum of squares divided by the model degrees of freedom.
"""
if np.all(self.df_model == 0.0):
return np.full_like(self.ess, np.nan)
return self.ess/self.df_model | Mean squared error of the model.
The explained sum of squares divided by the model degrees of freedom. | mse_model | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def mse_resid(self):
"""
Mean squared error of the residuals.
The sum of squared residuals divided by the residual degrees of
freedom.
"""
if np.all(self.df_resid == 0.0):
return np.full_like(self.ssr, np.nan)
return self.ssr/self.df_resid | Mean squared error of the residuals.
The sum of squared residuals divided by the residual degrees of
freedom. | mse_resid | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def mse_total(self):
"""
Total mean squared error.
The total sum of squares (centered about the mean when a constant is
included) divided by the sum of the residual and model degrees of freedom.
"""
if np.all(self.df_resid + self.df_model == 0.0):
return np.full_like(self.centered_tss, np.nan)
if self.k_constant:
return self.centered_tss / (self.df_resid + self.df_model)
else:
return self.uncentered_tss / (self.df_resid + self.df_model) | Total mean squared error.
The total sum of squares divided by the sum of the residual and model degrees of freedom; with a constant this is roughly the variance of the
observations. | mse_total | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def fvalue(self):
"""
F-statistic of the fully specified model.
Calculated as the mean squared error of the model divided by the mean
squared error of the residuals if the nonrobust covariance is used.
Otherwise computed using a Wald-like quadratic form that tests whether
all coefficients (excluding the constant) are zero.
"""
if hasattr(self, 'cov_type') and self.cov_type != 'nonrobust':
# with heteroscedasticity or correlation robustness
k_params = self.normalized_cov_params.shape[0]
mat = np.eye(k_params)
const_idx = self.model.data.const_idx
# TODO: What if model includes implicit constant, e.g. all
# dummies but no constant regressor?
# TODO: Restats as LM test by projecting orthogonalizing
# to constant?
if self.model.data.k_constant == 1:
# if constant is implicit, return nan see #2444
if const_idx is None:
return np.nan
idx = lrange(k_params)
idx.pop(const_idx)
mat = mat[idx] # remove constant
if mat.size == 0: # see #3642
return np.nan
ft = self.f_test(mat)
# using backdoor to set another attribute that we already have
self._cache['f_pvalue'] = float(ft.pvalue)
return float(ft.fvalue)
else:
# for standard homoscedastic case
return self.mse_model/self.mse_resid | F-statistic of the fully specified model.
Calculated as the mean squared error of the model divided by the mean
squared error of the residuals if the nonrobust covariance is used.
Otherwise computed using a Wald-like quadratic form that tests whether
all coefficients (excluding the constant) are zero. | fvalue | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def f_pvalue(self):
"""The p-value of the F-statistic."""
# Special case for df_model 0
if self.df_model == 0:
return np.full_like(self.fvalue, np.nan)
return stats.f.sf(self.fvalue, self.df_model, self.df_resid) | The p-value of the F-statistic. | f_pvalue | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def bse(self):
"""The standard errors of the parameter estimates."""
return np.sqrt(np.diag(self.cov_params())) | The standard errors of the parameter estimates. | bse | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def info_criteria(self, crit, dk_params=0):
"""Return an information criterion for the model.
Parameters
----------
crit : string
One of 'aic', 'bic', 'aicc' or 'hqic'.
dk_params : int or float
Correction to the number of parameters used in the information
criterion. By default, only mean parameters are included, the
scale parameter is not included in the parameter count.
Use ``dk_params=1`` to include scale in the parameter count.
Returns
-------
Value of information criterion.
References
----------
Burnham KP, Anderson KR (2002). Model Selection and Multimodel
Inference; Springer New York.
"""
crit = crit.lower()
k_params = self.df_model + self.k_constant + dk_params
if crit == "aic":
return -2 * self.llf + 2 * k_params
elif crit == "bic":
bic = -2*self.llf + np.log(self.nobs) * k_params
return bic
elif crit == "aicc":
from statsmodels.tools.eval_measures import aicc
return aicc(self.llf, self.nobs, k_params)
elif crit == "hqic":
from statsmodels.tools.eval_measures import hqic
return hqic(self.llf, self.nobs, k_params) | Return an information criterion for the model.
Parameters
----------
crit : string
One of 'aic', 'bic', 'aicc' or 'hqic'.
dk_params : int or float
Correction to the number of parameters used in the information
criterion. By default, only mean parameters are included, the
scale parameter is not included in the parameter count.
Use ``dk_params=1`` to include scale in the parameter count.
Returns
-------
Value of information criterion.
References
----------
Burnham KP, Anderson KR (2002). Model Selection and Multimodel
Inference; Springer New York. | info_criteria | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
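# Illustrative check (assumed data): 'aic' equals -2*llf + 2*k_params with
# k_params = df_model + k_constant; dk_params=1 also counts the scale.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(8)
x = sm.add_constant(rng.normal(size=(80, 2)))
y = x @ np.array([1.0, 0.5, -0.5]) + rng.normal(size=80)
res = sm.OLS(y, x).fit()
k_params = res.df_model + res.k_constant
assert np.isclose(res.info_criteria("aic"), -2 * res.llf + 2 * k_params)
print(res.info_criteria("aic", dk_params=1))  # includes the scale parameter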
def eigenvals(self):
"""
Return eigenvalues sorted in decreasing order.
"""
if self._wexog_singular_values is not None:
eigvals = self._wexog_singular_values ** 2
else:
wx = self.model.wexog
eigvals = np.linalg.eigvalsh(wx.T @ wx)
return np.sort(eigvals)[::-1] | Return eigenvalues sorted in decreasing order. | eigenvals | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def condition_number(self):
"""
Return condition number of exogenous matrix.
Calculated as ratio of largest to smallest singular value of the
exogenous variables. This value is the same as the square root of
the ratio of the largest to smallest eigenvalue of the inner-product
of the exogenous variables.
"""
eigvals = self.eigenvals
return np.sqrt(eigvals[0]/eigvals[-1]) | Return condition number of exogenous matrix.
Calculated as ratio of largest to smallest singular value of the
exogenous variables. This value is the same as the square root of
the ratio of the largest to smallest eigenvalue of the inner-product
of the exogenous variables. | condition_number | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
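# Illustrative check (assumed data): the condition number is the ratio of the
# largest to smallest singular value of the whitened design matrix.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(9)
x = sm.add_constant(rng.normal(size=(40, 3)))
y = rng.normal(size=40)
res = sm.OLS(y, x).fit()
sv = np.linalg.svd(res.model.wexog, compute_uv=False)
assert np.isclose(res.condition_number, sv[0] / sv[-1])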
def cov_HC0(self):
"""
Heteroscedasticity robust covariance matrix. See HC0_se.
"""
self.het_scale = self.wresid**2
cov_HC0 = self._HCCM(self.het_scale)
return cov_HC0 | Heteroscedasticity robust covariance matrix. See HC0_se. | cov_HC0 | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def cov_HC1(self):
"""
Heteroscedasticity robust covariance matrix. See HC1_se.
"""
self.het_scale = self.nobs/(self.df_resid)*(self.wresid**2)
cov_HC1 = self._HCCM(self.het_scale)
return cov_HC1 | Heteroscedasticity robust covariance matrix. See HC1_se. | cov_HC1 | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def cov_HC2(self):
"""
Heteroscedasticity robust covariance matrix. See HC2_se.
"""
wexog = self.model.wexog
h = self._abat_diagonal(wexog, self.normalized_cov_params)
self.het_scale = self.wresid**2/(1-h)
cov_HC2 = self._HCCM(self.het_scale)
return cov_HC2 | Heteroscedasticity robust covariance matrix. See HC2_se. | cov_HC2 | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def cov_HC3(self):
"""
Heteroscedasticity robust covariance matrix. See HC3_se.
"""
wexog = self.model.wexog
h = self._abat_diagonal(wexog, self.normalized_cov_params)
self.het_scale = (self.wresid / (1 - h))**2
cov_HC3 = self._HCCM(self.het_scale)
return cov_HC3 | Heteroscedasticity robust covariance matrix. See HC3_se. | cov_HC3 | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def HC0_se(self):
"""
White's (1980) heteroskedasticity robust standard errors.
Notes
-----
Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)) X (X.T X)^(-1)))
where e_i = resid[i].
When HC0_se or cov_HC0 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is just
resid**2.
"""
return np.sqrt(np.diag(self.cov_HC0)) | White's (1980) heteroskedasticity robust standard errors.
Notes
-----
Defined as sqrt(diag(X.T X)^(-1)X.T diag(e_i^(2)) X(X.T X)^(-1)
where e_i = resid[i].
When HC0_se or cov_HC0 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is just
resid**2. | HC0_se | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def HC1_se(self):
"""
MacKinnon and White's (1985) heteroskedasticity robust standard errors.
Notes
-----
Defined as sqrt(diag(n/(n-p)*HC_0)).
When HC1_se or cov_HC1 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is
n/(n-p)*resid**2.
"""
return np.sqrt(np.diag(self.cov_HC1)) | MacKinnon and White's (1985) heteroskedasticity robust standard errors.
Notes
-----
Defined as sqrt(diag(n/(n-p)*HC_0).
When HC1_se or cov_HC1 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is
n/(n-p)*resid**2. | HC1_se | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def HC2_se(self):
"""
MacKinnon and White's (1985) heteroskedasticity robust standard errors.
Notes
-----
Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)/(1-h_ii)) X (X.T X)^(-1)))
where h_ii = x_i(X.T X)^(-1)x_i.T
When HC2_se or cov_HC2 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is
resid^(2)/(1-h_ii).
"""
return np.sqrt(np.diag(self.cov_HC2)) | MacKinnon and White's (1985) heteroskedasticity robust standard errors.
Notes
-----
Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)) X(X.T X)^(-1)
where h_ii = x_i(X.T X)^(-1)x_i.T
When HC2_se or cov_HC2 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is
resid^(2)/(1-h_ii). | HC2_se | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def HC3_se(self):
"""
MacKinnon and White's (1985) heteroskedasticity robust standard errors.
Notes
-----
Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)/(1-h_ii)^(2)) X (X.T X)^(-1)))
where h_ii = x_i(X.T X)^(-1)x_i.T.
When HC3_se or cov_HC3 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is
resid^(2)/(1-h_ii)^(2).
"""
return np.sqrt(np.diag(self.cov_HC3)) | MacKinnon and White's (1985) heteroskedasticity robust standard errors.
Notes
-----
Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)^(2)) X(X.T X)^(-1)
where h_ii = x_i(X.T X)^(-1)x_i.T.
When HC3_se or cov_HC3 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is
resid^(2)/(1-h_ii)^(2). | HC3_se | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
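# Illustrative sketch (assumed heteroskedastic data): the HC0-HC3 standard
# errors above are also what fit(cov_type=...) reports as bse.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(10)
x = sm.add_constant(rng.normal(size=(150, 2)))
y = x @ np.array([1.0, 0.5, -0.5]) + np.abs(x[:, 1]) * rng.normal(size=150)
res = sm.OLS(y, x).fit()
print(res.bse)     # classical standard errors
print(res.HC3_se)  # heteroskedasticity-robust standard errors
res_hc3 = sm.OLS(y, x).fit(cov_type="HC3")
assert np.allclose(res_hc3.bse, res.HC3_se)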
def resid_pearson(self):
"""
Residuals, normalized to have unit variance.
Returns
-------
array_like
The array `wresid` normalized by the sqrt of the scale to have
unit variance.
"""
if not hasattr(self, 'resid'):
raise ValueError('Method requires residuals.')
eps = np.finfo(self.wresid.dtype).eps
if np.sqrt(self.scale) < 10 * eps * self.model.endog.mean():
# do not divide if scale is zero close to numerical precision
warnings.warn(
"All residuals are 0, cannot compute normed residuals.",
RuntimeWarning
)
return self.wresid
else:
return self.wresid / np.sqrt(self.scale) | Residuals, normalized to have unit variance.
Returns
-------
array_like
The array `wresid` normalized by the sqrt of the scale to have
unit variance. | resid_pearson | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def _is_nested(self, restricted):
"""
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
Returns
-------
nested : bool
True if nested, otherwise false
Notes
-----
A model nests another model if the regressors in the smaller
model are spanned by the regressors in the larger model and
the regressand is identical.
"""
if self.model.nobs != restricted.model.nobs:
return False
full_rank = self.model.rank
restricted_rank = restricted.model.rank
if full_rank <= restricted_rank:
return False
restricted_exog = restricted.model.wexog
full_wresid = self.wresid
scores = restricted_exog * full_wresid[:, None]
score_l2 = np.sqrt(np.mean(scores.mean(0) ** 2))
# TODO: Could be improved, and may fail depending on scale of
# regressors
return np.allclose(score_l2, 0) | Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
Returns
-------
nested : bool
True if nested, otherwise false
Notes
-----
A model nests another model if the regressors in the smaller
model are spanned by the regressors in the larger model and
the regressand is identical. | _is_nested | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def compare_lm_test(self, restricted, demean=True, use_lr=False):
"""
Use Lagrange Multiplier test to test a set of linear restrictions.
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the
current model. The result instance of the restricted model
is required to have two attributes, residual sum of
squares, `ssr`, residual degrees of freedom, `df_resid`.
demean : bool
Flag indicating whether the demean the scores based on the
residuals from the restricted model. If True, the covariance of
the scores are used and the LM test is identical to the large
sample version of the LR test.
use_lr : bool
A flag indicating whether to estimate the covariance of the model
scores using the unrestricted model. Setting this to True improves
the power of the test.
Returns
-------
lm_value : float
The test statistic, which has a chi2 distribution.
p_value : float
The p-value of the test statistic.
df_diff : int
The degrees of freedom of the restriction, i.e. difference in df
between models.
Notes
-----
The LM test examines whether the scores from the restricted model are
0. If the null is true, and the restrictions are valid, then the
parameters of the restricted model should be close to the minimum of
the sum of squared errors, and so the scores should be close to zero,
on average.
"""
from numpy.linalg import inv
import statsmodels.stats.sandwich_covariance as sw
if not self._is_nested(restricted):
raise ValueError("Restricted model is not nested by full model.")
wresid = restricted.wresid
wexog = self.model.wexog
scores = wexog * wresid[:, None]
n = self.nobs
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
s = scores.mean(axis=0)
if use_lr:
scores = wexog * self.wresid[:, None]
demean = False
if demean:
scores = scores - scores.mean(0)[None, :]
# Form matters here. If homoskedastics can be sigma^2 (X'X)^-1
# If Heteroskedastic then the form below is fine
# If HAC then need to use HAC
# If Cluster, should use cluster
cov_type = getattr(self, 'cov_type', 'nonrobust')
if cov_type == 'nonrobust':
sigma2 = np.mean(wresid**2)
xpx = np.dot(wexog.T, wexog) / n
s_inv = inv(sigma2 * xpx)
elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):
s_inv = inv(np.dot(scores.T, scores) / n)
elif cov_type == 'HAC':
maxlags = self.cov_kwds['maxlags']
s_inv = inv(sw.S_hac_simple(scores, maxlags) / n)
elif cov_type == 'cluster':
# cluster robust standard errors
groups = self.cov_kwds['groups']
# TODO: Might need demean option in S_crosssection by group?
s_inv = inv(sw.S_crosssection(scores, groups))
else:
raise ValueError('Only nonrobust, HC, HAC and cluster are ' +
'currently connected')
lm_value = n * (s @ s_inv @ s.T)
p_value = stats.chi2.sf(lm_value, df_diff)
return lm_value, p_value, df_diff | Use Lagrange Multiplier test to test a set of linear restrictions.
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the
current model. The result instance of the restricted model
is required to have two attributes, residual sum of
squares, `ssr`, residual degrees of freedom, `df_resid`.
demean : bool
Flag indicating whether the demean the scores based on the
residuals from the restricted model. If True, the covariance of
the scores are used and the LM test is identical to the large
sample version of the LR test.
use_lr : bool
A flag indicating whether to estimate the covariance of the model
scores using the unrestricted model. Setting this to True improves
the power of the test.
Returns
-------
lm_value : float
The test statistic, which has a chi2 distribution.
p_value : float
The p-value of the test statistic.
df_diff : int
The degrees of freedom of the restriction, i.e. difference in df
between models.
Notes
-----
The LM test examines whether the scores from the restricted model are
0. If the null is true, and the restrictions are valid, then the
parameters of the restricted model should be close to the minimum of
the sum of squared errors, and so the scores should be close to zero,
on average. | compare_lm_test | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def compare_f_test(self, restricted):
"""
Use F test to test whether restricted model is correct.
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the
current model. The result instance of the restricted model
is required to have two attributes, residual sum of
squares, `ssr`, residual degrees of freedom, `df_resid`.
Returns
-------
f_value : float
The test statistic which has an F distribution.
p_value : float
The p-value of the test statistic.
df_diff : int
The degrees of freedom of the restriction, i.e. difference in
df between models.
Notes
-----
See mailing list discussion October 17,
This test compares the residual sum of squares of the two
models. This is not a valid test, if there is unspecified
heteroscedasticity or correlation. This method will issue a
warning if this is detected but still return the results under
the assumption of homoscedasticity and no autocorrelation
(sphericity).
"""
has_robust1 = getattr(self, 'cov_type', 'nonrobust') != 'nonrobust'
has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=
'nonrobust')
if has_robust1 or has_robust2:
warnings.warn('F test for comparison is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
ssr_full = self.ssr
ssr_restr = restricted.ssr
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
f_value = (ssr_restr - ssr_full) / df_diff / ssr_full * df_full
p_value = stats.f.sf(f_value, df_diff, df_full)
return f_value, p_value, df_diff | Use F test to test whether restricted model is correct.
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the
current model. The result instance of the restricted model
is required to have two attributes, residual sum of
squares, `ssr`, residual degrees of freedom, `df_resid`.
Returns
-------
f_value : float
The test statistic which has an F distribution.
p_value : float
The p-value of the test statistic.
df_diff : int
The degrees of freedom of the restriction, i.e. difference in
df between models.
Notes
-----
See mailing list discussion October 17,
This test compares the residual sum of squares of the two
models. This is not a valid test, if there is unspecified
heteroscedasticity or correlation. This method will issue a
warning if this is detected but still return the results under
the assumption of homoscedasticity and no autocorrelation
(sphericity). | compare_f_test | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
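# Illustrative sketch (assumed data): F test of a restricted model (last
# regressor dropped) against the full model, both fit on the same endog.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(11)
x = sm.add_constant(rng.normal(size=(120, 3)))
y = x @ np.array([1.0, 0.8, 0.0, -0.4]) + rng.normal(size=120)
full = sm.OLS(y, x).fit()
restricted = sm.OLS(y, x[:, :3]).fit()  # drops the last column of x
f_value, p_value, df_diff = full.compare_f_test(restricted)
print(f_value, p_value, df_diff)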
def compare_lr_test(self, restricted, large_sample=False):
"""
Likelihood ratio test to test whether restricted model is correct.
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current model.
The result instance of the restricted model is required to have two
attributes, residual sum of squares, `ssr`, residual degrees of
freedom, `df_resid`.
large_sample : bool
Flag indicating whether to use a heteroskedasticity robust version
of the LR test, which is a modified LM test.
Returns
-------
lr_stat : float
The likelihood ratio which is chisquare distributed with df_diff
degrees of freedom.
p_value : float
The p-value of the test statistic.
df_diff : int
The degrees of freedom of the restriction, i.e. difference in df
between models.
Notes
-----
The exact likelihood ratio is valid for homoskedastic data,
and is defined as
.. math:: D=-2\\log\\left(\\frac{\\mathcal{L}_{null}}
{\\mathcal{L}_{alternative}}\\right)
where :math:`\\mathcal{L}` is the likelihood of the
model. With :math:`D` distributed as chisquare with df equal
to difference in number of parameters or equivalently
difference in residual degrees of freedom.
The large sample version of the likelihood ratio is defined as
.. math:: D=n s^{\\prime}S^{-1}s
where :math:`s=n^{-1}\\sum_{i=1}^{n} s_{i}`
.. math:: s_{i} = x_{i,alternative} \\epsilon_{i,null}
is the average score of the model evaluated using the
residuals from null model and the regressors from the
alternative model and :math:`S` is the covariance of the
scores, :math:`s_{i}`. The covariance of the scores is
estimated using the same estimator as in the alternative
model.
This test compares the loglikelihood of the two models. This
may not be a valid test, if there is unspecified
heteroscedasticity or correlation. This method will issue a
warning if this is detected but still return the results
without taking unspecified heteroscedasticity or correlation
into account.
"""
# TODO: put into separate function, needs tests
# See mailing list discussion October 17,
if large_sample:
return self.compare_lm_test(restricted, use_lr=True)
has_robust1 = (getattr(self, 'cov_type', 'nonrobust') != 'nonrobust')
has_robust2 = (
getattr(restricted, 'cov_type', 'nonrobust') != 'nonrobust')
if has_robust1 or has_robust2:
warnings.warn('Likelihood Ratio test is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
llf_full = self.llf
llf_restr = restricted.llf
df_full = self.df_resid
df_restr = restricted.df_resid
lrdf = (df_restr - df_full)
lrstat = -2*(llf_restr - llf_full)
lr_pvalue = stats.chi2.sf(lrstat, lrdf)
return lrstat, lr_pvalue, lrdf | Likelihood ratio test to test whether restricted model is correct.
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current model.
The result instance of the restricted model is required to have two
attributes, residual sum of squares, `ssr`, residual degrees of
freedom, `df_resid`.
large_sample : bool
Flag indicating whether to use a heteroskedasticity robust version
of the LR test, which is a modified LM test.
Returns
-------
lr_stat : float
The likelihood ratio which is chisquare distributed with df_diff
degrees of freedom.
p_value : float
The p-value of the test statistic.
df_diff : int
The degrees of freedom of the restriction, i.e. difference in df
between models.
Notes
-----
The exact likelihood ratio is valid for homoskedastic data,
and is defined as
.. math:: D=-2\\log\\left(\\frac{\\mathcal{L}_{null}}
{\\mathcal{L}_{alternative}}\\right)
where :math:`\\mathcal{L}` is the likelihood of the
model. With :math:`D` distributed as chisquare with df equal
to difference in number of parameters or equivalently
difference in residual degrees of freedom.
The large sample version of the likelihood ratio is defined as
.. math:: D=n s^{\\prime}S^{-1}s
where :math:`s=n^{-1}\\sum_{i=1}^{n} s_{i}`
.. math:: s_{i} = x_{i,alternative} \\epsilon_{i,null}
is the average score of the model evaluated using the
residuals from null model and the regressors from the
alternative model and :math:`S` is the covariance of the
scores, :math:`s_{i}`. The covariance of the scores is
estimated using the same estimator as in the alternative
model.
This test compares the loglikelihood of the two models. This
may not be a valid test if there is unspecified
heteroscedasticity or correlation. This method will issue a
warning if this is detected but still return the results
without taking unspecified heteroscedasticity or correlation
into account. | compare_lr_test | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
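A minimal sketch of the LR test and its robust large-sample variant, not taken from the source; the simulated data are an illustrative assumption:
import numpy as np
import statsmodels.api as sm
rng = np.random.default_rng(1)
x = rng.standard_normal((300, 3))
y = x @ np.array([1.0, 0.5, 0.0]) + rng.standard_normal(300)
res_full = sm.OLS(y, sm.add_constant(x)).fit()
res_restr = sm.OLS(y, sm.add_constant(x[:, :2])).fit()  # restricted model nested in the full one
lr_stat, p_value, df_diff = res_full.compare_lr_test(res_restr)
lm_stat, lm_pvalue, _ = res_full.compare_lr_test(res_restr, large_sample=True)  # dispatches to compare_lm_test
print(lr_stat, p_value, df_diff, lm_stat, lm_pvalue)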
def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwargs):
"""
Create new results instance with robust covariance as default.
Parameters
----------
cov_type : str
The type of robust sandwich estimator to use. See Notes below.
use_t : bool
If true, then the t distribution is used for inference.
If false, then the normal distribution is used.
If `use_t` is None, then an appropriate default is used, which is
`True` if the cov_type is nonrobust, and `False` in all other
cases.
**kwargs
Required or optional arguments for robust covariance calculation.
See Notes below.
Returns
-------
RegressionResults
This method creates a new results instance with the
requested robust covariance as the default covariance of
the parameters. Inferential statistics like p-values and
hypothesis tests will be based on this covariance matrix.
Notes
-----
The following covariance types and required or optional arguments are
currently available:
- 'fixed scale' uses a predefined scale
``scale``: float, optional
Argument to set the scale. Default is 1.
- 'HC0', 'HC1', 'HC2', 'HC3': heteroscedasticity robust covariance
- no keyword arguments
- 'HAC': heteroskedasticity-autocorrelation robust covariance
``maxlags`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
currently available kernels are ['bartlett', 'uniform'],
default is Bartlett
``use_correction``: bool, optional
If true, use small sample correction
- 'cluster': clustered covariance estimator
``groups`` : array_like[int], required :
Integer-valued index of clusters or groups.
``use_correction``: bool, optional
If True the sandwich covariance is calculated with a small
sample correction.
If False the sandwich covariance is calculated without
small sample correction.
``df_correction``: bool, optional
If True (default), then the degrees of freedom for the
inferential statistics and hypothesis tests, such as
pvalues, f_pvalue, conf_int, and t_test and f_test, are
based on the number of groups minus one instead of the
total number of observations minus the number of explanatory
variables. `df_resid` of the results instance is also
adjusted. When `use_t` is also True, then pvalues are
computed using the Student's t distribution using the
corrected values. These may differ substantially from
p-values based on the normal distribution if the number of groups is
small.
If False, then `df_resid` of the results instance is not
adjusted.
- 'hac-groupsum': Driscoll and Kraay, heteroscedasticity and
autocorrelation robust covariance for panel data
# TODO: more options needed here
``time`` : array_like, required
index of time periods
``maxlags`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
The available kernels are ['bartlett', 'uniform']. The default is
Bartlett.
``use_correction`` : {False, 'hac', 'cluster'}, optional
If False the sandwich covariance is calculated without small
sample correction. If `use_correction = 'cluster'` (default),
then the same small sample correction as in the case of
`covtype='cluster'` is used.
``df_correction`` : bool, optional
The adjustment to df_resid, see cov_type 'cluster' above
- 'hac-panel': heteroscedasticity and autocorrelation robust standard
errors in panel data. The data needs to be sorted in this case, the
time series for each panel unit or cluster need to be stacked. The
membership to a time series of an individual or group can be either
specified by group indicators or by increasing time periods. One of
``groups`` or ``time`` is required. # TODO: we need more options here
``groups`` : array_like[int]
indicator for groups
``time`` : array_like[int]
index of time periods
``maxlags`` : int, required
number of lags to use
``kernel`` : {callable, str}, optional
Available kernels are ['bartlett', 'uniform'], default
is Bartlett
``use_correction`` : {False, 'hac', 'cluster'}, optional
If False the sandwich covariance is calculated without
small sample correction.
``df_correction`` : bool, optional
Adjustment to df_resid, see cov_type 'cluster' above
**Reminder**: ``use_correction`` in "hac-groupsum" and "hac-panel" is
not bool, needs to be in {False, 'hac', 'cluster'}.
.. todo:: Currently there is no check for extra or misspelled keywords,
except in the case of cov_type `HCx`
"""
from statsmodels.base.covtype import descriptions, normalize_cov_type
import statsmodels.stats.sandwich_covariance as sw
cov_type = normalize_cov_type(cov_type)
if 'kernel' in kwargs:
kwargs['weights_func'] = kwargs.pop('kernel')
if 'weights_func' in kwargs and not callable(kwargs['weights_func']):
kwargs['weights_func'] = sw.kernel_dict[kwargs['weights_func']]
# TODO: make separate function that returns a robust cov plus info
use_self = kwargs.pop('use_self', False)
if use_self:
res = self
else:
res = self.__class__(
self.model, self.params,
normalized_cov_params=self.normalized_cov_params,
scale=self.scale)
res.cov_type = cov_type
# use_t might already be defined by the class, and already set
if use_t is None:
use_t = self.use_t
res.cov_kwds = {'use_t': use_t} # store for information
res.use_t = use_t
adjust_df = False
if cov_type in ['cluster', 'hac-panel', 'hac-groupsum']:
df_correction = kwargs.get('df_correction', None)
# TODO: check also use_correction, do I need all combinations?
if df_correction is not False: # i.e. in [None, True]:
# user did not explicitely set it to False
adjust_df = True
res.cov_kwds['adjust_df'] = adjust_df
# verify and set kwargs, and calculate cov
# TODO: this should be outsourced in a function so we can reuse it in
# other models
# TODO: make it DRYer repeated code for checking kwargs
if cov_type in ['fixed scale', 'fixed_scale']:
res.cov_kwds['description'] = descriptions['fixed_scale']
res.cov_kwds['scale'] = scale = kwargs.get('scale', 1.)
res.cov_params_default = scale * res.normalized_cov_params
elif cov_type.upper() in ('HC0', 'HC1', 'HC2', 'HC3'):
if kwargs:
raise ValueError('heteroscedasticity robust covariance '
'does not use keywords')
res.cov_kwds['description'] = descriptions[cov_type.upper()]
res.cov_params_default = getattr(self, 'cov_' + cov_type.upper())
elif cov_type.lower() == 'hac':
# TODO: check if required, default in cov_hac_simple
maxlags = kwargs['maxlags']
res.cov_kwds['maxlags'] = maxlags
weights_func = kwargs.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
use_correction = kwargs.get('use_correction', False)
res.cov_kwds['use_correction'] = use_correction
res.cov_kwds['description'] = descriptions['HAC'].format(
maxlags=maxlags,
correction=['without', 'with'][use_correction])
res.cov_params_default = sw.cov_hac_simple(
self, nlags=maxlags, weights_func=weights_func,
use_correction=use_correction)
elif cov_type.lower() == 'cluster':
# cluster robust standard errors, one- or two-way
groups = kwargs['groups']
if not hasattr(groups, 'shape'):
groups = [np.squeeze(np.asarray(group)) for group in groups]
groups = np.asarray(groups).T
if groups.ndim >= 2:
groups = groups.squeeze()
res.cov_kwds['groups'] = groups
use_correction = kwargs.get('use_correction', True)
res.cov_kwds['use_correction'] = use_correction
if groups.ndim == 1:
if adjust_df:
# need to find number of groups
# duplicate work
self.n_groups = n_groups = len(np.unique(groups))
res.cov_params_default = sw.cov_cluster(
self, groups, use_correction=use_correction)
elif groups.ndim == 2:
if hasattr(groups, 'values'):
groups = groups.values
if adjust_df:
# need to find number of groups
# duplicate work
n_groups0 = len(np.unique(groups[:, 0]))
n_groups1 = len(np.unique(groups[:, 1]))
self.n_groups = (n_groups0, n_groups1)
n_groups = min(n_groups0, n_groups1) # use for adjust_df
# Note: sw.cov_cluster_2groups has 3 returns
res.cov_params_default = sw.cov_cluster_2groups(
self, groups, use_correction=use_correction)[0]
else:
raise ValueError('only two groups are supported')
res.cov_kwds['description'] = descriptions['cluster']
elif cov_type.lower() == 'hac-panel':
# cluster robust standard errors
res.cov_kwds['time'] = time = kwargs.get('time', None)
res.cov_kwds['groups'] = groups = kwargs.get('groups', None)
# TODO: nlags is currently required
# nlags = kwargs.get('nlags', True)
# res.cov_kwds['nlags'] = nlags
# TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwargs['maxlags']
use_correction = kwargs.get('use_correction', 'hac')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwargs.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if groups is not None:
groups = np.asarray(groups)
tt = (np.nonzero(groups[:-1] != groups[1:])[0] + 1).tolist()
nobs_ = len(groups)
elif time is not None:
time = np.asarray(time)
# TODO: clumsy time index in cov_nw_panel
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1).tolist()
nobs_ = len(time)
else:
raise ValueError('either time or groups needs to be given')
groupidx = lzip([0] + tt, tt + [nobs_])
self.n_groups = n_groups = len(groupidx)
res.cov_params_default = sw.cov_nw_panel(
self,
maxlags,
groupidx,
weights_func=weights_func,
use_correction=use_correction
)
res.cov_kwds['description'] = descriptions['HAC-Panel']
elif cov_type.lower() == 'hac-groupsum':
# Driscoll-Kraay standard errors
res.cov_kwds['time'] = time = kwargs['time']
# TODO: nlags is currently required
# nlags = kwargs.get('nlags', True)
# res.cov_kwds['nlags'] = nlags
# TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwargs['maxlags']
use_correction = kwargs.get('use_correction', 'cluster')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwargs.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if adjust_df:
# need to find number of groups
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1)
self.n_groups = n_groups = len(tt) + 1
res.cov_params_default = sw.cov_nw_groupsum(
self, maxlags, time, weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = descriptions['HAC-Groupsum']
else:
raise ValueError('cov_type not recognized. See docstring for ' +
'available options and spelling')
if adjust_df:
# Note: df_resid is used for scale and others, add new attribute
res.df_resid_inference = n_groups - 1
return res | Create new results instance with robust covariance as default.
Parameters
----------
cov_type : str
The type of robust sandwich estimator to use. See Notes below.
use_t : bool
If true, then the t distribution is used for inference.
If false, then the normal distribution is used.
If `use_t` is None, then an appropriate default is used, which is
`True` if the cov_type is nonrobust, and `False` in all other
cases.
**kwargs
Required or optional arguments for robust covariance calculation.
See Notes below.
Returns
-------
RegressionResults
This method creates a new results instance with the
requested robust covariance as the default covariance of
the parameters. Inferential statistics like p-values and
hypothesis tests will be based on this covariance matrix.
Notes
-----
The following covariance types and required or optional arguments are
currently available:
- 'fixed scale' uses a predefined scale
``scale``: float, optional
Argument to set the scale. Default is 1.
- 'HC0', 'HC1', 'HC2', 'HC3': heteroscedasticity robust covariance
- no keyword arguments
- 'HAC': heteroskedasticity-autocorrelation robust covariance
``maxlags`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
currently available kernels are ['bartlett', 'uniform'],
default is Bartlett
``use_correction``: bool, optional
If true, use small sample correction
- 'cluster': clustered covariance estimator
``groups`` : array_like[int], required :
Integer-valued index of clusters or groups.
``use_correction``: bool, optional
If True the sandwich covariance is calculated with a small
sample correction.
If False the sandwich covariance is calculated without
small sample correction.
``df_correction``: bool, optional
If True (default), then the degrees of freedom for the
inferential statistics and hypothesis tests, such as
pvalues, f_pvalue, conf_int, and t_test and f_test, are
based on the number of groups minus one instead of the
total number of observations minus the number of explanatory
variables. `df_resid` of the results instance is also
adjusted. When `use_t` is also True, then pvalues are
computed using the Student's t distribution using the
corrected values. These may differ substantially from
p-values based on the normal distribution if the number of groups is
small.
If False, then `df_resid` of the results instance is not
adjusted.
- 'hac-groupsum': Driscoll and Kraay, heteroscedasticity and
autocorrelation robust covariance for panel data
# TODO: more options needed here
``time`` : array_like, required
index of time periods
``maxlags`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
The available kernels are ['bartlett', 'uniform']. The default is
Bartlett.
``use_correction`` : {False, 'hac', 'cluster'}, optional
If False the sandwich covariance is calculated without small
sample correction. If `use_correction = 'cluster'` (default),
then the same small sample correction as in the case of
`covtype='cluster'` is used.
``df_correction`` : bool, optional
The adjustment to df_resid, see cov_type 'cluster' above
- 'hac-panel': heteroscedasticity and autocorrelation robust standard
errors in panel data. The data needs to be sorted in this case, the
time series for each panel unit or cluster need to be stacked. The
membership to a time series of an individual or group can be either
specified by group indicators or by increasing time periods. One of
``groups`` or ``time`` is required. # TODO: we need more options here
``groups`` : array_like[int]
indicator for groups
``time`` : array_like[int]
index of time periods
``maxlags`` : int, required
number of lags to use
``kernel`` : {callable, str}, optional
Available kernels are ['bartlett', 'uniform'], default
is Bartlett
``use_correction`` : {False, 'hac', 'cluster'}, optional
If False the sandwich covariance is calculated without
small sample correction.
``df_correction`` : bool, optional
Adjustment to df_resid, see cov_type 'cluster' above
**Reminder**: ``use_correction`` in "hac-groupsum" and "hac-panel" is
not bool, needs to be in {False, 'hac', 'cluster'}.
.. todo:: Currently there is no check for extra or misspelled keywords,
except in the case of cov_type `HCx` | get_robustcov_results | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
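A minimal sketch of switching an OLS fit to HAC and to cluster-robust inference, not taken from the source; the data, group structure, and lag length are illustrative assumptions:
import numpy as np
import statsmodels.api as sm
rng = np.random.default_rng(4)
x = sm.add_constant(rng.standard_normal((120, 2)))
y = x @ np.array([1.0, 0.5, -0.2]) + rng.standard_normal(120)
groups = np.repeat(np.arange(12), 10)  # 12 clusters of 10 observations
res = sm.OLS(y, x).fit()
res_hac = res.get_robustcov_results(cov_type='HAC', maxlags=4)
res_clu = res.get_robustcov_results(cov_type='cluster', groups=groups)
print(res.bse, res_hac.bse, res_clu.bse)  # nonrobust vs. HAC vs. cluster standard errors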
def summary(
self,
yname: str | None = None,
xname: Sequence[str] | None = None,
title: str | None = None,
alpha: float = 0.05,
slim: bool = False,
):
"""
Summarize the Regression Results.
Parameters
----------
yname : str, optional
Name of endogenous (response) variable. The Default is `y`.
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float, optional
The significance level for the confidence intervals.
slim : bool, optional
Flag indicating whether to produce a reduced summary that omits the diagnostic information.
Default is False.
Returns
-------
Summary
Instance holding the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : A class that holds summary results.
Notes
-----
For more information on regression results and diagnostic table,
see our documentation of `Examples/Linear Regression Models/Regression diagnostics`.
"""
from statsmodels.stats.stattools import (
durbin_watson,
jarque_bera,
omni_normtest,
)
alpha = float_like(alpha, "alpha", optional=False)
slim = bool_like(slim, "slim", optional=False, strict=True)
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
# TODO: Avoid adding attributes in non-__init__
self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,
omni=omni, omnipv=omnipv, condno=condno,
mineigval=eigvals[-1])
# TODO not used yet
# diagn_left_header = ['Models stats']
# diagn_right_header = ['Residual stats']
# TODO: requiring list/iterable is a bit annoying
# need more control over formatting
# TODO: default do not work if it's not identically spelled
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Least Squares']),
('Date:', None),
('Time:', None),
('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None),
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
rsquared_type = '' if self.k_constant else ' (uncentered)'
top_right = [('R-squared' + rsquared_type + ':',
["%#8.3f" % self.rsquared]),
('Adj. R-squared' + rsquared_type + ':',
["%#8.3f" % self.rsquared_adj]),
('F-statistic:', ["%#8.4g" % self.fvalue]),
('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
('Log-Likelihood:', None),
('AIC:', ["%#8.4g" % self.aic]),
('BIC:', ["%#8.4g" % self.bic])
]
if slim:
slimlist = ['Dep. Variable:', 'Model:', 'No. Observations:',
'Covariance Type:', 'R-squared:', 'Adj. R-squared:',
'F-statistic:', 'Prob (F-statistic):']
diagn_left = diagn_right = []
top_left = [elem for elem in top_left if elem[0] in slimlist]
top_right = [elem for elem in top_right if elem[0] in slimlist]
top_right = top_right + \
[("", [])] * (len(top_left) - len(top_right))
else:
diagn_left = [('Omnibus:', ["%#6.3f" % omni]),
('Prob(Omnibus):', ["%#6.3f" % omnipv]),
('Skew:', ["%#6.3f" % skew]),
('Kurtosis:', ["%#6.3f" % kurtosis])
]
diagn_right = [('Durbin-Watson:',
["%#8.3f" % durbin_watson(self.wresid)]
),
('Jarque-Bera (JB):', ["%#8.3f" % jb]),
('Prob(JB):', ["%#8.3g" % jbpv]),
('Cond. No.', ["%#8.3g" % condno])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
# create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
if not slim:
smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
yname=yname, xname=xname,
title="")
# add warnings/notes, added to text format only
etext = []
if not self.k_constant:
etext.append(
"R² is computed without centering (uncentered) since the "
"model does not contain a constant."
)
if hasattr(self, 'cov_type'):
etext.append(self.cov_kwds['description'])
if self.model.exog.shape[0] < self.model.exog.shape[1]:
wstr = "The input rank is higher than the number of observations."
etext.append(wstr)
if eigvals[-1] < 1e-10:
wstr = "The smallest eigenvalue is %6.3g. This might indicate "
wstr += "that there are\n"
wstr += "strong multicollinearity problems or that the design "
wstr += "matrix is singular."
wstr = wstr % eigvals[-1]
etext.append(wstr)
elif condno > 1000: # TODO: what is recommended?
wstr = "The condition number is large, %6.3g. This might "
wstr += "indicate that there are\n"
wstr += "strong multicollinearity or other numerical "
wstr += "problems."
wstr = wstr % condno
etext.append(wstr)
if etext:
etext = [f"[{i + 1}] {text}"
for i, text in enumerate(etext)]
etext.insert(0, "Notes:")
smry.add_extra_txt(etext)
return smry | Summarize the Regression Results.
Parameters
----------
yname : str, optional
Name of endogenous (response) variable. The Default is `y`.
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float, optional
The significance level for the confidence intervals.
slim : bool, optional
Flag indicating whether to produce a reduced summary that omits the diagnostic information.
Default is False.
Returns
-------
Summary
Instance holding the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : A class that holds summary results.
Notes
-----
For more information on regression results and diagnostic table,
see our documentation of `Examples/Linear Regression Models/Regression diagnostics`. | summary | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
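A minimal sketch of the summary options, not taken from the source; it assumes a statsmodels version recent enough to support the `slim` keyword, and the simulated data are illustrative:
import numpy as np
import statsmodels.api as sm
rng = np.random.default_rng(2)
x = sm.add_constant(rng.standard_normal((80, 2)))
y = x @ np.array([1.0, 2.0, -0.5]) + rng.standard_normal(80)
res = sm.OLS(y, x).fit()
print(res.summary(xname=['const', 'x1', 'x2'], alpha=0.10))  # custom names, 90% intervals
print(res.summary(slim=True))  # reduced top table, no diagnostic block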
def summary2(
self,
yname: str | None = None,
xname: Sequence[str] | None = None,
title: str | None = None,
alpha: float = 0.05,
float_format: str = "%.4f",
):
"""
Experimental summary function to summarize the regression results.
Parameters
----------
yname : str
The name of the dependent variable (optional).
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
float_format : str
The format for floats in parameters summary.
Returns
-------
Summary
Instance holding the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary
A class that holds summary results.
"""
# Diagnostics
from statsmodels.stats.stattools import (
durbin_watson,
jarque_bera,
omni_normtest,
)
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
dw = durbin_watson(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
diagnostic = dict([
('Omnibus:', "%.3f" % omni),
('Prob(Omnibus):', "%.3f" % omnipv),
('Skew:', "%.3f" % skew),
('Kurtosis:', "%.3f" % kurtosis),
('Durbin-Watson:', "%.3f" % dw),
('Jarque-Bera (JB):', "%.3f" % jb),
('Prob(JB):', "%.3f" % jbpv),
('Condition No.:', "%.0f" % condno)
])
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
smry.add_dict(diagnostic)
etext = []
if not self.k_constant:
etext.append(
"R² is computed without centering (uncentered) since the \
model does not contain a constant."
)
if hasattr(self, 'cov_type'):
etext.append(self.cov_kwds['description'])
if self.model.exog.shape[0] < self.model.exog.shape[1]:
wstr = "The input rank is higher than the number of observations."
etext.append(wstr)
# Warnings
if eigvals[-1] < 1e-10:
warn = "The smallest eigenvalue is %6.3g. This might indicate that\
there are strong multicollinearity problems or that the design\
matrix is singular." % eigvals[-1]
etext.append(warn)
elif condno > 1000:
warn = "The condition number is large, %6.3g. This might indicate\
that there are strong multicollinearity or other numerical\
problems." % condno
etext.append(warn)
if etext:
etext = [f"[{i + 1}] {text}"
for i, text in enumerate(etext)]
etext.insert(0, "Notes:")
for line in etext:
smry.add_text(line)
return smry | Experimental summary function to summarize the regression results.
Parameters
----------
yname : str
The name of the dependent variable (optional).
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
float_format : str
The format for floats in parameters summary.
Returns
-------
Summary
Instance holding the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary
A class that holds summary results. | summary2 | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def get_influence(self):
"""
Calculate influence and outlier measures.
Returns
-------
OLSInfluence
The instance containing methods to calculate the main influence and
outlier measures for the OLS regression.
See Also
--------
statsmodels.stats.outliers_influence.OLSInfluence
A class that exposes methods to examine observation influence.
"""
from statsmodels.stats.outliers_influence import OLSInfluence
return OLSInfluence(self) | Calculate influence and outlier measures.
Returns
-------
OLSInfluence
The instance containing methods to calculate the main influence and
outlier measures for the OLS regression.
See Also
--------
statsmodels.stats.outliers_influence.OLSInfluence
A class that exposes methods to examine observation influence. | get_influence | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
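A minimal sketch of inspecting the returned OLSInfluence object, not taken from the source; the simulated data are illustrative:
import numpy as np
import statsmodels.api as sm
rng = np.random.default_rng(3)
x = sm.add_constant(rng.standard_normal((100, 2)))
y = x @ np.array([1.0, 2.0, -1.0]) + rng.standard_normal(100)
infl = sm.OLS(y, x).fit().get_influence()
print(infl.hat_matrix_diag[:5])     # leverage values
print(infl.cooks_distance[0][:5])   # Cook's distances
print(infl.summary_frame().head())  # DataFrame collecting the main measures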
def outlier_test(self, method='bonf', alpha=.05, labels=None,
order=False, cutoff=None):
"""
Test observations for outliers according to method.
Parameters
----------
method : str
The method to use in the outlier test. Must be one of:
- `bonferroni` : one-step correction
- `sidak` : one-step correction
- `holm-sidak` :
- `holm` :
- `simes-hochberg` :
- `hommel` :
- `fdr_bh` : Benjamini/Hochberg
- `fdr_by` : Benjamini/Yekutieli
See `statsmodels.stats.multitest.multipletests` for details.
alpha : float
The familywise error rate (FWER).
labels : None or array_like
If `labels` is not None, then it will be used as index to the
returned pandas DataFrame. See also Returns below.
order : bool
Whether or not to order the results by the absolute value of the
studentized residuals. If labels are provided they will also be
sorted.
cutoff : None or float in [0, 1]
If cutoff is not None, then the return only includes observations
with multiple testing corrected p-values strictly below the cutoff.
The returned array or dataframe can be empty if no observation has a
corrected p-value below the cutoff.
Returns
-------
array_like
Returns either an ndarray or a DataFrame if labels is not None.
Will attempt to get labels from model_results if available. The
columns are the Studentized residuals, the unadjusted p-value,
and the corrected p-value according to method.
Notes
-----
The unadjusted p-value is stats.t.sf(abs(resid), df) where
df = df_resid - 1.
"""
from statsmodels.stats.outliers_influence import outlier_test
return outlier_test(self, method, alpha, labels=labels,
order=order, cutoff=cutoff) | Test observations for outliers according to method.
Parameters
----------
method : str
The method to use in the outlier test. Must be one of:
- `bonferroni` : one-step correction
- `sidak` : one-step correction
- `holm-sidak` :
- `holm` :
- `simes-hochberg` :
- `hommel` :
- `fdr_bh` : Benjamini/Hochberg
- `fdr_by` : Benjamini/Yekutieli
See `statsmodels.stats.multitest.multipletests` for details.
alpha : float
The familywise error rate (FWER).
labels : None or array_like
If `labels` is not None, then it will be used as index to the
returned pandas DataFrame. See also Returns below.
order : bool
Whether or not to order the results by the absolute value of the
studentized residuals. If labels are provided they will also be
sorted.
cutoff : None or float in [0, 1]
If cutoff is not None, then the return only includes observations
with multiple testing corrected p-values strictly below the cutoff.
The returned array or dataframe can be empty if no observation has a
corrected p-value below the cutoff.
Returns
-------
array_like
Returns either an ndarray or a DataFrame if labels is not None.
Will attempt to get labels from model_results if available. The
columns are the Studentized residuals, the unadjusted p-value,
and the corrected p-value according to method.
Notes
-----
The unadjusted p-value is stats.t.sf(abs(resid), df) where
df = df_resid - 1. | outlier_test | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
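A minimal sketch of the Bonferroni outlier test with one planted outlier, not taken from the source; the simulated data are illustrative:
import numpy as np
import statsmodels.api as sm
rng = np.random.default_rng(9)
x = sm.add_constant(rng.standard_normal((100, 2)))
y = x @ np.array([1.0, 2.0, -1.0]) + rng.standard_normal(100)
y[0] += 8.0  # plant one gross outlier
res = sm.OLS(y, x).fit()
table = res.outlier_test(method='bonf')  # studentized residual, unadjusted and corrected p-values
print(table[:5])
print(res.outlier_test(method='bonf', cutoff=0.05))  # only observations below the cutoff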
def el_test(self, b0_vals, param_nums, return_weights=0, ret_params=0,
method='nm', stochastic_exog=1):
"""
Test single or joint hypotheses using Empirical Likelihood.
Parameters
----------
b0_vals : 1darray
The hypothesized value of the parameter to be tested.
param_nums : 1darray
The parameter number to be tested.
return_weights : bool
If true, returns the weights that optimize the likelihood
ratio at b0_vals. The default is False.
ret_params : bool
If true, returns the parameter vector that maximizes the likelihood
ratio at b0_vals. Also returns the weights. The default is False.
method : str
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
The default is 'nm'.
stochastic_exog : bool
When True, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. The default is True.
Returns
-------
tuple
The p-value and -2 times the log-likelihood ratio for the
hypothesized values.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.stackloss.load()
>>> endog = data.endog
>>> exog = sm.add_constant(data.exog)
>>> model = sm.OLS(endog, exog)
>>> fitted = model.fit()
>>> fitted.params
array([-39.91967442, 0.7156402 , 1.29528612, -0.15212252])
>>> fitted.rsquared
0.91357690446068196
>>> # Test that the slope on the first variable is 0
>>> fitted.el_test([0], [1])
(27.248146353888796, 1.7894660442330235e-07)
"""
params = np.copy(self.params)
opt_fun_inst = _ELRegOpts() # to store weights
if len(param_nums) == len(params):
llr = opt_fun_inst._opt_nuis_regress(
[],
param_nums=param_nums,
endog=self.model.endog,
exog=self.model.exog,
nobs=self.model.nobs,
nvar=self.model.exog.shape[1],
params=params,
b0_vals=b0_vals,
stochastic_exog=stochastic_exog)
pval = 1 - stats.chi2.cdf(llr, len(param_nums))
if return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval
x0 = np.delete(params, param_nums)
args = (param_nums, self.model.endog, self.model.exog,
self.model.nobs, self.model.exog.shape[1], params,
b0_vals, stochastic_exog)
if method == 'nm':
llr = optimize.fmin(opt_fun_inst._opt_nuis_regress, x0,
maxfun=10000, maxiter=10000, full_output=1,
disp=0, args=args)[1]
if method == 'powell':
llr = optimize.fmin_powell(opt_fun_inst._opt_nuis_regress, x0,
full_output=1, disp=0,
args=args)[1]
pval = 1 - stats.chi2.cdf(llr, len(param_nums))
if ret_params:
return llr, pval, opt_fun_inst.new_weights, opt_fun_inst.new_params
elif return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval | Test single or joint hypotheses using Empirical Likelihood.
Parameters
----------
b0_vals : 1darray
The hypothesized value of the parameter to be tested.
param_nums : 1darray
The parameter number to be tested.
return_weights : bool
If true, returns the weights that optimize the likelihood
ratio at b0_vals. The default is False.
ret_params : bool
If true, returns the parameter vector that maximizes the likelihood
ratio at b0_vals. Also returns the weights. The default is False.
method : str
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
The default is 'nm'.
stochastic_exog : bool
When True, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. The default is True.
Returns
-------
tuple
The p-value and -2 times the log-likelihood ratio for the
hypothesized values.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.stackloss.load()
>>> endog = data.endog
>>> exog = sm.add_constant(data.exog)
>>> model = sm.OLS(endog, exog)
>>> fitted = model.fit()
>>> fitted.params
array([-39.91967442, 0.7156402 , 1.29528612, -0.15212252])
>>> fitted.rsquared
0.91357690446068196
>>> # Test that the slope on the first variable is 0
>>> fitted.el_test([0], [1])
(27.248146353888796, 1.7894660442330235e-07) | el_test | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def conf_int_el(self, param_num, sig=.05, upper_bound=None,
lower_bound=None, method='nm', stochastic_exog=True):
"""
Compute the confidence interval using Empirical Likelihood.
Parameters
----------
param_num : float
The parameter for which the confidence interval is desired.
sig : float
The significance level. Default is 0.05.
upper_bound : float
The maximum value the upper limit can be. Default is the
99.9% confidence value under OLS assumptions.
lower_bound : float
The minimum value the lower limit can be. Default is the 99.9%
confidence value under OLS assumptions.
method : str
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
The default is 'nm'.
stochastic_exog : bool
When True, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. The default is True.
Returns
-------
lowerl : float
The lower bound of the confidence interval.
upperl : float
The upper bound of the confidence interval.
See Also
--------
el_test : Test parameters using Empirical Likelihood.
Notes
-----
This function uses brentq to find the value of beta where
test_beta([beta], param_num)[1] is equal to the critical value.
The function returns the results of each iteration of brentq at each
value of beta.
The current function value of the last printed optimization should be
the critical value at the desired significance level. For alpha=.05,
the value is 3.841459.
To ensure optimization terminated successfully, it is suggested to do
el_test([lower_limit], [param_num]).
If the optimization does not terminate successfully, consider switching
optimization algorithms.
If optimization is still not successful, try changing the values of
start_int_params. If the current function value repeatedly jumps
from a number between 0 and the critical value and a very large number
(>50), the starting parameters of the interior minimization need
to be changed.
"""
r0 = stats.chi2.ppf(1 - sig, 1)
if upper_bound is None:
upper_bound = self.conf_int(.01)[param_num][1]
if lower_bound is None:
lower_bound = self.conf_int(.01)[param_num][0]
def f(b0):
return self.el_test(np.array([b0]), np.array([param_num]),
method=method,
stochastic_exog=stochastic_exog)[0] - r0
lowerl = optimize.brenth(f, lower_bound,
self.params[param_num])
upperl = optimize.brenth(f, self.params[param_num],
upper_bound)
# ^ Seems to be faster than brentq in most cases
return (lowerl, upperl) | Compute the confidence interval using Empirical Likelihood.
Parameters
----------
param_num : float
The parameter for which the confidence interval is desired.
sig : float
The significance level. Default is 0.05.
upper_bound : float
The maximum value the upper limit can be. Default is the
99.9% confidence value under OLS assumptions.
lower_bound : float
The minimum value the lower limit can be. Default is the 99.9%
confidence value under OLS assumptions.
method : str
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
The default is 'nm'.
stochastic_exog : bool
When True, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. The default is True.
Returns
-------
lowerl : float
The lower bound of the confidence interval.
upperl : float
The upper bound of the confidence interval.
See Also
--------
el_test : Test parameters using Empirical Likelihood.
Notes
-----
This function uses brentq to find the value of beta where
test_beta([beta], param_num)[1] is equal to the critical value.
The function returns the results of each iteration of brentq at each
value of beta.
The current function value of the last printed optimization should be
the critical value at the desired significance level. For alpha=.05,
the value is 3.841459.
To ensure optimization terminated successfully, it is suggested to do
el_test([lower_limit], [param_num]).
If the optimization does not terminate successfully, consider switching
optimization algorithms.
If optimization is still not successful, try changing the values of
start_int_params. If the current function value repeatedly jumps
from a number between 0 and the critical value and a very large number
(>50), the starting parameters of the interior minimization need
to be changed. | conf_int_el | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
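A minimal sketch of an empirical-likelihood interval on the stackloss data used in the el_test example above, not taken from the source:
import statsmodels.api as sm
data = sm.datasets.stackloss.load()
exog = sm.add_constant(data.exog)
res = sm.OLS(data.endog, exog).fit()
lower, upper = res.conf_int_el(param_num=1, sig=0.05)  # EL interval for the first slope
print(lower, upper)
print(res.conf_int(alpha=0.05))  # conventional intervals for comparison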
def test_all(self):
d = macrodata.load_pandas().data
#import datasetswsm.greene as g
#d = g.load('5-1')
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv'].values))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp'].values))
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, d['realint'][:-1].values])
res_ols = OLS(endogg, exogg).fit()
#print res_ols.params
mod_g1 = GLSAR(endogg, exogg, rho=-0.108136)
res_g1 = mod_g1.fit()
#print res_g1.params
mod_g2 = GLSAR(endogg, exogg, rho=-0.108136) #-0.1335859) from R
res_g2 = mod_g2.iterative_fit(maxiter=5)
#print res_g2.params
rho = -0.108136
# coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
partable = np.array([
[-9.50990, 0.990456, -9.602, 3.65e-018, -11.4631, -7.55670], # ***
[ 4.37040, 0.208146, 21.00, 2.93e-052, 3.95993, 4.78086], # ***
[-0.579253, 0.268009, -2.161, 0.0319, -1.10777, -0.0507346]]) # **
#Statistics based on the rho-differenced data:
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.113973),
endog_std = ("S.D. dependent var", 18.67447),
ssr = ("Sum squared resid", 22530.90),
mse_resid_sqrt = ("S.E. of regression", 10.66735),
rsquared = ("R-squared", 0.676973),
rsquared_adj = ("Adjusted R-squared", 0.673710),
fvalue = ("F(2, 198)", 221.0475),
f_pvalue = ("P-value(F)", 3.56e-51),
resid_acf1 = ("rho", -0.003481),
dw = ("Durbin-Watson", 1.993858))
#fstatistic, p-value, df1, df2
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
#LM-statistic, p-value, df
arch_4 = [7.30776, 0.120491, 4, "chi2"]
#multicollinearity
#Chi-square(2): test-statistic, pvalue, df
#tests
res = res_g1 #with rho from Gretl
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 6)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=4)
assert_allclose(res.f_pvalue,
result_gretl_g1['f_pvalue'][1],
rtol=1e-2)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=4)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
#tests
res = res_g2 #with estimated rho
#estimated lag coefficient
assert_almost_equal(res.model.rho, rho, decimal=3)
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 3)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=0)
assert_almost_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], decimal=6)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(2,4))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(2,4))
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=1)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=2)
'''
Performing iterative calculation of rho...
ITER RHO ESS
1 -0.10734 22530.9
2 -0.10814 22530.9
Model 4: Cochrane-Orcutt, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
rho = -0.108136
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.50990 0.990456 -9.602 3.65e-018 ***
ds_l_realgdp 4.37040 0.208146 21.00 2.93e-052 ***
realint_1 -0.579253 0.268009 -2.161 0.0319 **
Statistics based on the rho-differenced data:
Mean dependent var 3.113973 S.D. dependent var 18.67447
Sum squared resid 22530.90 S.E. of regression 10.66735
R-squared 0.676973 Adjusted R-squared 0.673710
F(2, 198) 221.0475 P-value(F) 3.56e-51
rho -0.003481 Durbin-Watson 1.993858
'''
'''
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023:
'''
'''
Test for ARCH of order 4
coefficient std. error t-ratio p-value
--------------------------------------------------------
alpha(0) 97.0386 20.3234 4.775 3.56e-06 ***
alpha(1) 0.176114 0.0714698 2.464 0.0146 **
alpha(2) -0.0488339 0.0724981 -0.6736 0.5014
alpha(3) -0.0705413 0.0737058 -0.9571 0.3397
alpha(4) 0.0384531 0.0725763 0.5298 0.5968
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491:
'''
'''
Variance Inflation Factors
Minimum possible value = 1.0
Values > 10.0 may indicate a collinearity problem
ds_l_realgdp 1.002
realint_1 1.002
VIF(j) = 1/(1 - R(j)^2), where R(j) is the multiple correlation coefficient
between variable j and the other independent variables
Properties of matrix X'X:
1-norm = 6862.0664
Determinant = 1.0296049e+009
Reciprocal condition number = 0.013819244
'''
'''
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491
Test of common factor restriction -
Null hypothesis: restriction is acceptable
Test statistic: F(2, 195) = 0.426391
with p-value = P(F(2, 195) > 0.426391) = 0.653468
Test for normality of residual -
Null hypothesis: error is normally distributed
Test statistic: Chi-square(2) = 20.2792
with p-value = 3.94837e-005:
'''
#no idea what this is
'''
Augmented regression for common factor test
OLS, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
coefficient std. error t-ratio p-value
---------------------------------------------------------------
const -10.9481 1.35807 -8.062 7.44e-014 ***
ds_l_realgdp 4.28893 0.229459 18.69 2.40e-045 ***
realint_1 -0.662644 0.334872 -1.979 0.0492 **
ds_l_realinv_1 -0.108892 0.0715042 -1.523 0.1294
ds_l_realgdp_1 0.660443 0.390372 1.692 0.0923 *
realint_2 0.0769695 0.341527 0.2254 0.8219
Sum of squared residuals = 22432.8
Test of common factor restriction
Test statistic: F(2, 195) = 0.426391, with p-value = 0.653468
'''
################ with OLS, HAC errors
#Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
#Dependent variable: ds_l_realinv
#HAC standard errors, bandwidth 4 (Bartlett kernel)
#coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
#for confidence interval t(199, 0.025) = 1.972
partable = np.array([
[-9.48167, 1.17709, -8.055, 7.17e-014, -11.8029, -7.16049], # ***
[4.37422, 0.328787, 13.30, 2.62e-029, 3.72587, 5.02258], #***
[-0.613997, 0.293619, -2.091, 0.0378, -1.19300, -0.0349939]]) # **
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.257395),
endog_std = ("S.D. dependent var", 18.73915),
ssr = ("Sum squared resid", 22799.68),
mse_resid_sqrt = ("S.E. of regression", 10.70380),
rsquared = ("R-squared", 0.676978),
rsquared_adj = ("Adjusted R-squared", 0.673731),
fvalue = ("F(2, 199)", 90.79971),
f_pvalue = ("P-value(F)", 9.53e-29),
llf = ("Log-likelihood", -763.9752),
aic = ("Akaike criterion", 1533.950),
bic = ("Schwarz criterion", 1543.875),
hqic = ("Hannan-Quinn", 1537.966),
resid_acf1 = ("rho", -0.107341),
dw = ("Durbin-Watson", 2.213805))
#for logs: dropping 70 nan or incomplete observations, T=133
#(res_ols.model.exog <=0).any(1).sum() = 69 ?not 70
linear_squares = [7.52477, 0.0232283, 2, "chi2"]
#Autocorrelation, Breusch-Godfrey test for autocorrelation up to order 4
#break
#see cusum results in files
arch_4 = [3.43473, 0.487871, 4, "chi2"]
het_white = [33.503723, 0.000003, 5, "chi2"]
het_breusch_pagan_konker = [0.709924, 0.701200, 2, "chi2"]
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
names = 'date residual leverage influence DFFITS'.split()
cur_dir = os.path.abspath(os.path.dirname(__file__))
fpath = os.path.join(cur_dir, 'results/leverage_influence_ols_nostars.txt')
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=1,
converters={0:lambda s: s})
#either numpy 1.6 or python 3.2 changed behavior
if np.isnan(lev[-1]['f1']):
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=2,
converters={0:lambda s: s})
lev.dtype.names = names
res = res_ols #for easier copying
cov_hac = sw.cov_hac_simple(res, nlags=4, use_correction=False)
bse_hac = sw.se_cov(cov_hac)
assert_almost_equal(res.params, partable[:,0], 5)
assert_almost_equal(bse_hac, partable[:,1], 5)
#TODO
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=4) #not in gretl
assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=6) #FAIL
assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=6) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
#f-value is based on cov_hac I guess
#res2 = res.get_robustcov_results(cov_type='HC1')
# TODO: fvalue differs from Gretl, trying any of the HCx
#assert_almost_equal(res2.fvalue, result_gretl_g1['fvalue'][1], decimal=0) #FAIL
#assert_approx_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], significant=1) #FAIL
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(6,5))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(6,5))
linear_sq = smsdia.linear_lm(res.resid, res.model.exog)
assert_almost_equal(linear_sq[0], linear_squares[0], decimal=6)
assert_almost_equal(linear_sq[1], linear_squares[1], decimal=7)
hbpk = smsdia.het_breuschpagan(res.resid, res.model.exog)
assert_almost_equal(hbpk[0], het_breusch_pagan_konker[0], decimal=6)
assert_almost_equal(hbpk[1], het_breusch_pagan_konker[1], decimal=6)
hw = smsdia.het_white(res.resid, res.model.exog)
assert_almost_equal(hw[:2], het_white[:2], 6)
#arch
#sm_arch = smsdia.acorr_lm(res.resid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.resid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=5)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
[oi.variance_inflation_factor(res.model.exog, k) for k in [1,2]]
infl = oi.OLSInfluence(res_ols)
#print np.max(np.abs(lev['DFFITS'] - infl.dffits[0]))
#print np.max(np.abs(lev['leverage'] - infl.hat_matrix_diag))
#print np.max(np.abs(lev['influence'] - infl.influence)) #just added this based on Gretl
#just rough test, low decimal in Gretl output,
assert_almost_equal(lev['residual'], res.resid, decimal=3)
assert_almost_equal(lev['DFFITS'], infl.dffits[0], decimal=3)
assert_almost_equal(lev['leverage'], infl.hat_matrix_diag, decimal=3)
assert_almost_equal(lev['influence'], infl.influence, decimal=4) | Performing iterative calculation of rho...
ITER RHO ESS
1 -0.10734 22530.9
2 -0.10814 22530.9
Model 4: Cochrane-Orcutt, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
rho = -0.108136
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.50990 0.990456 -9.602 3.65e-018 ***
ds_l_realgdp 4.37040 0.208146 21.00 2.93e-052 ***
realint_1 -0.579253 0.268009 -2.161 0.0319 **
Statistics based on the rho-differenced data:
Mean dependent var 3.113973 S.D. dependent var 18.67447
Sum squared resid 22530.90 S.E. of regression 10.66735
R-squared 0.676973 Adjusted R-squared 0.673710
F(2, 198) 221.0475 P-value(F) 3.56e-51
rho -0.003481 Durbin-Watson 1.993858 | test_all | python | statsmodels/statsmodels | statsmodels/regression/tests/test_glsar_gretl.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/tests/test_glsar_gretl.py | BSD-3-Clause |
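A minimal sketch of the workflow this test checks against gretl, not taken verbatim from the source: OLS versus a GLSAR fit with AR(1) errors on the same growth-rate data:
import numpy as np
import statsmodels.api as sm
d = sm.datasets.macrodata.load_pandas().data
gs_l_realinv = 400 * np.diff(np.log(d['realinv'].values))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp'].values))
exog = sm.add_constant(np.c_[gs_l_realgdp, d['realint'][:-1].values])
res_ols = sm.OLS(gs_l_realinv, exog).fit()
mod_ar1 = sm.GLSAR(gs_l_realinv, exog, rho=1)  # one AR lag; rho is re-estimated below
res_ar1 = mod_ar1.iterative_fit(maxiter=5)
print(res_ols.params, res_ar1.params, mod_ar1.rho)  # rho should come out near -0.108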
def _adjust_missing(self):
"""
Implements alternatives for handling missing values
"""
def keep_col(x):
index = np.logical_not(np.any(np.isnan(x), 0))
return x[:, index], index
def keep_row(x):
index = np.logical_not(np.any(np.isnan(x), 1))
return x[index, :], index
if self._missing == 'drop-col':
self._adjusted_data, index = keep_col(self.data)
self.cols = np.where(index)[0]
self.weights = self.weights[index]
elif self._missing == 'drop-row':
self._adjusted_data, index = keep_row(self.data)
self.rows = np.where(index)[0]
elif self._missing == 'drop-min':
drop_col, drop_col_index = keep_col(self.data)
drop_col_size = drop_col.size
drop_row, drop_row_index = keep_row(self.data)
drop_row_size = drop_row.size
if drop_row_size > drop_col_size:
self._adjusted_data = drop_row
self.rows = np.where(drop_row_index)[0]
else:
self._adjusted_data = drop_col
self.weights = self.weights[drop_col_index]
self.cols = np.where(drop_col_index)[0]
elif self._missing == 'fill-em':
self._adjusted_data = self._fill_missing_em()
elif self._missing is None:
if not np.isfinite(self._adjusted_data).all():
raise ValueError("""\
data contains non-finite values (inf, NaN). You should drop these values or
use one of the methods for adjusting data for missing-values.""")
else:
raise ValueError('missing method is not known.')
if self._index is not None:
self._columns = self._columns[self.cols]
self._index = self._index[self.rows]
# Check adjusted data size
if self._adjusted_data.size == 0:
raise ValueError('Removal of missing values has eliminated '
'all data.') | Implements alternatives for handling missing values | _adjust_missing | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
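A minimal sketch of the `missing` options dispatched above, not taken from the source; the simulated data are illustrative:
import numpy as np
from statsmodels.multivariate.pca import PCA
rng = np.random.default_rng(5)
x = rng.standard_normal((100, 10))
x[::7, 3] = np.nan  # introduce some missing cells
pc_row = PCA(x, ncomp=2, missing='drop-row')  # keep only complete rows
pc_em = PCA(x, ncomp=2, missing='fill-em')    # EM-fill the missing cells
print(pc_row.factors.shape, pc_em.factors.shape)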
def _compute_gls_weights(self):
"""
Computes GLS weights based on percentage of data fit
"""
projection = np.asarray(self.project(transform=False))
errors = self.transformed_data - projection
if self._ncomp == self._nvar:
raise ValueError('gls can only be used when ncomp < nvar '
'so that residuals have non-zero variance')
var = (errors ** 2.0).mean(0)
weights = 1.0 / var
weights = weights / np.sqrt((weights ** 2.0).mean())
nvar = self._nvar
eff_series_perc = (1.0 / sum((weights / weights.sum()) ** 2.0)) / nvar
if eff_series_perc < 0.1:
eff_series = int(np.round(eff_series_perc * nvar))
import warnings
warn = f"""\
Many series are being down weighted by GLS. Of the {nvar} series, the GLS
estimates are based on only {eff_series} (effective) series."""
warnings.warn(warn, EstimationWarning)
self.weights = weights | Computes GLS weights based on percentage of data fit | _compute_gls_weights | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
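A minimal sketch of triggering the GLS down-weighting computed above (it requires ncomp < nvar), not taken from the source:
import numpy as np
from statsmodels.multivariate.pca import PCA
rng = np.random.default_rng(6)
x = rng.standard_normal((200, 8))
pc_gls = PCA(x, ncomp=3, gls=True)
print(pc_gls.weights)  # per-series weights; roughly 1 when all series fit equally well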
def _pca(self):
"""
Main PCA routine
"""
self._compute_eig()
self._compute_pca_from_eig()
self.projection = self.project() | Main PCA routine | _pca | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
def _prepare_data(self):
"""
Standardize or demean data.
"""
adj_data = self._adjusted_data
if np.all(np.isnan(adj_data)):
return np.full(adj_data.shape[1], np.nan)  # np.empty(...).fill(...) would return None, not an array
self._mu = np.nanmean(adj_data, axis=0)
self._sigma = np.sqrt(np.nanmean((adj_data - self._mu) ** 2.0, axis=0))
if self._standardize:
data = (adj_data - self._mu) / self._sigma
elif self._demean:
data = (adj_data - self._mu)
else:
data = adj_data
return data / np.sqrt(self.weights) | Standardize or demean data. | _prepare_data | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
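A minimal sketch contrasting the standardize and demean paths above, not taken from the source; the column scales are an illustrative assumption:
import numpy as np
from statsmodels.multivariate.pca import PCA
rng = np.random.default_rng(7)
x = rng.standard_normal((150, 4)) * np.array([1.0, 2.0, 5.0, 10.0])  # very different scales
pc_corr = PCA(x, ncomp=2, standardize=True)               # correlation-based, scale-free
pc_cov = PCA(x, ncomp=2, standardize=False, demean=True)  # covariance-based, dominated by the wide column
print(pc_corr.loadings[:, 0])
print(pc_cov.loadings[:, 0])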
def _compute_eig(self):
"""
Wrapper for actual eigenvalue method
This is a workaround to avoid instance methods in __dict__
"""
if self._method == 'eig':
return self._compute_using_eig()
elif self._method == 'svd':
return self._compute_using_svd()
else: # self._method == 'nipals'
return self._compute_using_nipals() | Wrapper for actual eigenvalue method
This is a workaround to avoid instance methods in __dict__ | _compute_eig | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
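A minimal sketch of choosing the eigenvalue backend dispatched above, not taken from the source:
import numpy as np
from statsmodels.multivariate.pca import PCA
rng = np.random.default_rng(8)
x = rng.standard_normal((500, 20))
pc_svd = PCA(x, ncomp=3, method='svd')     # default
pc_eig = PCA(x, ncomp=3, method='eig')     # eigenvalue decomposition route
pc_nip = PCA(x, ncomp=3, method='nipals')  # iterative; cheaper when ncomp << nvar
print(pc_svd.factors.shape, pc_eig.factors.shape, pc_nip.factors.shape)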