code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def burg(endog, order=1, demean=True):
"""
Compute Burg's AR(p) parameter estimator.
Parameters
----------
endog : array_like
The endogenous variable.
order : int, optional
Order of the AR. Default is 1.
demean : bool, optional
Flag indicating to subtract the mean from endog before estimation.
Returns
-------
rho : ndarray
The AR(p) coefficients computed using Burg's algorithm.
sigma2 : float
The estimate of the residual variance.
See Also
--------
yule_walker : Estimate AR parameters using the Yule-Walker method.
Notes
-----
AR model estimated includes a constant that is estimated using the sample
mean (see [1]_). This value is not reported.
References
----------
.. [1] Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series
and forecasting. Springer.
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.datasets.sunspots import load
>>> data = load()
>>> rho, sigma2 = sm.regression.linear_model.burg(data.endog, order=4)
>>> rho
array([ 1.30934186, -0.48086633, -0.20185982, 0.05501941])
>>> sigma2
271.2467306963966
"""
# Avoid circular imports
from statsmodels.tsa.stattools import levinson_durbin_pacf, pacf_burg
endog = np.squeeze(np.asarray(endog))
if endog.ndim != 1:
raise ValueError('endog must be 1-d or squeezable to 1-d.')
order = int(order)
if order < 1:
raise ValueError('order must be an integer larger than or equal to 1')
if demean:
endog = endog - endog.mean()
pacf, sigma = pacf_burg(endog, order, demean=demean)
ar, _ = levinson_durbin_pacf(pacf)
return ar, sigma[-1] | Compute Burg's AR(p) parameter estimator. | burg | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
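As a quick illustration of the estimator above (a sketch, not part of the source; the simulated series, seed, and AR coefficient are illustrative), Burg's method can be applied to a synthetic AR(1) series and compared against the Yule-Walker estimator from the same module:

import numpy as np
from statsmodels.regression.linear_model import burg, yule_walker

rng = np.random.default_rng(0)
e = rng.standard_normal(500)
y = np.zeros(500)
for t in range(1, 500):          # simulate y_t = 0.6 * y_{t-1} + e_t
    y[t] = 0.6 * y[t - 1] + e[t]

rho_burg, sigma2 = burg(y, order=1)
rho_yw, sigma_yw = yule_walker(y, order=1)   # yule_walker reports sigma, not sigma**2
print(rho_burg, sigma2)                      # rho_burg should be close to 0.6
print(rho_yw, sigma_yw)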
def conf_int(self, alpha=.05, cols=None):
"""
Compute the confidence interval of the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval. The default
`alpha` = .05 returns a 95% confidence interval.
cols : array_like, optional
Columns to include in returned confidence intervals.
Returns
-------
array_like
The confidence intervals.
Notes
-----
The confidence interval is based on Student's t-distribution.
"""
# keep method for docstring for now
ci = super().conf_int(alpha=alpha, cols=cols)
return ci | Compute the confidence interval of the fitted parameters. | conf_int | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
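A minimal usage sketch (simulated data, not from the source) showing how the `alpha` and `cols` arguments change the returned intervals:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = rng.standard_normal((200, 2))
y = 1.0 + x @ np.array([2.0, -1.0]) + rng.standard_normal(200)
res = sm.OLS(y, sm.add_constant(x)).fit()

print(res.conf_int())                 # 95% intervals for all parameters
print(res.conf_int(alpha=0.10))       # narrower 90% intervals
print(res.conf_int(cols=[1, 2]))      # intervals for the two slopes only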
def nobs(self):
"""Number of observations n."""
return float(self.model.wexog.shape[0]) | Number of observations n. | nobs | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def fittedvalues(self):
"""The predicted values for the original (unwhitened) design."""
return self.model.predict(self.params, self.model.exog) | The predicted values for the original (unwhitened) design. | fittedvalues | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def wresid(self):
"""
The residuals of the transformed/whitened regressand and regressor(s).
"""
return self.model.wendog - self.model.predict(
self.params, self.model.wexog) | The residuals of the transformed/whitened regressand and regressor(s). | wresid | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def resid(self):
"""The residuals of the model."""
return self.model.endog - self.model.predict(
self.params, self.model.exog) | The residuals of the model. | resid | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def scale(self):
"""
A scale factor for the covariance matrix.
The Default value is ssr/(n-p). Note that the square root of `scale`
is often called the standard error of the regression.
"""
wresid = self.wresid
return np.dot(wresid, wresid) / self.df_resid | A scale factor for the covariance matrix.
The Default value is ssr/(n-p). Note that the square root of `scale`
is often called the standard error of the regression. | scale | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def ssr(self):
"""Sum of squared (whitened) residuals."""
wresid = self.wresid
return np.dot(wresid, wresid) | Sum of squared (whitened) residuals. | ssr | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def centered_tss(self):
"""The total (weighted) sum of squares centered about the mean."""
model = self.model
weights = getattr(model, 'weights', None)
sigma = getattr(model, 'sigma', None)
if weights is not None:
mean = np.average(model.endog, weights=weights)
return np.sum(weights * (model.endog - mean)**2)
elif sigma is not None:
# Exactly matches WLS when sigma is diagonal
iota = np.ones_like(model.endog)
iota = model.whiten(iota)
mean = model.wendog.dot(iota) / iota.dot(iota)
err = model.endog - mean
err = model.whiten(err)
return np.sum(err**2)
else:
centered_endog = model.wendog - model.wendog.mean()
return np.dot(centered_endog, centered_endog) | The total (weighted) sum of squares centered about the mean. | centered_tss | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def uncentered_tss(self):
"""
Uncentered sum of squares.
The sum of the squared values of the (whitened) endogenous response
variable.
"""
wendog = self.model.wendog
return np.dot(wendog, wendog) | Uncentered sum of squares.
The sum of the squared values of the (whitened) endogenous response
variable. | uncentered_tss | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def ess(self):
"""
The explained sum of squares.
If a constant is present, the centered total sum of squares minus the
sum of squared residuals. If there is no constant, the uncentered total
sum of squares is used.
"""
if self.k_constant:
return self.centered_tss - self.ssr
else:
return self.uncentered_tss - self.ssr | The explained sum of squares.
If a constant is present, the centered total sum of squares minus the
sum of squared residuals. If there is no constant, the uncentered total
sum of squares is used. | ess | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def rsquared(self):
"""
R-squared of the model.
This is defined here as 1 - `ssr`/`centered_tss` if the constant is
included in the model and 1 - `ssr`/`uncentered_tss` if the constant is
omitted.
"""
if self.k_constant:
return 1 - self.ssr/self.centered_tss
else:
return 1 - self.ssr/self.uncentered_tss | R-squared of the model.
This is defined here as 1 - `ssr`/`centered_tss` if the constant is
included in the model and 1 - `ssr`/`uncentered_tss` if the constant is
omitted. | rsquared | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def rsquared_adj(self):
"""
Adjusted R-squared.
This is defined here as 1 - (`nobs`-1)/`df_resid` * (1-`rsquared`)
if a constant is included and 1 - `nobs`/`df_resid` * (1-`rsquared`) if
no constant is included.
"""
return 1 - (np.divide(self.nobs - self.k_constant, self.df_resid)
* (1 - self.rsquared)) | Adjusted R-squared.
This is defined here as 1 - (`nobs`-1)/`df_resid` * (1-`rsquared`)
if a constant is included and 1 - `nobs`/`df_resid` * (1-`rsquared`) if
no constant is included. | rsquared_adj | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
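The two definitions above can be checked directly on a fitted model (an illustrative sketch with simulated data; variable names and seed are not from the source):

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = sm.add_constant(rng.standard_normal((100, 3)))
y = x @ np.array([1.0, 0.5, -0.2, 0.0]) + rng.standard_normal(100)
res = sm.OLS(y, x).fit()

# With a constant, R^2 uses the centered total sum of squares.
assert np.allclose(res.rsquared, 1 - res.ssr / res.centered_tss)
# Adjusted R^2 penalizes by the residual degrees of freedom.
adj = 1 - (res.nobs - 1) / res.df_resid * (1 - res.rsquared)
assert np.allclose(res.rsquared_adj, adj)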
def mse_model(self):
"""
Mean squared error of the model.
The explained sum of squares divided by the model degrees of freedom.
"""
if np.all(self.df_model == 0.0):
return np.full_like(self.ess, np.nan)
return self.ess/self.df_model | Mean squared error of the model.
The explained sum of squares divided by the model degrees of freedom. | mse_model | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def mse_resid(self):
"""
Mean squared error of the residuals.
The sum of squared residuals divided by the residual degrees of
freedom.
"""
if np.all(self.df_resid == 0.0):
return np.full_like(self.ssr, np.nan)
return self.ssr/self.df_resid | Mean squared error of the residuals.
The sum of squared residuals divided by the residual degrees of
freedom. | mse_resid | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def mse_total(self):
"""
Total mean squared error.
The uncentered total sum of squares divided by the number of
observations.
"""
if np.all(self.df_resid + self.df_model == 0.0):
return np.full_like(self.centered_tss, np.nan)
if self.k_constant:
return self.centered_tss / (self.df_resid + self.df_model)
else:
return self.uncentered_tss / (self.df_resid + self.df_model) | Total mean squared error.
The uncentered total sum of squares divided by the number of
observations. | mse_total | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def fvalue(self):
"""
F-statistic of the fully specified model.
Calculated as the mean squared error of the model divided by the mean
squared error of the residuals if the nonrobust covariance is used.
Otherwise computed using a Wald-like quadratic form that tests whether
all coefficients (excluding the constant) are zero.
"""
if hasattr(self, 'cov_type') and self.cov_type != 'nonrobust':
# with heteroscedasticity or correlation robustness
k_params = self.normalized_cov_params.shape[0]
mat = np.eye(k_params)
const_idx = self.model.data.const_idx
# TODO: What if model includes implicit constant, e.g. all
# dummies but no constant regressor?
# TODO: Restats as LM test by projecting orthogonalizing
# to constant?
if self.model.data.k_constant == 1:
# if constant is implicit, return nan see #2444
if const_idx is None:
return np.nan
idx = lrange(k_params)
idx.pop(const_idx)
mat = mat[idx] # remove constant
if mat.size == 0: # see #3642
return np.nan
ft = self.f_test(mat)
# using backdoor to set another attribute that we already have
self._cache['f_pvalue'] = float(ft.pvalue)
return float(ft.fvalue)
else:
# for standard homoscedastic case
return self.mse_model/self.mse_resid | F-statistic of the fully specified model.
Calculated as the mean squared error of the model divided by the mean
squared error of the residuals if the nonrobust covariance is used.
Otherwise computed using a Wald-like quadratic form that tests whether
all coefficients (excluding the constant) are zero. | fvalue | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
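Under the default nonrobust covariance the F statistic reduces to the ratio of mean squares; with a robust covariance it is computed as a Wald test instead. A sketch with simulated data (not from the source):

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = sm.add_constant(rng.standard_normal((150, 2)))
y = x @ np.array([0.5, 1.0, -1.0]) + rng.standard_normal(150)

res = sm.OLS(y, x).fit()
assert np.allclose(res.fvalue, res.mse_model / res.mse_resid)

# With an HC covariance the statistic is a Wald test on the slope coefficients.
res_hc = sm.OLS(y, x).fit(cov_type="HC3")
print(res.fvalue, res_hc.fvalue)   # generally close but not identical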
def f_pvalue(self):
"""The p-value of the F-statistic."""
# Special case for df_model 0
if self.df_model == 0:
return np.full_like(self.fvalue, np.nan)
return stats.f.sf(self.fvalue, self.df_model, self.df_resid) | The p-value of the F-statistic. | f_pvalue | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def bse(self):
"""The standard errors of the parameter estimates."""
return np.sqrt(np.diag(self.cov_params())) | The standard errors of the parameter estimates. | bse | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def info_criteria(self, crit, dk_params=0):
"""Return an information criterion for the model.
Parameters
----------
crit : string
One of 'aic', 'bic', 'aicc' or 'hqic'.
dk_params : int or float
Correction to the number of parameters used in the information
criterion. By default, only mean parameters are included, the
scale parameter is not included in the parameter count.
Use ``dk_params=1`` to include scale in the parameter count.
Returns
-------
Value of information criterion.
References
----------
Burnham KP, Anderson KR (2002). Model Selection and Multimodel
Inference; Springer New York.
"""
crit = crit.lower()
k_params = self.df_model + self.k_constant + dk_params
if crit == "aic":
return -2 * self.llf + 2 * k_params
elif crit == "bic":
bic = -2*self.llf + np.log(self.nobs) * k_params
return bic
elif crit == "aicc":
from statsmodels.tools.eval_measures import aicc
return aicc(self.llf, self.nobs, k_params)
elif crit == "hqic":
from statsmodels.tools.eval_measures import hqic
return hqic(self.llf, self.nobs, k_params) | Return an information criterion for the model.
Parameters
----------
crit : string
One of 'aic', 'bic', 'aicc' or 'hqic'.
dk_params : int or float
Correction to the number of parameters used in the information
criterion. By default, only mean parameters are included, the
scale parameter is not included in the parameter count.
Use ``dk_params=1`` to include scale in the parameter count.
Returns
-------
Value of information criterion.
References
----------
Burnham KP, Anderson KR (2002). Model Selection and Multimodel
Inference; Springer New York. | info_criteria | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
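For example (simulated data; the setup is illustrative, not from the source), the criteria can be compared with and without counting the scale parameter:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = sm.add_constant(rng.standard_normal((80, 2)))
y = x @ np.array([1.0, 0.3, -0.7]) + rng.standard_normal(80)
res = sm.OLS(y, x).fit()

print(res.info_criteria("aic"), res.aic)        # matches res.aic here: same parameter count
print(res.info_criteria("aic", dk_params=1))    # counts the scale parameter as well
print(res.info_criteria("aicc"), res.info_criteria("hqic"))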
def eigenvals(self):
"""
Return eigenvalues sorted in decreasing order.
"""
if self._wexog_singular_values is not None:
eigvals = self._wexog_singular_values ** 2
else:
wx = self.model.wexog
eigvals = np.linalg.eigvalsh(wx.T @ wx)
return np.sort(eigvals)[::-1] | Return eigenvalues sorted in decreasing order. | eigenvals | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def condition_number(self):
"""
Return condition number of exogenous matrix.
Calculated as ratio of largest to smallest singular value of the
exogenous variables. This value is the same as the square root of
the ratio of the largest to smallest eigenvalue of the inner-product
of the exogenous variables.
"""
eigvals = self.eigenvals
return np.sqrt(eigvals[0]/eigvals[-1]) | Return condition number of exogenous matrix.
Calculated as ratio of largest to smallest singular value of the
exogenous variables. This value is the same as the square root of
the ratio of the largest to smallest eigenvalue of the inner-product
of the exogenous variables. | condition_number | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
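This matches the usual 2-norm condition number of the design matrix; a quick check (sketch with simulated data, not from the source):

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = sm.add_constant(rng.standard_normal((60, 2)))
y = rng.standard_normal(60)
res = sm.OLS(y, x).fit()

# Ratio of extreme singular values of the (whitened) design matrix.
print(res.condition_number)
print(np.linalg.cond(x))   # should agree for OLS without weights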
def cov_HC0(self):
"""
Heteroscedasticity robust covariance matrix. See HC0_se.
"""
self.het_scale = self.wresid**2
cov_HC0 = self._HCCM(self.het_scale)
return cov_HC0 | Heteroscedasticity robust covariance matrix. See HC0_se. | cov_HC0 | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def cov_HC1(self):
"""
Heteroscedasticity robust covariance matrix. See HC1_se.
"""
self.het_scale = self.nobs/(self.df_resid)*(self.wresid**2)
cov_HC1 = self._HCCM(self.het_scale)
return cov_HC1 | Heteroscedasticity robust covariance matrix. See HC1_se. | cov_HC1 | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def cov_HC2(self):
"""
Heteroscedasticity robust covariance matrix. See HC2_se.
"""
wexog = self.model.wexog
h = self._abat_diagonal(wexog, self.normalized_cov_params)
self.het_scale = self.wresid**2/(1-h)
cov_HC2 = self._HCCM(self.het_scale)
return cov_HC2 | Heteroscedasticity robust covariance matrix. See HC2_se. | cov_HC2 | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def cov_HC3(self):
"""
Heteroscedasticity robust covariance matrix. See HC3_se.
"""
wexog = self.model.wexog
h = self._abat_diagonal(wexog, self.normalized_cov_params)
self.het_scale = (self.wresid / (1 - h))**2
cov_HC3 = self._HCCM(self.het_scale)
return cov_HC3 | Heteroscedasticity robust covariance matrix. See HC3_se. | cov_HC3 | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def HC0_se(self):
"""
White's (1980) heteroskedasticity robust standard errors.
Notes
-----
Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)) X (X.T X)^(-1)))
where e_i = resid[i].
When HC0_se or cov_HC0 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is just
resid**2.
"""
return np.sqrt(np.diag(self.cov_HC0)) | White's (1980) heteroskedasticity robust standard errors. | HC0_se | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
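The sandwich formula in the notes can be reproduced by hand and compared with the attribute (a sketch assuming plain OLS with simulated heteroskedastic data; not from the source):

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
X = sm.add_constant(rng.standard_normal((200, 2)))
y = X @ np.array([1.0, 2.0, -1.0]) + rng.standard_normal(200) * (1 + np.abs(X[:, 1]))
res = sm.OLS(y, X).fit()

xtx_inv = np.linalg.inv(X.T @ X)
meat = X.T @ (res.resid[:, None] ** 2 * X)          # X' diag(e_i^2) X
cov_hc0 = xtx_inv @ meat @ xtx_inv
assert np.allclose(np.sqrt(np.diag(cov_hc0)), res.HC0_se)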
def HC1_se(self):
"""
MacKinnon and White's (1985) heteroskedasticity robust standard errors.
Notes
-----
Defined as sqrt(diag(n/(n-p)*HC_0)).
When HC1_se or cov_HC1 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is
n/(n-p)*resid**2.
"""
return np.sqrt(np.diag(self.cov_HC1)) | MacKinnon and White's (1985) heteroskedasticity robust standard errors. | HC1_se | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def HC2_se(self):
"""
MacKinnon and White's (1985) heteroskedasticity robust standard errors.
Notes
-----
Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)) X(X.T X)^(-1)
where h_ii = x_i(X.T X)^(-1)x_i.T
When HC2_se or cov_HC2 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is
resid^(2)/(1-h_ii).
"""
return np.sqrt(np.diag(self.cov_HC2)) | MacKinnon and White's (1985) heteroskedasticity robust standard errors. | HC2_se | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def HC3_se(self):
"""
MacKinnon and White's (1985) heteroskedasticity robust standard errors.
Notes
-----
Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)^(2)) X(X.T X)^(-1)
where h_ii = x_i(X.T X)^(-1)x_i.T.
When HC3_se or cov_HC3 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is
resid^(2)/(1-h_ii)^(2).
"""
return np.sqrt(np.diag(self.cov_HC3)) | MacKinnon and White's (1985) heteroskedasticity robust standard errors. | HC3_se | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def resid_pearson(self):
"""
Residuals, normalized to have unit variance.
Returns
-------
array_like
The array `wresid` normalized by the sqrt of the scale to have
unit variance.
"""
if not hasattr(self, 'resid'):
raise ValueError('Method requires residuals.')
eps = np.finfo(self.wresid.dtype).eps
if np.sqrt(self.scale) < 10 * eps * self.model.endog.mean():
# do not divide if scale is zero close to numerical precision
warnings.warn(
"All residuals are 0, cannot compute normed residuals.",
RuntimeWarning
)
return self.wresid
else:
return self.wresid / np.sqrt(self.scale) | Residuals, normalized to have unit variance. | resid_pearson | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def _is_nested(self, restricted):
"""
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
Returns
-------
nested : bool
True if nested, otherwise false
Notes
-----
A model nests another model if the regressors in the smaller
model are spanned by the regressors in the larger model and
the regressand is identical.
"""
if self.model.nobs != restricted.model.nobs:
return False
full_rank = self.model.rank
restricted_rank = restricted.model.rank
if full_rank <= restricted_rank:
return False
restricted_exog = restricted.model.wexog
full_wresid = self.wresid
scores = restricted_exog * full_wresid[:, None]
score_l2 = np.sqrt(np.mean(scores.mean(0) ** 2))
# TODO: Could be improved, and may fail depending on scale of
# regressors
return np.allclose(score_l2, 0) | Determine whether the restricted model is nested in the current model. | _is_nested | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def compare_lm_test(self, restricted, demean=True, use_lr=False):
"""
Use Lagrange Multiplier test to test a set of linear restrictions.
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the
current model. The result instance of the restricted model
is required to have two attributes, residual sum of
squares, `ssr`, residual degrees of freedom, `df_resid`.
demean : bool
Flag indicating whether to demean the scores based on the
residuals from the restricted model. If True, the covariance of
the scores is used and the LM test is identical to the large
sample version of the LR test.
use_lr : bool
A flag indicating whether to estimate the covariance of the model
scores using the unrestricted model. Setting this to True improves
the power of the test.
Returns
-------
lm_value : float
The test statistic, which is chi2 distributed.
p_value : float
The p-value of the test statistic.
df_diff : int
The degrees of freedom of the restriction, i.e. difference in df
between models.
Notes
-----
The LM test examines whether the scores from the restricted model are
0. If the null is true, and the restrictions are valid, then the
parameters of the restricted model should be close to the minimum of
the sum of squared errors, and so the scores should be close to zero,
on average.
"""
from numpy.linalg import inv
import statsmodels.stats.sandwich_covariance as sw
if not self._is_nested(restricted):
raise ValueError("Restricted model is not nested by full model.")
wresid = restricted.wresid
wexog = self.model.wexog
scores = wexog * wresid[:, None]
n = self.nobs
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
s = scores.mean(axis=0)
if use_lr:
scores = wexog * self.wresid[:, None]
demean = False
if demean:
scores = scores - scores.mean(0)[None, :]
# Form matters here. If homoskedastics can be sigma^2 (X'X)^-1
# If Heteroskedastic then the form below is fine
# If HAC then need to use HAC
# If Cluster, should use cluster
cov_type = getattr(self, 'cov_type', 'nonrobust')
if cov_type == 'nonrobust':
sigma2 = np.mean(wresid**2)
xpx = np.dot(wexog.T, wexog) / n
s_inv = inv(sigma2 * xpx)
elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):
s_inv = inv(np.dot(scores.T, scores) / n)
elif cov_type == 'HAC':
maxlags = self.cov_kwds['maxlags']
s_inv = inv(sw.S_hac_simple(scores, maxlags) / n)
elif cov_type == 'cluster':
# cluster robust standard errors
groups = self.cov_kwds['groups']
# TODO: Might need demean option in S_crosssection by group?
s_inv = inv(sw.S_crosssection(scores, groups))
else:
raise ValueError('Only nonrobust, HC, HAC and cluster are ' +
'currently connected')
lm_value = n * (s @ s_inv @ s.T)
p_value = stats.chi2.sf(lm_value, df_diff)
return lm_value, p_value, df_diff | Use Lagrange Multiplier test to test a set of linear restrictions. | compare_lm_test | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
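A usage sketch (simulated data, not from the source): fit a full and a nested restricted model and test the restriction with the LM statistic:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = rng.standard_normal((300, 3))
y = 1.0 + 0.8 * x[:, 0] + rng.standard_normal(300)   # last two regressors are irrelevant

full = sm.OLS(y, sm.add_constant(x)).fit()
restricted = sm.OLS(y, sm.add_constant(x[:, :1])).fit()

lm, pval, df_diff = full.compare_lm_test(restricted)
print(lm, pval, df_diff)   # df_diff == 2; the p-value should be large here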
def compare_f_test(self, restricted):
"""
Use F test to test whether restricted model is correct.
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the
current model. The result instance of the restricted model
is required to have two attributes, residual sum of
squares, `ssr`, residual degrees of freedom, `df_resid`.
Returns
-------
f_value : float
The test statistic which has an F distribution.
p_value : float
The p-value of the test statistic.
df_diff : int
The degrees of freedom of the restriction, i.e. difference in
df between models.
Notes
-----
See mailing list discussion October 17,
This test compares the residual sum of squares of the two
models. This is not a valid test, if there is unspecified
heteroscedasticity or correlation. This method will issue a
warning if this is detected but still return the results under
the assumption of homoscedasticity and no autocorrelation
(sphericity).
"""
has_robust1 = getattr(self, 'cov_type', 'nonrobust') != 'nonrobust'
has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=
'nonrobust')
if has_robust1 or has_robust2:
warnings.warn('F test for comparison is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
ssr_full = self.ssr
ssr_restr = restricted.ssr
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
f_value = (ssr_restr - ssr_full) / df_diff / ssr_full * df_full
p_value = stats.f.sf(f_value, df_diff, df_full)
return f_value, p_value, df_diff | Use F test to test whether restricted model is correct. | compare_f_test | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
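The F statistic above is just the scaled difference in residual sums of squares; this sketch (simulated data, not from the source) checks it against the returned value:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(1)
x = rng.standard_normal((120, 2))
y = 2.0 + x @ np.array([1.0, 0.0]) + rng.standard_normal(120)

full = sm.OLS(y, sm.add_constant(x)).fit()
restricted = sm.OLS(y, sm.add_constant(x[:, :1])).fit()

f_val, p_val, df_diff = full.compare_f_test(restricted)
manual = ((restricted.ssr - full.ssr) / df_diff) / (full.ssr / full.df_resid)
assert np.allclose(f_val, manual)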
def compare_lr_test(self, restricted, large_sample=False):
"""
Likelihood ratio test to test whether restricted model is correct.
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current model.
The result instance of the restricted model is required to have two
attributes, residual sum of squares, `ssr`, residual degrees of
freedom, `df_resid`.
large_sample : bool
Flag indicating whether to use a heteroskedasticity robust version
of the LR test, which is a modified LM test.
Returns
-------
lr_stat : float
The likelihood ratio which is chisquare distributed with df_diff
degrees of freedom.
p_value : float
The p-value of the test statistic.
df_diff : int
The degrees of freedom of the restriction, i.e. difference in df
between models.
Notes
-----
The exact likelihood ratio is valid for homoskedastic data,
and is defined as
.. math:: D=-2\\log\\left(\\frac{\\mathcal{L}_{null}}
{\\mathcal{L}_{alternative}}\\right)
where :math:`\\mathcal{L}` is the likelihood of the
model. With :math:`D` distributed as chisquare with df equal
to difference in number of parameters or equivalently
difference in residual degrees of freedom.
The large sample version of the likelihood ratio is defined as
.. math:: D=n s^{\\prime}S^{-1}s
where :math:`s=n^{-1}\\sum_{i=1}^{n} s_{i}`
.. math:: s_{i} = x_{i,alternative} \\epsilon_{i,null}
is the average score of the model evaluated using the
residuals from null model and the regressors from the
alternative model and :math:`S` is the covariance of the
scores, :math:`s_{i}`. The covariance of the scores is
estimated using the same estimator as in the alternative
model.
This test compares the loglikelihood of the two models. This
may not be a valid test, if there is unspecified
heteroscedasticity or correlation. This method will issue a
warning if this is detected but still return the results
without taking unspecified heteroscedasticity or correlation
into account.
"""
# TODO: put into separate function, needs tests
# See mailing list discussion October 17,
if large_sample:
return self.compare_lm_test(restricted, use_lr=True)
has_robust1 = (getattr(self, 'cov_type', 'nonrobust') != 'nonrobust')
has_robust2 = (
getattr(restricted, 'cov_type', 'nonrobust') != 'nonrobust')
if has_robust1 or has_robust2:
warnings.warn('Likelihood Ratio test is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
llf_full = self.llf
llf_restr = restricted.llf
df_full = self.df_resid
df_restr = restricted.df_resid
lrdf = (df_restr - df_full)
lrstat = -2*(llf_restr - llf_full)
lr_pvalue = stats.chi2.sf(lrstat, lrdf)
return lrstat, lr_pvalue, lrdf | Likelihood ratio test to test whether restricted model is correct. | compare_lr_test | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
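Similarly, the exact LR statistic is twice the difference in log-likelihoods (sketch with simulated data, not from the source):

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(2)
x = rng.standard_normal((200, 2))
y = 0.5 + 1.5 * x[:, 0] + rng.standard_normal(200)

full = sm.OLS(y, sm.add_constant(x)).fit()
restricted = sm.OLS(y, sm.add_constant(x[:, :1])).fit()

lr, pval, df_diff = full.compare_lr_test(restricted)
assert np.allclose(lr, -2 * (restricted.llf - full.llf))
# large_sample=True instead delegates to the robust LM form described above.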
def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwargs):
"""
Create new results instance with robust covariance as default.
Parameters
----------
cov_type : str
The type of robust sandwich estimator to use. See Notes below.
use_t : bool
If true, then the t distribution is used for inference.
If false, then the normal distribution is used.
If `use_t` is None, then an appropriate default is used, which is
`True` if the cov_type is nonrobust, and `False` in all other
cases.
**kwargs
Required or optional arguments for robust covariance calculation.
See Notes below.
Returns
-------
RegressionResults
This method creates a new results instance with the
requested robust covariance as the default covariance of
the parameters. Inferential statistics like p-values and
hypothesis tests will be based on this covariance matrix.
Notes
-----
The following covariance types and required or optional arguments are
currently available:
- 'fixed scale' uses a predefined scale
``scale``: float, optional
Argument to set the scale. Default is 1.
- 'HC0', 'HC1', 'HC2', 'HC3': heteroscedasticity robust covariance
- no keyword arguments
- 'HAC': heteroskedasticity-autocorrelation robust covariance
``maxlags`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
kernels currently available kernels are ['bartlett', 'uniform'],
default is Bartlett
``use_correction``: bool, optional
If true, use small sample correction
- 'cluster': clustered covariance estimator
``groups`` : array_like[int], required :
Integer-valued index of clusters or groups.
``use_correction``: bool, optional
If True the sandwich covariance is calculated with a small
sample correction.
If False the sandwich covariance is calculated without
small sample correction.
``df_correction``: bool, optional
If True (default), then the degrees of freedom for the
inferential statistics and hypothesis tests, such as
pvalues, f_pvalue, conf_int, and t_test and f_test, are
based on the number of groups minus one instead of the
total number of observations minus the number of explanatory
variables. `df_resid` of the results instance is also
adjusted. When `use_t` is also True, then pvalues are
computed using the Student's t distribution using the
corrected values. These may differ substantially from
p-values based on the normal is the number of groups is
small.
If False, then `df_resid` of the results instance is not
adjusted.
- 'hac-groupsum': Driscoll and Kraay, heteroscedasticity and
autocorrelation robust covariance for panel data
# TODO: more options needed here
``time`` : array_like, required
index of time periods
``maxlags`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
The available kernels are ['bartlett', 'uniform']. The default is
Bartlett.
``use_correction`` : {False, 'hac', 'cluster'}, optional
If False the sandwich covariance is calculated without small
sample correction. If `use_correction = 'cluster'` (default),
then the same small sample correction as in the case of
`covtype='cluster'` is used.
``df_correction`` : bool, optional
The adjustment to df_resid, see cov_type 'cluster' above
- 'hac-panel': heteroscedasticity and autocorrelation robust standard
errors in panel data. The data needs to be sorted in this case, the
time series for each panel unit or cluster need to be stacked. The
membership to a time series of an individual or group can be either
specified by group indicators or by increasing time periods. One of
``groups`` or ``time`` is required. # TODO: we need more options here
``groups`` : array_like[int]
indicator for groups
``time`` : array_like[int]
index of time periods
``maxlags`` : int, required
number of lags to use
``kernel`` : {callable, str}, optional
Available kernels are ['bartlett', 'uniform'], default
is Bartlett
``use_correction`` : {False, 'hac', 'cluster'}, optional
If False the sandwich covariance is calculated without
small sample correction.
``df_correction`` : bool, optional
Adjustment to df_resid, see cov_type 'cluster' above
**Reminder**: ``use_correction`` in "hac-groupsum" and "hac-panel" is
not bool, needs to be in {False, 'hac', 'cluster'}.
.. todo:: Currently there is no check for extra or misspelled keywords,
except in the case of cov_type `HCx`
"""
from statsmodels.base.covtype import descriptions, normalize_cov_type
import statsmodels.stats.sandwich_covariance as sw
cov_type = normalize_cov_type(cov_type)
if 'kernel' in kwargs:
kwargs['weights_func'] = kwargs.pop('kernel')
if 'weights_func' in kwargs and not callable(kwargs['weights_func']):
kwargs['weights_func'] = sw.kernel_dict[kwargs['weights_func']]
# TODO: make separate function that returns a robust cov plus info
use_self = kwargs.pop('use_self', False)
if use_self:
res = self
else:
res = self.__class__(
self.model, self.params,
normalized_cov_params=self.normalized_cov_params,
scale=self.scale)
res.cov_type = cov_type
# use_t might already be defined by the class, and already set
if use_t is None:
use_t = self.use_t
res.cov_kwds = {'use_t': use_t} # store for information
res.use_t = use_t
adjust_df = False
if cov_type in ['cluster', 'hac-panel', 'hac-groupsum']:
df_correction = kwargs.get('df_correction', None)
# TODO: check also use_correction, do I need all combinations?
if df_correction is not False: # i.e. in [None, True]:
# user did not explicitely set it to False
adjust_df = True
res.cov_kwds['adjust_df'] = adjust_df
# verify and set kwargs, and calculate cov
# TODO: this should be outsourced in a function so we can reuse it in
# other models
# TODO: make it DRYer repeated code for checking kwargs
if cov_type in ['fixed scale', 'fixed_scale']:
res.cov_kwds['description'] = descriptions['fixed_scale']
res.cov_kwds['scale'] = scale = kwargs.get('scale', 1.)
res.cov_params_default = scale * res.normalized_cov_params
elif cov_type.upper() in ('HC0', 'HC1', 'HC2', 'HC3'):
if kwargs:
raise ValueError('heteroscedasticity robust covariance '
'does not use keywords')
res.cov_kwds['description'] = descriptions[cov_type.upper()]
res.cov_params_default = getattr(self, 'cov_' + cov_type.upper())
elif cov_type.lower() == 'hac':
# TODO: check if required, default in cov_hac_simple
maxlags = kwargs['maxlags']
res.cov_kwds['maxlags'] = maxlags
weights_func = kwargs.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
use_correction = kwargs.get('use_correction', False)
res.cov_kwds['use_correction'] = use_correction
res.cov_kwds['description'] = descriptions['HAC'].format(
maxlags=maxlags,
correction=['without', 'with'][use_correction])
res.cov_params_default = sw.cov_hac_simple(
self, nlags=maxlags, weights_func=weights_func,
use_correction=use_correction)
elif cov_type.lower() == 'cluster':
# cluster robust standard errors, one- or two-way
groups = kwargs['groups']
if not hasattr(groups, 'shape'):
groups = [np.squeeze(np.asarray(group)) for group in groups]
groups = np.asarray(groups).T
if groups.ndim >= 2:
groups = groups.squeeze()
res.cov_kwds['groups'] = groups
use_correction = kwargs.get('use_correction', True)
res.cov_kwds['use_correction'] = use_correction
if groups.ndim == 1:
if adjust_df:
# need to find number of groups
# duplicate work
self.n_groups = n_groups = len(np.unique(groups))
res.cov_params_default = sw.cov_cluster(
self, groups, use_correction=use_correction)
elif groups.ndim == 2:
if hasattr(groups, 'values'):
groups = groups.values
if adjust_df:
# need to find number of groups
# duplicate work
n_groups0 = len(np.unique(groups[:, 0]))
n_groups1 = len(np.unique(groups[:, 1]))
self.n_groups = (n_groups0, n_groups1)
n_groups = min(n_groups0, n_groups1) # use for adjust_df
# Note: sw.cov_cluster_2groups has 3 returns
res.cov_params_default = sw.cov_cluster_2groups(
self, groups, use_correction=use_correction)[0]
else:
raise ValueError('only two groups are supported')
res.cov_kwds['description'] = descriptions['cluster']
elif cov_type.lower() == 'hac-panel':
# cluster robust standard errors
res.cov_kwds['time'] = time = kwargs.get('time', None)
res.cov_kwds['groups'] = groups = kwargs.get('groups', None)
# TODO: nlags is currently required
# nlags = kwargs.get('nlags', True)
# res.cov_kwds['nlags'] = nlags
# TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwargs['maxlags']
use_correction = kwargs.get('use_correction', 'hac')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwargs.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if groups is not None:
groups = np.asarray(groups)
tt = (np.nonzero(groups[:-1] != groups[1:])[0] + 1).tolist()
nobs_ = len(groups)
elif time is not None:
time = np.asarray(time)
# TODO: clumsy time index in cov_nw_panel
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1).tolist()
nobs_ = len(time)
else:
raise ValueError('either time or groups needs to be given')
groupidx = lzip([0] + tt, tt + [nobs_])
self.n_groups = n_groups = len(groupidx)
res.cov_params_default = sw.cov_nw_panel(
self,
maxlags,
groupidx,
weights_func=weights_func,
use_correction=use_correction
)
res.cov_kwds['description'] = descriptions['HAC-Panel']
elif cov_type.lower() == 'hac-groupsum':
# Driscoll-Kraay standard errors
res.cov_kwds['time'] = time = kwargs['time']
# TODO: nlags is currently required
# nlags = kwargs.get('nlags', True)
# res.cov_kwds['nlags'] = nlags
# TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwargs['maxlags']
use_correction = kwargs.get('use_correction', 'cluster')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwargs.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if adjust_df:
# need to find number of groups
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1)
self.n_groups = n_groups = len(tt) + 1
res.cov_params_default = sw.cov_nw_groupsum(
self, maxlags, time, weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = descriptions['HAC-Groupsum']
else:
raise ValueError('cov_type not recognized. See docstring for ' +
'available options and spelling')
if adjust_df:
# Note: df_resid is used for scale and others, add new attribute
res.df_resid_inference = n_groups - 1
return res | Create new results instance with robust covariance as default. | get_robustcov_results | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
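A usage sketch with simulated clustered data (group structure, names, and seed are illustrative, not from the source). The same covariance can usually also be requested by passing `cov_type` directly to `fit`:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
n_groups, n_per = 30, 10
groups = np.repeat(np.arange(n_groups), n_per)
u = rng.standard_normal(n_groups)[groups]            # group-level shock
x = sm.add_constant(rng.standard_normal((n_groups * n_per, 2)))
y = x @ np.array([1.0, 0.5, -0.5]) + u + rng.standard_normal(n_groups * n_per)

res = sm.OLS(y, x).fit()
res_cl = res.get_robustcov_results(cov_type="cluster", groups=groups)
print(res.bse)       # nonrobust standard errors
print(res_cl.bse)    # cluster-robust standard errors, typically larger here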
def summary(
self,
yname: str | None = None,
xname: Sequence[str] | None = None,
title: str | None = None,
alpha: float = 0.05,
slim: bool = False,
):
"""
Summarize the Regression Results.
Parameters
----------
yname : str, optional
Name of endogenous (response) variable. The default is `y`.
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float, optional
The significance level for the confidence intervals.
slim : bool, optional
Flag indicating whether to produce a reduced set of summary tables
without diagnostic information. Default is False.
Returns
-------
Summary
Instance holding the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : A class that holds summary results.
Notes
-----
For more information on regression results and diagnostic table,
see our documentation of `Examples/Linear Regression Models/Regression diagnostics`.
"""
from statsmodels.stats.stattools import (
durbin_watson,
jarque_bera,
omni_normtest,
)
alpha = float_like(alpha, "alpha", optional=False)
slim = bool_like(slim, "slim", optional=False, strict=True)
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
# TODO: Avoid adding attributes in non-__init__
self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,
omni=omni, omnipv=omnipv, condno=condno,
mineigval=eigvals[-1])
# TODO not used yet
# diagn_left_header = ['Models stats']
# diagn_right_header = ['Residual stats']
# TODO: requiring list/iterable is a bit annoying
# need more control over formatting
# TODO: default do not work if it's not identically spelled
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Least Squares']),
('Date:', None),
('Time:', None),
('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None),
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
rsquared_type = '' if self.k_constant else ' (uncentered)'
top_right = [('R-squared' + rsquared_type + ':',
["%#8.3f" % self.rsquared]),
('Adj. R-squared' + rsquared_type + ':',
["%#8.3f" % self.rsquared_adj]),
('F-statistic:', ["%#8.4g" % self.fvalue]),
('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
('Log-Likelihood:', None),
('AIC:', ["%#8.4g" % self.aic]),
('BIC:', ["%#8.4g" % self.bic])
]
if slim:
slimlist = ['Dep. Variable:', 'Model:', 'No. Observations:',
'Covariance Type:', 'R-squared:', 'Adj. R-squared:',
'F-statistic:', 'Prob (F-statistic):']
diagn_left = diagn_right = []
top_left = [elem for elem in top_left if elem[0] in slimlist]
top_right = [elem for elem in top_right if elem[0] in slimlist]
top_right = top_right + \
[("", [])] * (len(top_left) - len(top_right))
else:
diagn_left = [('Omnibus:', ["%#6.3f" % omni]),
('Prob(Omnibus):', ["%#6.3f" % omnipv]),
('Skew:', ["%#6.3f" % skew]),
('Kurtosis:', ["%#6.3f" % kurtosis])
]
diagn_right = [('Durbin-Watson:',
["%#8.3f" % durbin_watson(self.wresid)]
),
('Jarque-Bera (JB):', ["%#8.3f" % jb]),
('Prob(JB):', ["%#8.3g" % jbpv]),
('Cond. No.', ["%#8.3g" % condno])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
# create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
if not slim:
smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
yname=yname, xname=xname,
title="")
# add warnings/notes, added to text format only
etext = []
if not self.k_constant:
etext.append(
"R² is computed without centering (uncentered) since the "
"model does not contain a constant."
)
if hasattr(self, 'cov_type'):
etext.append(self.cov_kwds['description'])
if self.model.exog.shape[0] < self.model.exog.shape[1]:
wstr = "The input rank is higher than the number of observations."
etext.append(wstr)
if eigvals[-1] < 1e-10:
wstr = "The smallest eigenvalue is %6.3g. This might indicate "
wstr += "that there are\n"
wstr += "strong multicollinearity problems or that the design "
wstr += "matrix is singular."
wstr = wstr % eigvals[-1]
etext.append(wstr)
elif condno > 1000: # TODO: what is recommended?
wstr = "The condition number is large, %6.3g. This might "
wstr += "indicate that there are\n"
wstr += "strong multicollinearity or other numerical "
wstr += "problems."
wstr = wstr % condno
etext.append(wstr)
if etext:
etext = [f"[{i + 1}] {text}"
for i, text in enumerate(etext)]
etext.insert(0, "Notes:")
smry.add_extra_txt(etext)
return smry | Summarize the Regression Results.
Parameters
----------
yname : str, optional
Name of endogenous (response) variable. Default is `y`.
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float, optional
The significance level for the confidence intervals.
slim : bool, optional
Flag indicating whether to produce a reduced summary that omits the diagnostic information.
Default is False.
Returns
-------
Summary
Instance holding the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : A class that holds summary results.
Notes
-----
For more information on regression results and diagnostic table,
see our documentation of `Examples/Linear Regression Models/Regression diagnostics`. | summary | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
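A minimal usage sketch of the summary method above; the simulated data, seed, and coefficient values are illustrative assumptions rather than anything taken from the source.

import numpy as np
import statsmodels.api as sm

# Simulate a small regression problem (purely illustrative).
rng = np.random.default_rng(12345)
x = rng.standard_normal((200, 2))
y = 1.0 + x @ np.array([0.5, -0.3]) + rng.standard_normal(200)

res = sm.OLS(y, sm.add_constant(x)).fit()
print(res.summary())           # full top table, coefficient table, and diagnostics
print(res.summary(slim=True))  # reduced top table with the diagnostic section omitted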
def summary2(
self,
yname: str | None = None,
xname: Sequence[str] | None = None,
title: str | None = None,
alpha: float = 0.05,
float_format: str = "%.4f",
):
"""
Experimental summary function to summarize the regression results.
Parameters
----------
yname : str
The name of the dependent variable (optional).
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
float_format : str
The format for floats in parameters summary.
Returns
-------
Summary
Instance holding the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary
A class that holds summary results.
"""
# Diagnostics
from statsmodels.stats.stattools import (
durbin_watson,
jarque_bera,
omni_normtest,
)
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
dw = durbin_watson(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
diagnostic = dict([
('Omnibus:', "%.3f" % omni),
('Prob(Omnibus):', "%.3f" % omnipv),
('Skew:', "%.3f" % skew),
('Kurtosis:', "%.3f" % kurtosis),
('Durbin-Watson:', "%.3f" % dw),
('Jarque-Bera (JB):', "%.3f" % jb),
('Prob(JB):', "%.3f" % jbpv),
('Condition No.:', "%.0f" % condno)
])
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
smry.add_dict(diagnostic)
etext = []
if not self.k_constant:
etext.append(
"R² is computed without centering (uncentered) since the \
model does not contain a constant."
)
if hasattr(self, 'cov_type'):
etext.append(self.cov_kwds['description'])
if self.model.exog.shape[0] < self.model.exog.shape[1]:
wstr = "The input rank is higher than the number of observations."
etext.append(wstr)
# Warnings
if eigvals[-1] < 1e-10:
warn = "The smallest eigenvalue is %6.3g. This might indicate that\
there are strong multicollinearity problems or that the design\
matrix is singular." % eigvals[-1]
etext.append(warn)
elif condno > 1000:
warn = "The condition number is large, %6.3g. This might indicate\
that there are strong multicollinearity or other numerical\
problems." % condno
etext.append(warn)
if etext:
etext = [f"[{i + 1}] {text}"
for i, text in enumerate(etext)]
etext.insert(0, "Notes:")
for line in etext:
smry.add_text(line)
return smry | Experimental summary function to summarize the regression results.
Parameters
----------
yname : str
The name of the dependent variable (optional).
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
float_format : str
The format for floats in parameters summary.
Returns
-------
Summary
Instance holding the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary
A class that holds summary results. | summary2 | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
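A hedged sketch of the experimental summary2 interface on a dataset bundled with statsmodels; the title and float format are arbitrary choices.

import statsmodels.api as sm

# The stack-loss data ships with statsmodels, so this runs offline.
data = sm.datasets.stackloss.load_pandas()
res = sm.OLS(data.endog, sm.add_constant(data.exog)).fit()

smry = res.summary2(float_format="%.3f", title="Stack loss OLS (illustrative)")
print(smry)            # text rendering of all tables
print(smry.tables[1])  # the coefficient table is a pandas DataFrame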
def get_influence(self):
"""
Calculate influence and outlier measures.
Returns
-------
OLSInfluence
The instance containing methods to calculate the main influence and
outlier measures for the OLS regression.
See Also
--------
statsmodels.stats.outliers_influence.OLSInfluence
A class that exposes methods to examine observation influence.
"""
from statsmodels.stats.outliers_influence import OLSInfluence
return OLSInfluence(self) | Calculate influence and outlier measures.
Returns
-------
OLSInfluence
The instance containing methods to calculate the main influence and
outlier measures for the OLS regression.
See Also
--------
statsmodels.stats.outliers_influence.OLSInfluence
A class that exposes methods to examine observation influence. | get_influence | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
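A short sketch of typical OLSInfluence usage through get_influence; the dataset and the particular attributes pulled out below are illustrative choices.

import statsmodels.api as sm

data = sm.datasets.longley.load_pandas()
res = sm.OLS(data.endog, sm.add_constant(data.exog)).fit()

infl = res.get_influence()          # OLSInfluence instance
leverage = infl.hat_matrix_diag     # diagonal of the hat matrix
cooks_d, cooks_p = infl.cooks_distance
print(infl.summary_frame().head())  # per-observation influence measures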
def outlier_test(self, method='bonf', alpha=.05, labels=None,
order=False, cutoff=None):
"""
Test observations for outliers according to method.
Parameters
----------
method : str
The method to use in the outlier test. Must be one of:
- `bonferroni` : one-step correction
- `sidak` : one-step correction
- `holm-sidak` :
- `holm` :
- `simes-hochberg` :
- `hommel` :
- `fdr_bh` : Benjamini/Hochberg
- `fdr_by` : Benjamini/Yekutieli
See `statsmodels.stats.multitest.multipletests` for details.
alpha : float
The familywise error rate (FWER).
labels : None or array_like
If `labels` is not None, then it will be used as index to the
returned pandas DataFrame. See also Returns below.
order : bool
Whether or not to order the results by the absolute value of the
studentized residuals. If labels are provided they will also be
sorted.
cutoff : None or float in [0, 1]
If cutoff is not None, then the return only includes observations
with multiple testing corrected p-values strictly below the cutoff.
            The returned array or dataframe can be empty if no observation has a corrected p-value below the cutoff.
Returns
-------
array_like
Returns either an ndarray or a DataFrame if labels is not None.
Will attempt to get labels from model_results if available. The
columns are the Studentized residuals, the unadjusted p-value,
and the corrected p-value according to method.
Notes
-----
The unadjusted p-value is stats.t.sf(abs(resid), df) where
df = df_resid - 1.
"""
from statsmodels.stats.outliers_influence import outlier_test
return outlier_test(self, method, alpha, labels=labels,
order=order, cutoff=cutoff) | Test observations for outliers according to method.
Parameters
----------
method : str
The method to use in the outlier test. Must be one of:
- `bonferroni` : one-step correction
- `sidak` : one-step correction
- `holm-sidak` :
- `holm` :
- `simes-hochberg` :
- `hommel` :
- `fdr_bh` : Benjamini/Hochberg
- `fdr_by` : Benjamini/Yekutieli
See `statsmodels.stats.multitest.multipletests` for details.
alpha : float
The familywise error rate (FWER).
labels : None or array_like
If `labels` is not None, then it will be used as index to the
returned pandas DataFrame. See also Returns below.
order : bool
Whether or not to order the results by the absolute value of the
studentized residuals. If labels are provided they will also be
sorted.
cutoff : None or float in [0, 1]
If cutoff is not None, then the return only includes observations
with multiple testing corrected p-values strictly below the cutoff.
The returned array or dataframe can be empty if no observation has a corrected p-value below the cutoff.
Returns
-------
array_like
Returns either an ndarray or a DataFrame if labels is not None.
Will attempt to get labels from model_results if available. The
columns are the Studentized residuals, the unadjusted p-value,
and the corrected p-value according to method.
Notes
-----
The unadjusted p-value is stats.t.sf(abs(resid), df) where
df = df_resid - 1. | outlier_test | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
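A sketch of outlier_test with the Bonferroni correction; the cutoff value is arbitrary, and the filtered result may be empty when no observation clears it.

import statsmodels.api as sm

data = sm.datasets.stackloss.load_pandas()
res = sm.OLS(data.endog, sm.add_constant(data.exog)).fit()

# Studentized residuals with raw and Bonferroni-adjusted p-values, sorted.
out = res.outlier_test(method='bonf', alpha=0.05, order=True)
print(out.head())
# Keep only observations whose corrected p-value is below 0.1 (possibly empty).
print(res.outlier_test(method='bonf', cutoff=0.1))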
def el_test(self, b0_vals, param_nums, return_weights=0, ret_params=0,
method='nm', stochastic_exog=1):
"""
Test single or joint hypotheses using Empirical Likelihood.
Parameters
----------
b0_vals : 1darray
The hypothesized value of the parameter to be tested.
param_nums : 1darray
The parameter number to be tested.
return_weights : bool
If true, returns the weights that optimize the likelihood
ratio at b0_vals. The default is False.
ret_params : bool
If true, returns the parameter vector that maximizes the likelihood
ratio at b0_vals. Also returns the weights. The default is False.
method : str
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
The default is 'nm'.
stochastic_exog : bool
When True, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. The default is True.
Returns
-------
tuple
The p-value and -2 times the log-likelihood ratio for the
hypothesized values.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.stackloss.load()
>>> endog = data.endog
>>> exog = sm.add_constant(data.exog)
>>> model = sm.OLS(endog, exog)
>>> fitted = model.fit()
>>> fitted.params
        array([-39.91967442, 0.7156402 , 1.29528612, -0.15212252])
>>> fitted.rsquared
        0.91357690446068196
>>> # Test that the slope on the first variable is 0
>>> fitted.el_test([0], [1])
        (27.248146353888796, 1.7894660442330235e-07)
"""
params = np.copy(self.params)
opt_fun_inst = _ELRegOpts() # to store weights
if len(param_nums) == len(params):
llr = opt_fun_inst._opt_nuis_regress(
[],
param_nums=param_nums,
endog=self.model.endog,
exog=self.model.exog,
nobs=self.model.nobs,
nvar=self.model.exog.shape[1],
params=params,
b0_vals=b0_vals,
stochastic_exog=stochastic_exog)
pval = 1 - stats.chi2.cdf(llr, len(param_nums))
if return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval
x0 = np.delete(params, param_nums)
args = (param_nums, self.model.endog, self.model.exog,
self.model.nobs, self.model.exog.shape[1], params,
b0_vals, stochastic_exog)
if method == 'nm':
llr = optimize.fmin(opt_fun_inst._opt_nuis_regress, x0,
maxfun=10000, maxiter=10000, full_output=1,
disp=0, args=args)[1]
if method == 'powell':
llr = optimize.fmin_powell(opt_fun_inst._opt_nuis_regress, x0,
full_output=1, disp=0,
args=args)[1]
pval = 1 - stats.chi2.cdf(llr, len(param_nums))
if ret_params:
return llr, pval, opt_fun_inst.new_weights, opt_fun_inst.new_params
elif return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval | Test single or joint hypotheses using Empirical Likelihood.
Parameters
----------
b0_vals : 1darray
The hypothesized value of the parameter to be tested.
param_nums : 1darray
The parameter number to be tested.
return_weights : bool
If true, returns the weights that optimize the likelihood
ratio at b0_vals. The default is False.
ret_params : bool
If true, returns the parameter vector that maximizes the likelihood
ratio at b0_vals. Also returns the weights. The default is False.
method : str
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
The default is 'nm'.
stochastic_exog : bool
When True, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. The default is True.
Returns
-------
tuple
The p-value and -2 times the log-likelihood ratio for the
hypothesized values.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.stackloss.load()
>>> endog = data.endog
>>> exog = sm.add_constant(data.exog)
>>> model = sm.OLS(endog, exog)
>>> fitted = model.fit()
>>> fitted.params
>>> array([-39.91967442, 0.7156402 , 1.29528612, -0.15212252])
>>> fitted.rsquared
>>> 0.91357690446068196
>>> # Test that the slope on the first variable is 0
>>> fitted.el_test([0], [1])
>>> (27.248146353888796, 1.7894660442330235e-07) | el_test | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
def conf_int_el(self, param_num, sig=.05, upper_bound=None,
lower_bound=None, method='nm', stochastic_exog=True):
"""
Compute the confidence interval using Empirical Likelihood.
Parameters
----------
        param_num : int
            The index of the parameter for which the confidence interval is desired.
sig : float
The significance level. Default is 0.05.
upper_bound : float
The maximum value the upper limit can be. Default is the
99.9% confidence value under OLS assumptions.
lower_bound : float
The minimum value the lower limit can be. Default is the 99.9%
confidence value under OLS assumptions.
method : str
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
The default is 'nm'.
stochastic_exog : bool
When True, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. The default is True.
Returns
-------
lowerl : float
The lower bound of the confidence interval.
upperl : float
The upper bound of the confidence interval.
See Also
--------
el_test : Test parameters using Empirical Likelihood.
Notes
-----
This function uses brentq to find the value of beta where
test_beta([beta], param_num)[1] is equal to the critical value.
The function returns the results of each iteration of brentq at each
value of beta.
The current function value of the last printed optimization should be
the critical value at the desired significance level. For alpha=.05,
the value is 3.841459.
To ensure optimization terminated successfully, it is suggested to do
el_test([lower_limit], [param_num]).
If the optimization does not terminate successfully, consider switching
optimization algorithms.
If optimization is still not successful, try changing the values of
start_int_params. If the current function value repeatedly jumps
from a number between 0 and the critical value and a very large number
(>50), the starting parameters of the interior minimization need
to be changed.
"""
r0 = stats.chi2.ppf(1 - sig, 1)
if upper_bound is None:
upper_bound = self.conf_int(.01)[param_num][1]
if lower_bound is None:
lower_bound = self.conf_int(.01)[param_num][0]
def f(b0):
return self.el_test(np.array([b0]), np.array([param_num]),
method=method,
stochastic_exog=stochastic_exog)[0] - r0
lowerl = optimize.brenth(f, lower_bound,
self.params[param_num])
upperl = optimize.brenth(f, self.params[param_num],
upper_bound)
# ^ Seems to be faster than brentq in most cases
return (lowerl, upperl) | Compute the confidence interval using Empirical Likelihood.
Parameters
----------
param_num : int
The index of the parameter for which the confidence interval is desired.
sig : float
The significance level. Default is 0.05.
upper_bound : float
The maximum value the upper limit can be. Default is the
99.9% confidence value under OLS assumptions.
lower_bound : float
The minimum value the lower limit can be. Default is the 99.9%
confidence value under OLS assumptions.
method : str
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
The default is 'nm'.
stochastic_exog : bool
When True, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. The default is True.
Returns
-------
lowerl : float
The lower bound of the confidence interval.
upperl : float
The upper bound of the confidence interval.
See Also
--------
el_test : Test parameters using Empirical Likelihood.
Notes
-----
This function uses brentq to find the value of beta where
test_beta([beta], param_num)[1] is equal to the critical value.
The function returns the results of each iteration of brentq at each
value of beta.
The current function value of the last printed optimization should be
the critical value at the desired significance level. For alpha=.05,
the value is 3.841459.
To ensure optimization terminated successfully, it is suggested to do
el_test([lower_limit], [param_num]).
If the optimization does not terminate successfully, consider switching
optimization algorithms.
If optimization is still not successful, try changing the values of
start_int_params. If the current function value repeatedly jumps
from a number between 0 and the critical value and a very large number
(>50), the starting parameters of the interior minimization need
to be changed. | conf_int_el | python | statsmodels/statsmodels | statsmodels/regression/linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py | BSD-3-Clause |
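A sketch combining el_test and conf_int_el on the stack-loss data that the el_test example also uses; the tested value of zero and the parameter index are illustrative.

import numpy as np
import statsmodels.api as sm

data = sm.datasets.stackloss.load_pandas()
res = sm.OLS(data.endog, sm.add_constant(data.exog)).fit()

# Empirical-likelihood test that the first slope equals zero ...
llr, pval = res.el_test(np.array([0.0]), np.array([1]))
# ... and the matching empirical-likelihood confidence interval for that slope.
lower, upper = res.conf_int_el(1, sig=0.05)
print(llr, pval, (lower, upper))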
def test_all(self):
d = macrodata.load_pandas().data
#import datasetswsm.greene as g
#d = g.load('5-1')
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv'].values))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp'].values))
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, d['realint'][:-1].values])
res_ols = OLS(endogg, exogg).fit()
#print res_ols.params
mod_g1 = GLSAR(endogg, exogg, rho=-0.108136)
res_g1 = mod_g1.fit()
#print res_g1.params
mod_g2 = GLSAR(endogg, exogg, rho=-0.108136) #-0.1335859) from R
res_g2 = mod_g2.iterative_fit(maxiter=5)
#print res_g2.params
rho = -0.108136
# coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
partable = np.array([
[-9.50990, 0.990456, -9.602, 3.65e-018, -11.4631, -7.55670], # ***
[ 4.37040, 0.208146, 21.00, 2.93e-052, 3.95993, 4.78086], # ***
[-0.579253, 0.268009, -2.161, 0.0319, -1.10777, -0.0507346]]) # **
#Statistics based on the rho-differenced data:
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.113973),
endog_std = ("S.D. dependent var", 18.67447),
ssr = ("Sum squared resid", 22530.90),
mse_resid_sqrt = ("S.E. of regression", 10.66735),
rsquared = ("R-squared", 0.676973),
rsquared_adj = ("Adjusted R-squared", 0.673710),
fvalue = ("F(2, 198)", 221.0475),
f_pvalue = ("P-value(F)", 3.56e-51),
resid_acf1 = ("rho", -0.003481),
dw = ("Durbin-Watson", 1.993858))
#fstatistic, p-value, df1, df2
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
#LM-statistic, p-value, df
arch_4 = [7.30776, 0.120491, 4, "chi2"]
#multicollinearity
#Chi-square(2): test-statistic, pvalue, df
#tests
res = res_g1 #with rho from Gretl
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 6)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=4)
assert_allclose(res.f_pvalue,
result_gretl_g1['f_pvalue'][1],
rtol=1e-2)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=4)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
#tests
res = res_g2 #with estimated rho
#estimated lag coefficient
assert_almost_equal(res.model.rho, rho, decimal=3)
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 3)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=0)
assert_almost_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], decimal=6)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(2,4))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(2,4))
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=1)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=2)
'''
Performing iterative calculation of rho...
ITER RHO ESS
1 -0.10734 22530.9
2 -0.10814 22530.9
Model 4: Cochrane-Orcutt, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
rho = -0.108136
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.50990 0.990456 -9.602 3.65e-018 ***
ds_l_realgdp 4.37040 0.208146 21.00 2.93e-052 ***
realint_1 -0.579253 0.268009 -2.161 0.0319 **
Statistics based on the rho-differenced data:
Mean dependent var 3.113973 S.D. dependent var 18.67447
Sum squared resid 22530.90 S.E. of regression 10.66735
R-squared 0.676973 Adjusted R-squared 0.673710
F(2, 198) 221.0475 P-value(F) 3.56e-51
rho -0.003481 Durbin-Watson 1.993858
'''
'''
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023:
'''
'''
Test for ARCH of order 4
coefficient std. error t-ratio p-value
--------------------------------------------------------
alpha(0) 97.0386 20.3234 4.775 3.56e-06 ***
alpha(1) 0.176114 0.0714698 2.464 0.0146 **
alpha(2) -0.0488339 0.0724981 -0.6736 0.5014
alpha(3) -0.0705413 0.0737058 -0.9571 0.3397
alpha(4) 0.0384531 0.0725763 0.5298 0.5968
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491:
'''
'''
Variance Inflation Factors
Minimum possible value = 1.0
Values > 10.0 may indicate a collinearity problem
ds_l_realgdp 1.002
realint_1 1.002
VIF(j) = 1/(1 - R(j)^2), where R(j) is the multiple correlation coefficient
between variable j and the other independent variables
Properties of matrix X'X:
1-norm = 6862.0664
Determinant = 1.0296049e+009
Reciprocal condition number = 0.013819244
'''
'''
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491
Test of common factor restriction -
Null hypothesis: restriction is acceptable
Test statistic: F(2, 195) = 0.426391
with p-value = P(F(2, 195) > 0.426391) = 0.653468
Test for normality of residual -
Null hypothesis: error is normally distributed
Test statistic: Chi-square(2) = 20.2792
with p-value = 3.94837e-005:
'''
#no idea what this is
'''
Augmented regression for common factor test
OLS, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
coefficient std. error t-ratio p-value
---------------------------------------------------------------
const -10.9481 1.35807 -8.062 7.44e-014 ***
ds_l_realgdp 4.28893 0.229459 18.69 2.40e-045 ***
realint_1 -0.662644 0.334872 -1.979 0.0492 **
ds_l_realinv_1 -0.108892 0.0715042 -1.523 0.1294
ds_l_realgdp_1 0.660443 0.390372 1.692 0.0923 *
realint_2 0.0769695 0.341527 0.2254 0.8219
Sum of squared residuals = 22432.8
Test of common factor restriction
Test statistic: F(2, 195) = 0.426391, with p-value = 0.653468
'''
################ with OLS, HAC errors
#Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
#Dependent variable: ds_l_realinv
#HAC standard errors, bandwidth 4 (Bartlett kernel)
#coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
#for confidence interval t(199, 0.025) = 1.972
partable = np.array([
[-9.48167, 1.17709, -8.055, 7.17e-014, -11.8029, -7.16049], # ***
[4.37422, 0.328787, 13.30, 2.62e-029, 3.72587, 5.02258], #***
[-0.613997, 0.293619, -2.091, 0.0378, -1.19300, -0.0349939]]) # **
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.257395),
endog_std = ("S.D. dependent var", 18.73915),
ssr = ("Sum squared resid", 22799.68),
mse_resid_sqrt = ("S.E. of regression", 10.70380),
rsquared = ("R-squared", 0.676978),
rsquared_adj = ("Adjusted R-squared", 0.673731),
fvalue = ("F(2, 199)", 90.79971),
f_pvalue = ("P-value(F)", 9.53e-29),
llf = ("Log-likelihood", -763.9752),
aic = ("Akaike criterion", 1533.950),
bic = ("Schwarz criterion", 1543.875),
hqic = ("Hannan-Quinn", 1537.966),
resid_acf1 = ("rho", -0.107341),
dw = ("Durbin-Watson", 2.213805))
#for logs: dropping 70 nan or incomplete observations, T=133
#(res_ols.model.exog <=0).any(1).sum() = 69 ?not 70
linear_squares = [7.52477, 0.0232283, 2, "chi2"]
#Autocorrelation, Breusch-Godfrey test for autocorrelation up to order 4
#break
#see cusum results in files
arch_4 = [3.43473, 0.487871, 4, "chi2"]
het_white = [33.503723, 0.000003, 5, "chi2"]
het_breusch_pagan_konker = [0.709924, 0.701200, 2, "chi2"]
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
names = 'date residual leverage influence DFFITS'.split()
cur_dir = os.path.abspath(os.path.dirname(__file__))
fpath = os.path.join(cur_dir, 'results/leverage_influence_ols_nostars.txt')
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=1,
converters={0:lambda s: s})
#either numpy 1.6 or python 3.2 changed behavior
if np.isnan(lev[-1]['f1']):
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=2,
converters={0:lambda s: s})
lev.dtype.names = names
res = res_ols #for easier copying
cov_hac = sw.cov_hac_simple(res, nlags=4, use_correction=False)
bse_hac = sw.se_cov(cov_hac)
assert_almost_equal(res.params, partable[:,0], 5)
assert_almost_equal(bse_hac, partable[:,1], 5)
#TODO
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=4) #not in gretl
assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=6) #FAIL
assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=6) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
#f-value is based on cov_hac I guess
#res2 = res.get_robustcov_results(cov_type='HC1')
# TODO: fvalue differs from Gretl, trying any of the HCx
#assert_almost_equal(res2.fvalue, result_gretl_g1['fvalue'][1], decimal=0) #FAIL
#assert_approx_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], significant=1) #FAIL
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(6,5))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(6,5))
linear_sq = smsdia.linear_lm(res.resid, res.model.exog)
assert_almost_equal(linear_sq[0], linear_squares[0], decimal=6)
assert_almost_equal(linear_sq[1], linear_squares[1], decimal=7)
hbpk = smsdia.het_breuschpagan(res.resid, res.model.exog)
assert_almost_equal(hbpk[0], het_breusch_pagan_konker[0], decimal=6)
assert_almost_equal(hbpk[1], het_breusch_pagan_konker[1], decimal=6)
hw = smsdia.het_white(res.resid, res.model.exog)
assert_almost_equal(hw[:2], het_white[:2], 6)
#arch
#sm_arch = smsdia.acorr_lm(res.resid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.resid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=5)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
[oi.variance_inflation_factor(res.model.exog, k) for k in [1,2]]
infl = oi.OLSInfluence(res_ols)
#print np.max(np.abs(lev['DFFITS'] - infl.dffits[0]))
#print np.max(np.abs(lev['leverage'] - infl.hat_matrix_diag))
#print np.max(np.abs(lev['influence'] - infl.influence)) #just added this based on Gretl
#just rough test, low decimal in Gretl output,
assert_almost_equal(lev['residual'], res.resid, decimal=3)
assert_almost_equal(lev['DFFITS'], infl.dffits[0], decimal=3)
assert_almost_equal(lev['leverage'], infl.hat_matrix_diag, decimal=3)
assert_almost_equal(lev['influence'], infl.influence, decimal=4) | Performing iterative calculation of rho...
ITER RHO ESS
1 -0.10734 22530.9
2 -0.10814 22530.9
Model 4: Cochrane-Orcutt, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
rho = -0.108136
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.50990 0.990456 -9.602 3.65e-018 ***
ds_l_realgdp 4.37040 0.208146 21.00 2.93e-052 ***
realint_1 -0.579253 0.268009 -2.161 0.0319 **
Statistics based on the rho-differenced data:
Mean dependent var 3.113973 S.D. dependent var 18.67447
Sum squared resid 22530.90 S.E. of regression 10.66735
R-squared 0.676973 Adjusted R-squared 0.673710
F(2, 198) 221.0475 P-value(F) 3.56e-51
rho -0.003481 Durbin-Watson 1.993858 | test_all | python | statsmodels/statsmodels | statsmodels/regression/tests/test_glsar_gretl.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/tests/test_glsar_gretl.py | BSD-3-Clause |
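The test above checks GLSAR against Gretl's Cochrane-Orcutt output; a condensed sketch of the same estimation flow follows. The growth-rate construction mirrors the test, but nothing is asserted and the printed values are not reference results.

import numpy as np
import statsmodels.api as sm

d = sm.datasets.macrodata.load_pandas().data
gs_l_realinv = 400 * np.diff(np.log(d['realinv'].values))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp'].values))
exog = sm.add_constant(np.column_stack([gs_l_realgdp, d['realint'][:-1].values]))

# AR(1) errors, with rho re-estimated over a few iterations (Cochrane-Orcutt style).
mod = sm.GLSAR(gs_l_realinv, exog, rho=1)
res = mod.iterative_fit(maxiter=5)
print(res.model.rho, res.params)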
def _adjust_missing(self):
"""
Implements alternatives for handling missing values
"""
def keep_col(x):
index = np.logical_not(np.any(np.isnan(x), 0))
return x[:, index], index
def keep_row(x):
index = np.logical_not(np.any(np.isnan(x), 1))
return x[index, :], index
if self._missing == 'drop-col':
self._adjusted_data, index = keep_col(self.data)
self.cols = np.where(index)[0]
self.weights = self.weights[index]
elif self._missing == 'drop-row':
self._adjusted_data, index = keep_row(self.data)
self.rows = np.where(index)[0]
elif self._missing == 'drop-min':
drop_col, drop_col_index = keep_col(self.data)
drop_col_size = drop_col.size
drop_row, drop_row_index = keep_row(self.data)
drop_row_size = drop_row.size
if drop_row_size > drop_col_size:
self._adjusted_data = drop_row
self.rows = np.where(drop_row_index)[0]
else:
self._adjusted_data = drop_col
self.weights = self.weights[drop_col_index]
self.cols = np.where(drop_col_index)[0]
elif self._missing == 'fill-em':
self._adjusted_data = self._fill_missing_em()
elif self._missing is None:
if not np.isfinite(self._adjusted_data).all():
raise ValueError("""\
data contains non-finite values (inf, NaN). You should drop these values or
use one of the methods for adjusting data for missing-values.""")
else:
raise ValueError('missing method is not known.')
if self._index is not None:
self._columns = self._columns[self.cols]
self._index = self._index[self.rows]
# Check adjusted data size
if self._adjusted_data.size == 0:
raise ValueError('Removal of missing values has eliminated '
'all data.') | Implements alternatives for handling missing values | _adjust_missing | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
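A hedged sketch of the missing-data options that _adjust_missing handles, using the PCA class from this module; the injected NaN positions are arbitrary.

import numpy as np
from statsmodels.multivariate.pca import PCA

rng = np.random.default_rng(0)
x = rng.standard_normal((60, 5))
x[3, 1] = np.nan   # a few arbitrary missing cells
x[10, 4] = np.nan

# Each option maps to a branch of _adjust_missing above.
pc_row = PCA(x, ncomp=2, missing='drop-row')  # drop rows containing any NaN
pc_col = PCA(x, ncomp=2, missing='drop-col')  # drop columns containing any NaN
pc_em = PCA(x, ncomp=2, missing='fill-em')    # EM-impute the NaNs, then fit
print(pc_row.factors.shape, pc_col.loadings.shape, pc_em.projection.shape)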
def _compute_gls_weights(self):
"""
Computes GLS weights based on percentage of data fit
"""
projection = np.asarray(self.project(transform=False))
errors = self.transformed_data - projection
if self._ncomp == self._nvar:
raise ValueError('gls can only be used when ncomp < nvar '
'so that residuals have non-zero variance')
var = (errors ** 2.0).mean(0)
weights = 1.0 / var
weights = weights / np.sqrt((weights ** 2.0).mean())
nvar = self._nvar
eff_series_perc = (1.0 / sum((weights / weights.sum()) ** 2.0)) / nvar
if eff_series_perc < 0.1:
eff_series = int(np.round(eff_series_perc * nvar))
import warnings
warn = f"""\
Many series are being down weighted by GLS. Of the {nvar} series, the GLS
estimates are based on only {eff_series} (effective) series."""
warnings.warn(warn, EstimationWarning)
self.weights = weights | Computes GLS weights based on percentage of data fit | _compute_gls_weights | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
def _pca(self):
"""
Main PCA routine
"""
self._compute_eig()
self._compute_pca_from_eig()
self.projection = self.project() | Main PCA routine | _pca | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
def _prepare_data(self):
"""
Standardize or demean data.
"""
adj_data = self._adjusted_data
if np.all(np.isnan(adj_data)):
            return np.full(adj_data.shape[1], np.nan)  # ndarray.fill returns None, so build the NaN array directly
self._mu = np.nanmean(adj_data, axis=0)
self._sigma = np.sqrt(np.nanmean((adj_data - self._mu) ** 2.0, axis=0))
if self._standardize:
data = (adj_data - self._mu) / self._sigma
elif self._demean:
data = (adj_data - self._mu)
else:
data = adj_data
return data / np.sqrt(self.weights) | Standardize or demean data. | _prepare_data | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
def _compute_eig(self):
"""
Wrapper for actual eigenvalue method
This is a workaround to avoid instance methods in __dict__
"""
if self._method == 'eig':
return self._compute_using_eig()
elif self._method == 'svd':
return self._compute_using_svd()
else: # self._method == 'nipals'
return self._compute_using_nipals() | Wrapper for actual eigenvalue method
This is a workaround to avoid instance methods in __dict__ | _compute_eig | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
def _compute_using_svd(self):
"""SVD method to compute eigenvalues and eigenvecs"""
x = self.transformed_data
u, s, v = np.linalg.svd(x, full_matrices=self._svd_full_matrices)
self.eigenvals = s ** 2.0
self.eigenvecs = v.T | SVD method to compute eigenvalues and eigenvecs | _compute_using_svd | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
def _compute_using_eig(self):
"""
Eigenvalue decomposition method to compute eigenvalues and eigenvectors
"""
x = self.transformed_data
self.eigenvals, self.eigenvecs = np.linalg.eigh(x.T.dot(x)) | Eigenvalue decomposition method to compute eigenvalues and eigenvectors | _compute_using_eig | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
def _compute_using_nipals(self):
"""
NIPALS implementation to compute small number of eigenvalues
and eigenvectors
"""
x = self.transformed_data
if self._ncomp > 1:
x = x + 0.0 # Copy
tol, max_iter, ncomp = self._tol, self._max_iter, self._ncomp
vals = np.zeros(self._ncomp)
vecs = np.zeros((self._nvar, self._ncomp))
for i in range(ncomp):
max_var_ind = np.argmax(x.var(0))
factor = x[:, [max_var_ind]]
_iter = 0
diff = 1.0
while diff > tol and _iter < max_iter:
vec = x.T.dot(factor) / (factor.T.dot(factor))
vec = vec / np.sqrt(vec.T.dot(vec))
factor_last = factor
factor = x.dot(vec) / (vec.T.dot(vec))
diff = _norm(factor - factor_last) / _norm(factor)
_iter += 1
vals[i] = (factor ** 2).sum()
vecs[:, [i]] = vec
if ncomp > 1:
x -= factor.dot(vec.T)
self.eigenvals = vals
self.eigenvecs = vecs | NIPALS implementation to compute small number of eigenvalues
and eigenvectors | _compute_using_nipals | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
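A sketch comparing the NIPALS path above with the default SVD path; with a well-conditioned matrix and a small ncomp the retained eigenvalues should agree closely, and the tolerance used in the check is an illustrative choice.

import numpy as np
from statsmodels.multivariate.pca import PCA

rng = np.random.default_rng(1)
x = rng.standard_normal((100, 8))

pc_svd = PCA(x, ncomp=3, method='svd')        # full decomposition, then truncate
pc_nipals = PCA(x, ncomp=3, method='nipals')  # iterates one component at a time

# The retained eigenvalues should match up to numerical tolerance.
print(np.allclose(pc_svd.eigenvals[:3], pc_nipals.eigenvals[:3], rtol=1e-4))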
def _fill_missing_em(self):
"""
EM algorithm to fill missing values
"""
non_missing = np.logical_not(np.isnan(self.data))
# If nothing missing, return without altering the data
if np.all(non_missing):
return self.data
# 1. Standardized data as needed
data = self.transformed_data = np.asarray(self._prepare_data())
ncomp = self._ncomp
# 2. Check for all nans
col_non_missing = np.sum(non_missing, 1)
row_non_missing = np.sum(non_missing, 0)
if np.any(col_non_missing < ncomp) or np.any(row_non_missing < ncomp):
raise ValueError('Implementation requires that all columns and '
'all rows have at least ncomp non-missing values')
# 3. Get mask
mask = np.isnan(data)
# 4. Compute mean
mu = np.nanmean(data, 0)
# 5. Replace missing with mean
projection = np.ones((self._nobs, 1)) * mu
projection_masked = projection[mask]
data[mask] = projection_masked
# 6. Compute eigenvalues and fit
diff = 1.0
_iter = 0
while diff > self._tol_em and _iter < self._max_em_iter:
last_projection_masked = projection_masked
# Set transformed data to compute eigenvalues
self.transformed_data = data
# Call correct eig function here
self._compute_eig()
# Call function to compute factors and projection
self._compute_pca_from_eig()
projection = np.asarray(self.project(transform=False,
unweight=False))
projection_masked = projection[mask]
data[mask] = projection_masked
delta = last_projection_masked - projection_masked
diff = _norm(delta) / _norm(projection_masked)
_iter += 1
# Must copy to avoid overwriting original data since replacing values
data = self._adjusted_data + 0.0
projection = np.asarray(self.project())
data[mask] = projection[mask]
return data | EM algorithm to fill missing values | _fill_missing_em | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
def _compute_pca_from_eig(self):
"""
Compute relevant statistics after eigenvalues have been computed
"""
# Ensure sorted largest to smallest
vals, vecs = self.eigenvals, self.eigenvecs
indices = np.argsort(vals)
indices = indices[::-1]
vals = vals[indices]
vecs = vecs[:, indices]
if (vals <= 0).any():
# Discard and warn
num_good = vals.shape[0] - (vals <= 0).sum()
if num_good < self._ncomp:
import warnings
warnings.warn('Only {num:d} eigenvalues are positive. '
'This is the maximum number of components '
'that can be extracted.'.format(num=num_good),
EstimationWarning)
self._ncomp = num_good
vals[num_good:] = np.finfo(np.float64).tiny
# Use ncomp for the remaining calculations
vals = vals[:self._ncomp]
vecs = vecs[:, :self._ncomp]
self.eigenvals, self.eigenvecs = vals, vecs
# Select correct number of components to return
self.scores = self.factors = self.transformed_data.dot(vecs)
self.loadings = vecs
self.coeff = vecs.T
if self._normalize:
self.coeff = (self.coeff.T * np.sqrt(vals)).T
self.factors /= np.sqrt(vals)
self.scores = self.factors | Compute relevant statistics after eigenvalues have been computed | _compute_pca_from_eig | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
def _compute_rsquare_and_ic(self):
"""
Final statistics to compute
"""
# TSS and related calculations
# TODO: This needs careful testing, with and without weights,
# gls, standardized and demean
weights = self.weights
ss_data = self.transformed_data * np.sqrt(weights)
self._tss_indiv = np.sum(ss_data ** 2, 0)
self._tss = np.sum(self._tss_indiv)
self._ess = np.zeros(self._ncomp + 1)
self._ess_indiv = np.zeros((self._ncomp + 1, self._nvar))
for i in range(self._ncomp + 1):
# Projection in the same space as transformed_data
projection = self.project(ncomp=i, transform=False, unweight=False)
indiv_rss = (projection ** 2).sum(axis=0)
rss = indiv_rss.sum()
self._ess[i] = self._tss - rss
self._ess_indiv[i, :] = self._tss_indiv - indiv_rss
self.rsquare = 1.0 - self._ess / self._tss
# Information Criteria
ess = self._ess
invalid = ess <= 0 # Prevent log issues of 0
if invalid.any():
last_obs = (np.where(invalid)[0]).min()
ess = ess[:last_obs]
log_ess = np.log(ess)
r = np.arange(ess.shape[0])
nobs, nvar = self._nobs, self._nvar
sum_to_prod = (nobs + nvar) / (nobs * nvar)
min_dim = min(nobs, nvar)
penalties = np.array([sum_to_prod * np.log(1.0 / sum_to_prod),
sum_to_prod * np.log(min_dim),
np.log(min_dim) / min_dim])
penalties = penalties[:, None]
ic = log_ess + r * penalties
self.ic = ic.T | Final statistics to compute | _compute_rsquare_and_ic | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
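A sketch of using the R-square path and the Bai and Ng information criteria computed above to choose a factor count; selecting the argmin of IC_p2 is one common convention and is stated here as an assumption.

import numpy as np
from statsmodels.multivariate.pca import PCA

rng = np.random.default_rng(2)
common = rng.standard_normal((150, 2))   # two true factors
x = common @ rng.standard_normal((2, 10)) + 0.5 * rng.standard_normal((150, 10))

pc = PCA(x, ncomp=8)
print(pc.rsquare)                   # fit from including 0, 1, ..., 8 components
print(int(np.argmin(pc.ic[:, 1])))  # number of factors minimizing IC_p2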
def project(self, ncomp=None, transform=True, unweight=True):
"""
Project series onto a specific number of factors.
Parameters
----------
ncomp : int, optional
Number of components to use. If omitted, all components
initially computed are used.
transform : bool, optional
Flag indicating whether to return the projection in the original
space of the data (True, default) or in the space of the
standardized/demeaned data.
unweight : bool, optional
Flag indicating whether to undo the effects of the estimation
weights.
Returns
-------
array_like
The nobs by nvar array of the projection onto ncomp factors.
Notes
-----
"""
# Projection needs to be scaled/shifted based on inputs
ncomp = self._ncomp if ncomp is None else ncomp
if ncomp > self._ncomp:
raise ValueError('ncomp must be smaller than the number of '
'components computed.')
factors = np.asarray(self.factors)
coeff = np.asarray(self.coeff)
projection = factors[:, :ncomp].dot(coeff[:ncomp, :])
if transform or unweight:
projection *= np.sqrt(self.weights)
if transform:
# Remove the weights, which do not depend on transformation
if self._standardize:
projection *= self._sigma
if self._standardize or self._demean:
projection += self._mu
if self._index is not None:
projection = pd.DataFrame(projection,
columns=self._columns,
index=self._index)
return projection | Project series onto a specific number of factors.
Parameters
----------
ncomp : int, optional
Number of components to use. If omitted, all components
initially computed are used.
transform : bool, optional
Flag indicating whether to return the projection in the original
space of the data (True, default) or in the space of the
standardized/demeaned data.
unweight : bool, optional
Flag indicating whether to undo the effects of the estimation
weights.
Returns
-------
array_like
The nobs by nvar array of the projection onto ncomp factors.
Notes
----- | project | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
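A sketch of the project method documented above: rebuild the data from a reduced number of components and look at the reconstruction error; the mean-squared-error summary is an illustrative choice.

import numpy as np
from statsmodels.multivariate.pca import PCA

rng = np.random.default_rng(3)
x = rng.standard_normal((80, 6))
pc = PCA(x, ncomp=6)

approx = pc.project(ncomp=2)   # rank-2 reconstruction in the original data space
mse = np.mean((np.asarray(approx) - x) ** 2)
print(approx.shape, mse)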
def _to_pandas(self):
"""
Returns pandas DataFrames for all values
"""
index = self._index
# Principal Components
num_zeros = np.ceil(np.log10(self._ncomp))
comp_str = 'comp_{0:0' + str(int(num_zeros)) + 'd}'
cols = [comp_str.format(i) for i in range(self._ncomp)]
df = pd.DataFrame(self.factors, columns=cols, index=index)
self.scores = self.factors = df
# Projections
df = pd.DataFrame(self.projection,
columns=self._columns,
index=index)
self.projection = df
# Weights
df = pd.DataFrame(self.coeff, index=cols,
columns=self._columns)
self.coeff = df
# Loadings
df = pd.DataFrame(self.loadings,
index=self._columns, columns=cols)
self.loadings = df
# eigenvals
self.eigenvals = pd.Series(self.eigenvals)
self.eigenvals.name = 'eigenvals'
# eigenvecs
vec_str = comp_str.replace('comp', 'eigenvec')
cols = [vec_str.format(i) for i in range(self.eigenvecs.shape[1])]
self.eigenvecs = pd.DataFrame(self.eigenvecs, columns=cols)
# R2
self.rsquare = pd.Series(self.rsquare)
self.rsquare.index.name = 'ncomp'
self.rsquare.name = 'rsquare'
# IC
self.ic = pd.DataFrame(self.ic, columns=['IC_p1', 'IC_p2', 'IC_p3'])
self.ic.index.name = 'ncomp' | Returns pandas DataFrames for all values | _to_pandas | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
def plot_scree(self, ncomp=None, log_scale=True,
cumulative=False, ax=None):
"""
Plot of the ordered eigenvalues
Parameters
----------
ncomp : int, optional
            Number of components to include in the plot. If None, all
            computed components are included.
        log_scale : bool, optional
            Flag indicating whether to use a log scale for the y-axis
cumulative : bool, optional
Flag indicating whether to plot the eigenvalues or cumulative
eigenvalues
ax : AxesSubplot, optional
            An axes on which to draw the graph. If omitted, a new figure
            is created
Returns
-------
matplotlib.figure.Figure
The handle to the figure.
"""
import statsmodels.graphics.utils as gutils
fig, ax = gutils.create_mpl_ax(ax)
ncomp = self._ncomp if ncomp is None else ncomp
vals = np.asarray(self.eigenvals)
vals = vals[:self._ncomp]
if cumulative:
vals = np.cumsum(vals)
if log_scale:
ax.set_yscale('log')
ax.plot(np.arange(ncomp), vals[: ncomp], 'bo')
ax.autoscale(tight=True)
xlim = np.array(ax.get_xlim())
sp = xlim[1] - xlim[0]
xlim += 0.02 * np.array([-sp, sp])
ax.set_xlim(xlim)
ylim = np.array(ax.get_ylim())
scale = 0.02
if log_scale:
sp = np.log(ylim[1] / ylim[0])
ylim = np.exp(np.array([np.log(ylim[0]) - scale * sp,
np.log(ylim[1]) + scale * sp]))
else:
sp = ylim[1] - ylim[0]
ylim += scale * np.array([-sp, sp])
ax.set_ylim(ylim)
ax.set_title('Scree Plot')
ax.set_ylabel('Eigenvalue')
ax.set_xlabel('Component Number')
fig.tight_layout()
return fig | Plot of the ordered eigenvalues
Parameters
----------
ncomp : int, optional
Number of components to include in the plot. If None, all
computed components are included.
log_scale : bool, optional
Flag indicating whether to use a log scale for the y-axis
cumulative : bool, optional
Flag indicating whether to plot the eigenvalues or cumulative
eigenvalues
ax : AxesSubplot, optional
An axes on which to draw the graph. If omitted, a new figure
is created
Returns
-------
matplotlib.figure.Figure
The handle to the figure. | plot_scree | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
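A sketch of the scree plot helper; it assumes matplotlib is installed, and the figure is saved to an arbitrary file name instead of being shown interactively.

import numpy as np
from statsmodels.multivariate.pca import PCA

rng = np.random.default_rng(4)
x = rng.standard_normal((120, 10))
pc = PCA(x, ncomp=10)

fig = pc.plot_scree(ncomp=10, log_scale=True)  # ordered eigenvalues on a log scale
fig.savefig("scree_example.png")               # illustrative output path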
def plot_rsquare(self, ncomp=None, ax=None):
"""
Box plots of the individual series R-square against the number of PCs.
Parameters
----------
ncomp : int, optional
            Number of components to include in the plot. If None, will
plot the minimum of 10 or the number of computed components.
ax : AxesSubplot, optional
            An axes on which to draw the graph. If omitted, a new figure
            is created.
Returns
-------
matplotlib.figure.Figure
The handle to the figure.
"""
import statsmodels.graphics.utils as gutils
fig, ax = gutils.create_mpl_ax(ax)
ncomp = 10 if ncomp is None else ncomp
ncomp = min(ncomp, self._ncomp)
# R2s in rows, series in columns
r2s = 1.0 - self._ess_indiv / self._tss_indiv
r2s = r2s[1:]
r2s = r2s[:ncomp]
ax.boxplot(r2s.T)
ax.set_title('Individual Input $R^2$')
ax.set_ylabel('$R^2$')
ax.set_xlabel('Number of Included Principal Components')
return fig | Box plots of the individual series R-square against the number of PCs.
Parameters
----------
ncomp : int, optional
Number of components to include in the plot. If None, will
plot the minimum of 10 or the number of computed components.
ax : AxesSubplot, optional
An axes on which to draw the graph. If omitted, a new figure
is created.
Returns
-------
matplotlib.figure.Figure
The handle to the figure. | plot_rsquare | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
def pca(data, ncomp=None, standardize=True, demean=True, normalize=True,
gls=False, weights=None, method='svd'):
"""
Perform Principal Component Analysis (PCA).
Parameters
----------
data : ndarray
Variables in columns, observations in rows.
ncomp : int, optional
        Number of components to return. If None, returns as many components
        as the smaller of the number of rows or columns of data.
standardize : bool, optional
Flag indicating to use standardized data with mean 0 and unit
variance. standardized being True implies demean.
demean : bool, optional
Flag indicating whether to demean data before computing principal
components. demean is ignored if standardize is True.
    normalize : bool, optional
        Indicates whether to normalize the factors to have unit inner
product. If False, the loadings will have unit inner product.
gls : bool, optional
Flag indicating to implement a two-step GLS estimator where
in the first step principal components are used to estimate residuals,
and then the inverse residual variance is used as a set of weights to
estimate the final principal components
weights : ndarray, optional
Series weights to use after transforming data according to standardize
or demean when computing the principal components.
method : str, optional
        Determines the linear algebra routine used. 'svd', the default,
        uses a singular value decomposition. 'eig' uses an eigenvalue
        decomposition.
Returns
-------
factors : {ndarray, DataFrame}
Array (nobs, ncomp) of principal components (also known as scores).
loadings : {ndarray, DataFrame}
Array (ncomp, nvar) of principal component loadings for constructing
the factors.
projection : {ndarray, DataFrame}
Array (nobs, nvar) containing the projection of the data onto the ncomp
estimated factors.
rsquare : {ndarray, Series}
Array (ncomp,) where the element in the ith position is the R-square
        of including the first i principal components. The values are
calculated on the transformed data, not the original data.
ic : {ndarray, DataFrame}
Array (ncomp, 3) containing the Bai and Ng (2003) Information
criteria. Each column is a different criteria, and each row
represents the number of included factors.
eigenvals : {ndarray, Series}
Array of eigenvalues (nvar,).
eigenvecs : {ndarray, DataFrame}
Array of eigenvectors. (nvar, nvar).
Notes
-----
This is a simple function wrapper around the PCA class. See PCA for
more information and additional methods.
"""
pc = PCA(data, ncomp=ncomp, standardize=standardize, demean=demean,
normalize=normalize, gls=gls, weights=weights, method=method)
return (pc.factors, pc.loadings, pc.projection, pc.rsquare, pc.ic,
pc.eigenvals, pc.eigenvecs) | Perform Principal Component Analysis (PCA).
Parameters
----------
data : ndarray
Variables in columns, observations in rows.
ncomp : int, optional
Number of components to return. If None, returns as many components
as the smaller of the number of rows or columns of data.
standardize : bool, optional
Flag indicating to use standardized data with mean 0 and unit
variance. standardized being True implies demean.
demean : bool, optional
Flag indicating whether to demean data before computing principal
components. demean is ignored if standardize is True.
normalize : bool, optional
Indicates whether to normalize the factors to have unit inner
product. If False, the loadings will have unit inner product.
gls : bool, optional
Flag indicating to implement a two-step GLS estimator where
in the first step principal components are used to estimate residuals,
and then the inverse residual variance is used as a set of weights to
estimate the final principal components
weights : ndarray, optional
Series weights to use after transforming data according to standardize
or demean when computing the principal components.
method : str, optional
Determines the linear algebra routine used. 'svd', the default,
uses a singular value decomposition. 'eig' uses an eigenvalue
decomposition.
Returns
-------
factors : {ndarray, DataFrame}
Array (nobs, ncomp) of principal components (also known as scores).
loadings : {ndarray, DataFrame}
Array (ncomp, nvar) of principal component loadings for constructing
the factors.
projection : {ndarray, DataFrame}
Array (nobs, nvar) containing the projection of the data onto the ncomp
estimated factors.
rsquare : {ndarray, Series}
Array (ncomp,) where the element in the ith position is the R-square
of including the first i principal components. The values are
calculated on the transformed data, not the original data.
ic : {ndarray, DataFrame}
Array (ncomp, 3) containing the Bai and Ng (2003) Information
criteria. Each column is a different criteria, and each row
represents the number of included factors.
eigenvals : {ndarray, Series}
Array of eigenvalues (nvar,).
eigenvecs : {ndarray, DataFrame}
Array of eigenvectors. (nvar, nvar).
Notes
-----
This is a simple function wrapper around the PCA class. See PCA for
more information and additional methods. | pca | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
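A sketch of the functional wrapper defined above; it simply unpacks attributes of a fitted PCA instance, so the returned shapes follow directly from the class documentation.

import numpy as np
from statsmodels.multivariate.pca import pca

rng = np.random.default_rng(5)
x = rng.standard_normal((50, 4))

factors, loadings, projection, rsquare, ic, eigenvals, eigenvecs = pca(
    x, ncomp=2, standardize=True, method='svd')
print(factors.shape, loadings.shape, projection.shape)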
def endog_names(self):
"""Names of endogenous variables"""
if self._endog_names is not None:
return self._endog_names
else:
if self.endog is not None:
return self.data.ynames
else:
d = 0
n = self.corr.shape[0] - 1
while n > 0:
d += 1
n //= 10
return [('var%0' + str(d) + 'd') % i
for i in range(self.corr.shape[0])] | Names of endogenous variables | endog_names | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def fit(self, maxiter=50, tol=1e-8, start=None, opt_method='BFGS',
opt=None, em_iter=3):
"""
Estimate factor model parameters.
Parameters
----------
maxiter : int
Maximum number of iterations for iterative estimation algorithms
tol : float
Stopping criteria (error tolerance) for iterative estimation
algorithms
start : array_like
Starting values, currently only used for ML estimation
opt_method : str
Optimization method for ML estimation
opt : dict-like
Keyword arguments passed to optimizer, only used for ML estimation
em_iter : int
The number of EM iterations before starting gradient optimization,
only used for ML estimation.
Returns
-------
FactorResults
Results class instance.
"""
method = self.method.lower()
if method == 'pa':
return self._fit_pa(maxiter=maxiter, tol=tol)
elif method == 'ml':
return self._fit_ml(start, em_iter, opt_method, opt)
else:
msg = "Unknown factor extraction approach '%s'" % self.method
raise ValueError(msg) | Estimate factor model parameters.
Parameters
----------
maxiter : int
Maximum number of iterations for iterative estimation algorithms
tol : float
Stopping criteria (error tolerance) for iterative estimation
algorithms
start : array_like
Starting values, currently only used for ML estimation
opt_method : str
Optimization method for ML estimation
opt : dict-like
Keyword arguments passed to optimizer, only used for ML estimation
em_iter : int
The number of EM iterations before starting gradient optimization,
only used for ML estimation.
Returns
-------
FactorResults
Results class instance. | fit | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
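A hedged end-to-end sketch of the fit method above. The DataFrame, seed, number of factors, and the choice of method='ml' are illustrative.

import numpy as np
import pandas as pd
from statsmodels.multivariate.factor import Factor

rng = np.random.default_rng(1)
df = pd.DataFrame(rng.standard_normal((200, 6)),
                  columns=['v%d' % i for i in range(6)])
# Maximum likelihood extraction; method='pa' (the default) would use the
# iterative principal axis method instead.
res = Factor(df, n_factor=2, method='ml').fit()
print(res.loadings.shape)  # (6, 2) estimated factor loadings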
def _fit_pa(self, maxiter=50, tol=1e-8):
"""
Extract factors using the iterative principal axis method
Parameters
----------
maxiter : int
Maximum number of iterations for communality estimation
tol : float
If `norm(communality - last_communality) < tolerance`,
estimation stops
Returns
-------
results : FactorResults instance
"""
R = self.corr.copy() # inplace modification below
# Parameter validation
self.n_comp = matrix_rank(R)
if self.n_factor > self.n_comp:
raise ValueError('n_factor must be smaller or equal to the rank'
' of endog! %d > %d' %
(self.n_factor, self.n_comp))
if maxiter <= 0:
            raise ValueError('maxiter must be larger than 0! Got %d instead' %
(maxiter))
if tol <= 0 or tol > 0.01:
raise ValueError('tolerance must be larger than 0 and smaller than'
' 0.01! Got %f instead' % (tol))
# Initial communality estimation
if self.smc:
c = 1 - 1 / np.diag(inv(R))
else:
c = np.ones(len(R))
# Iterative communality estimation
eigenvals = None
for i in range(maxiter):
# Get eigenvalues/eigenvectors of R with diag replaced by
# communality
for j in range(len(R)):
R[j, j] = c[j]
L, V = eigh(R, UPLO='U')
c_last = np.array(c)
ind = np.argsort(L)
ind = ind[::-1]
L = L[ind]
n_pos = (L > 0).sum()
V = V[:, ind]
eigenvals = np.array(L)
# Select eigenvectors with positive eigenvalues
n = np.min([n_pos, self.n_factor])
sL = np.diag(np.sqrt(L[:n]))
V = V[:, :n]
# Calculate new loadings and communality
A = V.dot(sL)
c = np.power(A, 2).sum(axis=1)
if norm(c_last - c) < tol:
break
self.eigenvals = eigenvals
self.communality = c
self.uniqueness = 1 - c
self.loadings = A
return FactorResults(self) | Extract factors using the iterative principal axis method
Parameters
----------
maxiter : int
Maximum number of iterations for communality estimation
tol : float
If `norm(communality - last_communality) < tolerance`,
estimation stops
Returns
-------
results : FactorResults instance | _fit_pa | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
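A small numpy sketch of the squared multiple correlation (SMC) starting communalities used by _fit_pa when smc=True; the toy correlation matrix is illustrative.

import numpy as np

# SMC initial communalities, as in _fit_pa above: c_j = 1 - 1 / [inv(R)]_jj
R = np.array([[1.0, 0.5, 0.3],
              [0.5, 1.0, 0.4],
              [0.3, 0.4, 1.0]])
c = 1 - 1 / np.diag(np.linalg.inv(R))
print(c)  # one starting communality per observed variable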
def loglike(self, par):
"""
Evaluate the log-likelihood function.
Parameters
----------
par : ndarray or tuple of 2 ndarray's
The model parameters, either a packed representation of
the model parameters or a 2-tuple containing a `k_endog x
n_factor` matrix of factor loadings and a `k_endog` vector
of uniquenesses.
Returns
-------
float
The value of the log-likelihood evaluated at par.
"""
if type(par) is np.ndarray:
uniq, load = self._unpack(par)
else:
load, uniq = par[0], par[1]
loadu = load / uniq[:, None]
lul = np.dot(load.T, loadu)
# log|GG' + S|
# Using matrix determinant lemma:
# |GG' + S| = |I + G'S^{-1}G|*|S|
lul.flat[::lul.shape[0]+1] += 1
_, ld = np.linalg.slogdet(lul)
v = np.sum(np.log(uniq)) + ld
# tr((GG' + S)^{-1}C)
# Using Sherman-Morrison-Woodbury
w = np.sum(1 / uniq)
b = np.dot(load.T, self.corr / uniq[:, None])
b = np.linalg.solve(lul, b)
b = np.dot(loadu, b)
w -= np.trace(b)
# Scaled log-likelihood
return -(v + w) / (2*self.k_endog) | Evaluate the log-likelihood function.
Parameters
----------
par : ndarray or tuple of 2 ndarray's
The model parameters, either a packed representation of
the model parameters or a 2-tuple containing a `k_endog x
n_factor` matrix of factor loadings and a `k_endog` vector
of uniquenesses.
Returns
-------
float
The value of the log-likelihood evaluated at par. | loglike | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
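The two identities referenced in the comments of loglike above, written out. Here G is the loading matrix, S the diagonal matrix of uniquenesses, and C the sample correlation matrix:

\log\lvert GG' + S \rvert = \log\lvert I + G' S^{-1} G \rvert + \log\lvert S \rvert

\operatorname{tr}\bigl[(GG' + S)^{-1} C\bigr] = \operatorname{tr}(S^{-1} C) - \operatorname{tr}\bigl[S^{-1} G (I + G' S^{-1} G)^{-1} G' S^{-1} C\bigr]

The method then returns -\bigl(\log\lvert GG' + S\rvert + \operatorname{tr}[(GG' + S)^{-1} C]\bigr) / (2 k_{endog}), the scaled log-likelihood noted in the final comment.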
def score(self, par):
"""
Evaluate the score function (first derivative of loglike).
Parameters
----------
par : ndarray or tuple of 2 ndarray's
The model parameters, either a packed representation of
the model parameters or a 2-tuple containing a `k_endog x
n_factor` matrix of factor loadings and a `k_endog` vector
of uniquenesses.
Returns
-------
ndarray
The score function evaluated at par.
"""
if type(par) is np.ndarray:
uniq, load = self._unpack(par)
else:
load, uniq = par[0], par[1]
# Center term of SMW
loadu = load / uniq[:, None]
c = np.dot(load.T, loadu)
c.flat[::c.shape[0]+1] += 1
d = np.linalg.solve(c, load.T)
# Precompute these terms
lud = np.dot(loadu, d)
cu = (self.corr / uniq) / uniq[:, None]
r = np.dot(cu, load)
lul = np.dot(lud.T, load)
luz = np.dot(cu, lul)
# First term
du = 2*np.sqrt(uniq) * (1/uniq - (d * load.T).sum(0) / uniq**2)
dl = 2*(loadu - np.dot(lud, loadu))
# Second term
h = np.dot(lud, cu)
f = np.dot(h, lud.T)
du -= 2*np.sqrt(uniq) * (np.diag(cu) - 2*np.diag(h) + np.diag(f))
dl -= 2*r
dl += 2*np.dot(lud, r)
dl += 2*luz
dl -= 2*np.dot(lud, luz)
# Cannot use _pack because we are working with the square root
# uniquenesses directly.
return -np.concatenate((du, dl.T.flat)) / (2*self.k_endog) | Evaluate the score function (first derivative of loglike).
Parameters
----------
par : ndarray or tuple of 2 ndarray's
The model parameters, either a packed representation of
the model parameters or a 2-tuple containing a `k_endog x
n_factor` matrix of factor loadings and a `k_endog` vector
of uniquenesses.
Returns
-------
ndarray
The score function evaluated at par. | score | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def _fit_ml(self, start, em_iter, opt_method, opt):
"""estimate Factor model using Maximum Likelihood
"""
# Starting values
if start is None:
load, uniq = self._fit_ml_em(em_iter)
start = self._pack(load, uniq)
elif len(start) == 2:
if len(start[1]) != start[0].shape[0]:
msg = "Starting values have incompatible dimensions"
raise ValueError(msg)
start = self._pack(start[0], start[1])
else:
raise ValueError("Invalid starting values")
def nloglike(par):
return -self.loglike(par)
def nscore(par):
return -self.score(par)
# Do the optimization
if opt is None:
opt = _opt_defaults
r = minimize(nloglike, start, jac=nscore, method=opt_method,
options=opt)
if not r.success:
warnings.warn("Fitting did not converge")
par = r.x
uniq, load = self._unpack(par)
if uniq.min() < 1e-10:
warnings.warn("Some uniquenesses are nearly zero")
# Rotate solution to satisfy IC3 of Bai and Li
load = self._rotate(load, uniq)
self.uniqueness = uniq
self.communality = 1 - uniq
self.loadings = load
self.mle_retvals = r
return FactorResults(self) | estimate Factor model using Maximum Likelihood | _fit_ml | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def _fit_ml_em(self, iter, random_state=None):
"""estimate Factor model using EM algorithm
"""
# Starting values
if random_state is None:
random_state = np.random.RandomState(3427)
load = 0.1 * random_state.standard_normal(size=(self.k_endog, self.n_factor))
uniq = 0.5 * np.ones(self.k_endog)
for k in range(iter):
loadu = load / uniq[:, None]
f = np.dot(load.T, loadu)
f.flat[::f.shape[0]+1] += 1
r = np.linalg.solve(f, loadu.T)
q = np.dot(loadu.T, load)
h = np.dot(r, load)
c = load - np.dot(load, h)
c /= uniq[:, None]
g = np.dot(q, r)
e = np.dot(g, self.corr)
d = np.dot(loadu.T, self.corr) - e
a = np.dot(d, c)
a -= np.dot(load.T, c)
a.flat[::a.shape[0]+1] += 1
b = np.dot(self.corr, c)
load = np.linalg.solve(a, b.T).T
uniq = np.diag(self.corr) - (load * d.T).sum(1)
return load, uniq | estimate Factor model using EM algorithm | _fit_ml_em | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def _rotate(self, load, uniq):
"""rotate loadings for MLE
"""
# Rotations used in ML estimation.
load, s, _ = np.linalg.svd(load, 0)
load *= s
if self.nobs is None:
nobs = 1
else:
nobs = self.nobs
cm = np.dot(load.T, load / uniq[:, None]) / nobs
_, f = np.linalg.eig(cm)
load = np.dot(load, f)
return load | rotate loadings for MLE | _rotate | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def rotate(self, method):
"""
Apply rotation, inplace modification of this Results instance
Parameters
----------
method : str
Rotation to be applied. Allowed methods are varimax,
quartimax, biquartimax, equamax, oblimin, parsimax,
parsimony, biquartimin, promax.
Returns
-------
None : nothing returned, modifications are inplace
Notes
-----
Warning: 'varimax', 'quartimax' and 'oblimin' are verified against R or
Stata. Some rotation methods such as promax do not produce the same
results as the R or Stata default functions.
See Also
--------
factor_rotation : subpackage that implements rotation methods
"""
self.rotation_method = method
if method not in ['varimax', 'quartimax', 'biquartimax',
'equamax', 'oblimin', 'parsimax', 'parsimony',
'biquartimin', 'promax']:
raise ValueError('Unknown rotation method %s' % (method))
if method in ['varimax', 'quartimax', 'biquartimax', 'equamax',
'parsimax', 'parsimony', 'biquartimin']:
self.loadings, T = rotate_factors(self.loadings_no_rot, method)
elif method == 'oblimin':
self.loadings, T = rotate_factors(self.loadings_no_rot,
'quartimin')
elif method == 'promax':
self.loadings, T = promax(self.loadings_no_rot)
else:
raise ValueError('rotation method not recognized')
self.rotation_matrix = T | Apply rotation, inplace modification of this Results instance
Parameters
----------
method : str
Rotation to be applied. Allowed methods are varimax,
quartimax, biquartimax, equamax, oblimin, parsimax,
parsimony, biquartimin, promax.
Returns
-------
None : nothing returned, modifications are inplace
Notes
-----
Warning: 'varimax', 'quartimax' and 'oblimin' are verified against R or
Stata. Some rotation methods such as promax do not produce the same
results as the R or Stata default functions.
See Also
--------
factor_rotation : subpackage that implements rotation methods | rotate | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
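A brief sketch of applying one of the allowed rotations listed above to a fitted results instance; the data, seed, and number of factors are illustrative.

import numpy as np
from statsmodels.multivariate.factor import Factor

rng = np.random.default_rng(2)
x = rng.standard_normal((150, 5))
res = Factor(x, n_factor=2).fit()  # default principal axis extraction
res.rotate('varimax')              # in place: updates loadings and rotation_matrix
print(res.rotation_method)         # 'varimax'
print(res.loadings_no_rot.shape, res.loadings.shape)  # pre- and post-rotation loadings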
def _corr_factors(self):
"""correlation of factors implied by rotation
If the rotation is oblique, then the factors are correlated.
currently not cached
Returns
-------
corr_f : ndarray
correlation matrix of rotated factors, assuming initial factors are
orthogonal
"""
T = self.rotation_matrix
corr_f = T.T.dot(T)
return corr_f | correlation of factors implied by rotation
If the rotation is oblique, then the factors are correlated.
currently not cached
Returns
-------
corr_f : ndarray
correlation matrix of rotated factors, assuming initial factors are
orthogonal | _corr_factors | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def factor_score_params(self, method='bartlett'):
"""
Compute factor scoring coefficient matrix
The coefficient matrix is not cached.
Parameters
----------
method : 'bartlett' or 'regression'
Method to use for factor scoring.
'regression' can be abbreviated to `reg`
Returns
-------
coeff_matrix : ndarray
matrix s to compute factors f from a standardized endog ys.
``f = ys dot s``
Notes
-----
The `regression` method follows the Stata definition.
        Methods bartlett and regression are verified against Stata.
Two unofficial methods, 'ols' and 'gls', produce similar factor scores
but are not verified.
See Also
--------
statsmodels.multivariate.factor.FactorResults.factor_scoring
"""
L = self.loadings
#TODO: check row versus column convention for T
uni = 1 - self.communality #self.uniqueness
if method == 'bartlett':
s_mat = np.linalg.inv(L.T.dot(L/(uni[:,None]))).dot(L.T / uni).T
elif method.startswith('reg'):
corr = self.model.corr
corr_f = self._corr_factors()
# if orthogonal then corr_f is just eye
s_mat = corr_f.dot(L.T.dot(np.linalg.inv(corr))).T
elif method == 'ols':
# not verified
corr = self.model.corr
corr_f = self._corr_factors()
s_mat = corr_f.dot(np.linalg.pinv(L)).T
elif method == 'gls':
# not verified
#s_mat = np.linalg.inv(1*np.eye(L.shape[1]) + L.T.dot(L/(uni[:,None])))
corr = self.model.corr
corr_f = self._corr_factors()
s_mat = np.linalg.inv(np.linalg.inv(corr_f) + L.T.dot(L/(uni[:,None])))
s_mat = s_mat.dot(L.T / uni).T
else:
            raise ValueError('method not available, use "bartlett" ' +
                             'or "regression"')
return s_mat | Compute factor scoring coefficient matrix
The coefficient matrix is not cached.
Parameters
----------
method : 'bartlett' or 'regression'
Method to use for factor scoring.
'regression' can be abbreviated to `reg`
Returns
-------
coeff_matrix : ndarray
matrix s to compute factors f from a standardized endog ys.
``f = ys dot s``
Notes
-----
The `regression` method follows the Stata definition.
        Methods bartlett and regression are verified against Stata.
Two unofficial methods, 'ols' and 'gls', produce similar factor scores
but are not verified.
See Also
--------
statsmodels.multivariate.factor.FactorResults.factor_scoring | factor_score_params | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def factor_scoring(self, endog=None, method='bartlett', transform=True):
"""
factor scoring: compute factors for endog
If endog was not provided when creating the factor class, then
        a standardized endog needs to be provided here.
Parameters
----------
method : 'bartlett' or 'regression'
Method to use for factor scoring.
'regression' can be abbreviated to `reg`
transform : bool
If transform is true and endog is provided, then it will be
standardized using mean and scale of original data, which has to
be available in this case.
If transform is False, then a provided endog will be used unchanged.
The original endog in the Factor class will
always be standardized if endog is None, independently of `transform`.
Returns
-------
factor_score : ndarray
            estimated factors using scoring matrix s and standardized endog ys
``f = ys dot s``
Notes
-----
Status: transform option is experimental and might change.
See Also
--------
statsmodels.multivariate.factor.FactorResults.factor_score_params
"""
if transform is False and endog is not None:
# no transformation in this case
endog = np.asarray(endog)
else:
# we need to standardize with the original mean and scale
if self.model.endog is not None:
m = self.model.endog.mean(0)
s = self.model.endog.std(ddof=1, axis=0)
if endog is None:
endog = self.model.endog
else:
endog = np.asarray(endog)
else:
raise ValueError('If transform is True, then `endog` needs ' +
'to be available in the Factor instance.')
endog = (endog - m) / s
s_mat = self.factor_score_params(method=method)
factors = endog.dot(s_mat)
return factors | factor scoring: compute factors for endog
If endog was not provided when creating the factor class, then
        a standardized endog needs to be provided here.
Parameters
----------
method : 'bartlett' or 'regression'
Method to use for factor scoring.
'regression' can be abbreviated to `reg`
transform : bool
If transform is true and endog is provided, then it will be
standardized using mean and scale of original data, which has to
be available in this case.
If transform is False, then a provided endog will be used unchanged.
The original endog in the Factor class will
always be standardized if endog is None, independently of `transform`.
Returns
-------
factor_score : ndarray
            estimated factors using scoring matrix s and standardized endog ys
``f = ys dot s``
Notes
-----
Status: transform option is experimental and might change.
See Also
--------
statsmodels.multivariate.factor.FactorResults.factor_score_params | factor_scoring | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
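A hedged sketch combining factor_score_params and factor_scoring above; the data and seed are illustrative, and the final check mirrors the standardization done inside factor_scoring.

import numpy as np
from statsmodels.multivariate.factor import Factor

rng = np.random.default_rng(3)
x = rng.standard_normal((150, 5))
res = Factor(x, n_factor=2).fit()
s_mat = res.factor_score_params(method='bartlett')  # (k_endog, n_factor) scoring matrix
f = res.factor_scoring(method='bartlett')           # f = standardized endog dot s_mat
ys = (x - x.mean(0)) / x.std(ddof=1, axis=0)
print(np.allclose(f, ys.dot(s_mat)))                # True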
def summary(self):
"""Summary"""
summ = summary2.Summary()
summ.add_title('Factor analysis results')
loadings_no_rot = pd.DataFrame(
self.loadings_no_rot,
columns=["factor %d" % (i)
for i in range(self.loadings_no_rot.shape[1])],
index=self.endog_names
)
if hasattr(self, "eigenvals"):
# eigenvals not available for ML method
eigenvals = pd.DataFrame(
[self.eigenvals], columns=self.endog_names, index=[''])
summ.add_dict({'': 'Eigenvalues'})
summ.add_df(eigenvals)
communality = pd.DataFrame([self.communality],
columns=self.endog_names, index=[''])
summ.add_dict({'': ''})
summ.add_dict({'': 'Communality'})
summ.add_df(communality)
summ.add_dict({'': ''})
summ.add_dict({'': 'Pre-rotated loadings'})
summ.add_df(loadings_no_rot)
summ.add_dict({'': ''})
if self.rotation_method is not None:
loadings = pd.DataFrame(
self.loadings,
columns=["factor %d" % (i)
for i in range(self.loadings.shape[1])],
index=self.endog_names
)
summ.add_dict({'': '%s rotated loadings' % (self.rotation_method)})
summ.add_df(loadings)
return summ | Summary | summary | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def color_white_small(val):
"""
Takes a scalar and returns a string with
the css property `'color: white'` for small values, black otherwise.
takes threshold from outer scope
"""
color = 'white' if np.abs(val) < threshold else 'black'
return 'color: %s' % color | Takes a scalar and returns a string with
the css property `'color: white'` for small values, black otherwise.
takes threshold from outer scope | get_loadings_frame.color_white_small | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def highlight_max(s):
'''
highlight the maximum in a Series yellow.
'''
s = np.abs(s)
is_max = s == s.max()
return ['background-color: '+ color_max if v else '' for v in is_max] | highlight the maximum in a Series yellow. | get_loadings_frame.highlight_max | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def get_loadings_frame(self, style='display', sort_=True, threshold=0.3,
highlight_max=True, color_max='yellow',
decimals=None):
"""get loadings matrix as DataFrame or pandas Styler
Parameters
----------
style : 'display' (default), 'raw' or 'strings'
Style to use for display
* 'raw' returns just a DataFrame of the loadings matrix, no options are
applied
* 'display' add sorting and styling as defined by other keywords
* 'strings' returns a DataFrame with string elements with optional sorting
and suppressing small loading coefficients.
sort_ : bool
            If True, then the rows of the DataFrame are sorted by the
            contribution of each factor. Applies if style is either 'display'
            or 'strings'.
threshold : float
If the threshold is larger than zero, then loading coefficients are
            either colored white (if style is 'display') or replaced by an empty
string (if style is 'strings').
highlight_max : bool
            This adds a background color to the largest coefficient in each row.
color_max : html color
default is 'yellow'. color for background of row maximum
decimals : None or int
If None, then pandas default precision applies. Otherwise values are
rounded to the specified decimals. If style is 'display', then the
underlying dataframe is not changed. If style is 'strings', then
values are rounded before conversion to strings.
Returns
-------
loadings : DataFrame or pandas Styler instance
The return is a pandas Styler instance, if style is 'display' and
at least one of highlight_max, threshold or decimals is applied.
Otherwise, the returned loadings is a DataFrame.
Examples
--------
>>> mod = Factor(df, 3, smc=True)
>>> res = mod.fit()
>>> res.get_loadings_frame(style='display', decimals=3, threshold=0.2)
To get a sorted DataFrame, all styling options need to be turned off:
>>> df_sorted = res.get_loadings_frame(style='display',
... highlight_max=False, decimals=None, threshold=0)
        Options except for highlighting are available for plain text or LaTeX
usage:
>>> lds = res_u.get_loadings_frame(style='strings', decimals=3,
... threshold=0.3)
>>> print(lds.to_latex())
"""
loadings_df = pd.DataFrame(
self.loadings,
columns=["factor %d" % (i)
for i in range(self.loadings.shape[1])],
index=self.endog_names
)
if style not in ['raw', 'display', 'strings']:
msg = "style has to be one of 'raw', 'display', 'strings'"
raise ValueError(msg)
if style == 'raw':
return loadings_df
# add sorting and some formatting
if sort_ is True:
loadings_df2 = loadings_df.copy()
n_f = len(loadings_df2)
high = np.abs(loadings_df2.values).argmax(1)
loadings_df2['high'] = high
loadings_df2['largest'] = np.abs(loadings_df.values[np.arange(n_f), high])
loadings_df2.sort_values(by=['high', 'largest'], ascending=[True, False], inplace=True)
loadings_df = loadings_df2.drop(['high', 'largest'], axis=1)
if style == 'display':
sty = None
if threshold > 0:
def color_white_small(val):
"""
Takes a scalar and returns a string with
the css property `'color: white'` for small values, black otherwise.
takes threshold from outer scope
"""
color = 'white' if np.abs(val) < threshold else 'black'
return 'color: %s' % color
try:
sty = loadings_df.style.map(color_white_small)
except AttributeError:
# Deprecated in pandas 2.1
sty = loadings_df.style.applymap(color_white_small)
if highlight_max is True:
def highlight_max(s):
'''
highlight the maximum in a Series yellow.
'''
s = np.abs(s)
is_max = s == s.max()
return ['background-color: '+ color_max if v else '' for v in is_max]
if sty is None:
sty = loadings_df.style
sty = sty.apply(highlight_max, axis=1)
if decimals is not None:
if sty is None:
sty = loadings_df.style
sty.format("{:.%sf}" % decimals)
if sty is None:
return loadings_df
else:
return sty
if style == 'strings':
ld = loadings_df
if decimals is not None:
ld = ld.round(decimals)
ld = ld.astype(str)
if threshold > 0:
ld[loadings_df.abs() < threshold] = ''
return ld | get loadings matrix as DataFrame or pandas Styler
Parameters
----------
style : 'display' (default), 'raw' or 'strings'
Style to use for display
* 'raw' returns just a DataFrame of the loadings matrix, no options are
applied
* 'display' add sorting and styling as defined by other keywords
* 'strings' returns a DataFrame with string elements with optional sorting
and suppressing small loading coefficients.
sort_ : bool
            If True, then the rows of the DataFrame are sorted by the
            contribution of each factor. Applies if style is either 'display'
            or 'strings'.
threshold : float
If the threshold is larger than zero, then loading coefficients are
            either colored white (if style is 'display') or replaced by an empty
string (if style is 'strings').
highlight_max : bool
            This adds a background color to the largest coefficient in each row.
color_max : html color
default is 'yellow'. color for background of row maximum
decimals : None or int
If None, then pandas default precision applies. Otherwise values are
rounded to the specified decimals. If style is 'display', then the
underlying dataframe is not changed. If style is 'strings', then
values are rounded before conversion to strings.
Returns
-------
loadings : DataFrame or pandas Styler instance
The return is a pandas Styler instance, if style is 'display' and
at least one of highlight_max, threshold or decimals is applied.
Otherwise, the returned loadings is a DataFrame.
Examples
--------
>>> mod = Factor(df, 3, smc=True)
>>> res = mod.fit()
>>> res.get_loadings_frame(style='display', decimals=3, threshold=0.2)
To get a sorted DataFrame, all styling options need to be turned off:
>>> df_sorted = res.get_loadings_frame(style='display',
... highlight_max=False, decimals=None, threshold=0)
        Options except for highlighting are available for plain text or LaTeX
usage:
>>> lds = res_u.get_loadings_frame(style='strings', decimals=3,
... threshold=0.3)
>>> print(lds.to_latex()) | get_loadings_frame | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def plot_scree(self, ncomp=None):
"""
Plot of the ordered eigenvalues and variance explained for the loadings
Parameters
----------
ncomp : int, optional
            Number of loadings to include in the plot. If None, the maximum
            possible number of loadings is included.
Returns
-------
Figure
Handle to the figure.
"""
_import_mpl()
from .plots import plot_scree
return plot_scree(self.eigenvals, self.n_comp, ncomp) | Plot of the ordered eigenvalues and variance explained for the loadings
Parameters
----------
ncomp : int, optional
            Number of loadings to include in the plot. If None, the maximum
            possible number of loadings is included.
Returns
-------
Figure
Handle to the figure. | plot_scree | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def plot_loadings(self, loading_pairs=None, plot_prerotated=False):
"""
Plot factor loadings in 2-d plots
Parameters
----------
loading_pairs : None or a list of tuples
Specify plots. Each tuple (i, j) represent one figure, i and j is
the loading number for x-axis and y-axis, respectively. If `None`,
all combinations of the loadings will be plotted.
plot_prerotated : True or False
            If True, the loadings before rotation is applied will be plotted. If
False, rotated loadings will be plotted.
Returns
-------
figs : a list of figure handles
"""
_import_mpl()
from .plots import plot_loadings
if self.rotation_method is None:
plot_prerotated = True
loadings = self.loadings_no_rot if plot_prerotated else self.loadings
if plot_prerotated:
title = 'Prerotated Factor Pattern'
else:
title = '%s Rotated Factor Pattern' % (self.rotation_method)
var_explained = self.eigenvals / self.n_comp * 100
return plot_loadings(loadings, loading_pairs=loading_pairs,
title=title, row_names=self.endog_names,
percent_variance=var_explained) | Plot factor loadings in 2-d plots
Parameters
----------
loading_pairs : None or a list of tuples
Specify plots. Each tuple (i, j) represent one figure, i and j is
the loading number for x-axis and y-axis, respectively. If `None`,
all combinations of the loadings will be plotted.
plot_prerotated : True or False
            If True, the loadings before rotation is applied will be plotted. If
False, rotated loadings will be plotted.
Returns
-------
figs : a list of figure handles | plot_loadings | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def fitted_cov(self):
"""
Returns the fitted covariance matrix.
"""
c = np.dot(self.loadings, self.loadings.T)
c.flat[::c.shape[0]+1] += self.uniqueness
return c | Returns the fitted covariance matrix. | fitted_cov | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def uniq_stderr(self, kurt=0):
"""
The standard errors of the uniquenesses.
Parameters
----------
kurt : float
Excess kurtosis
Notes
-----
If excess kurtosis is known, provide as `kurt`. Standard
errors are only available if the model was fit using maximum
likelihood. If `endog` is not provided, `nobs` must be
provided to obtain standard errors.
These are asymptotic standard errors. See Bai and Li (2012)
for conditions under which the standard errors are valid.
The standard errors are only applicable to the original,
unrotated maximum likelihood solution.
"""
if self.fa_method.lower() != "ml":
msg = "Standard errors only available under ML estimation"
raise ValueError(msg)
if self.nobs is None:
msg = "nobs is required to obtain standard errors."
raise ValueError(msg)
v = self.uniqueness**2 * (2 + kurt)
return np.sqrt(v / self.nobs) | The standard errors of the uniquenesses.
Parameters
----------
kurt : float
Excess kurtosis
Notes
-----
If excess kurtosis is known, provide as `kurt`. Standard
errors are only available if the model was fit using maximum
likelihood. If `endog` is not provided, `nobs` must be
provided to obtain standard errors.
These are asymptotic standard errors. See Bai and Li (2012)
for conditions under which the standard errors are valid.
The standard errors are only applicable to the original,
unrotated maximum likelihood solution. | uniq_stderr | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def load_stderr(self):
"""
The standard errors of the loadings.
Standard errors are only available if the model was fit using
maximum likelihood. If `endog` is not provided, `nobs` must be
provided to obtain standard errors.
These are asymptotic standard errors. See Bai and Li (2012)
for conditions under which the standard errors are valid.
The standard errors are only applicable to the original,
unrotated maximum likelihood solution.
"""
if self.fa_method.lower() != "ml":
msg = "Standard errors only available under ML estimation"
raise ValueError(msg)
if self.nobs is None:
msg = "nobs is required to obtain standard errors."
raise ValueError(msg)
v = np.outer(self.uniqueness, np.ones(self.loadings.shape[1]))
return np.sqrt(v / self.nobs) | The standard errors of the loadings.
Standard errors are only available if the model was fit using
maximum likelihood. If `endog` is not provided, `nobs` must be
provided to obtain standard errors.
These are asymptotic standard errors. See Bai and Li (2012)
for conditions under which the standard errors are valid.
The standard errors are only applicable to the original,
unrotated maximum likelihood solution. | load_stderr | python | statsmodels/statsmodels | statsmodels/multivariate/factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor.py | BSD-3-Clause |
def mv_test(self, hypotheses=None, skip_intercept_test=False):
"""
Linear hypotheses testing
Parameters
----------
%(hypotheses_doc)s
skip_intercept_test : bool
If true, then testing the intercept is skipped, the model is not
changed.
Note: If a term has a numerically insignificant effect, then
            an exception because of empty arrays may be raised. This can
happen for the intercept if the data has been demeaned.
Returns
-------
results: MultivariateTestResults
Notes
-----
Testing the linear hypotheses
L * params * M = 0
where `params` is the regression coefficient matrix for the
linear model y = x * params
        If the model is not specified using the formula interface, then the
hypotheses test each included exogenous variable, one at a time. In
most applications with categorical variables, the ``from_formula``
interface should be preferred when specifying a model since it
provides knowledge about the model when specifying the hypotheses.
"""
if hypotheses is None:
if (
hasattr(self, "data")
and self.data is not None
and hasattr(self.data, "model_spec")
):
# TODO: patsy migration
mgr = FormulaManager()
terms = mgr.get_term_name_slices(self.data.model_spec)
hypotheses = []
for key in terms:
if skip_intercept_test and (
key == "Intercept" or key == mgr.intercept_term
):
continue
L_contrast = np.eye(self.exog.shape[1])[terms[key], :]
test_name = str(key)
if key == mgr.intercept_term:
test_name = "Intercept"
hypotheses.append([test_name, L_contrast, None])
else:
hypotheses = []
for i in range(self.exog.shape[1]):
name = "x%d" % (i)
L = np.zeros([1, self.exog.shape[1]])
L[0, i] = 1
hypotheses.append([name, L, None])
results = _multivariate_ols_test(
hypotheses, self._fittedmod, self.exog_names, self.endog_names
)
return MultivariateTestResults(results, self.endog_names, self.exog_names) | Linear hypotheses testing
Parameters
----------
%(hypotheses_doc)s
skip_intercept_test : bool
If true, then testing the intercept is skipped, the model is not
changed.
Note: If a term has a numerically insignificant effect, then
            an exception because of empty arrays may be raised. This can
happen for the intercept if the data has been demeaned.
Returns
-------
results: MultivariateTestResults
Notes
-----
Testing the linear hypotheses
L * params * M = 0
where `params` is the regression coefficient matrix for the
linear model y = x * params
        If the model is not specified using the formula interface, then the
hypotheses test each included exogenous variable, one at a time. In
most applications with categorical variables, the ``from_formula``
interface should be preferred when specifying a model since it
provides knowledge about the model when specifying the hypotheses. | mv_test | python | statsmodels/statsmodels | statsmodels/multivariate/manova.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/manova.py | BSD-3-Clause |
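A hedged sketch of the formula interface recommended in the Notes above; the column names, data, and seed are placeholders.

import numpy as np
import pandas as pd
from statsmodels.multivariate.manova import MANOVA

rng = np.random.default_rng(4)
df = pd.DataFrame({
    'y1': rng.standard_normal(60),
    'y2': rng.standard_normal(60),
    'group': np.repeat(['a', 'b', 'c'], 20),
})
mod = MANOVA.from_formula('y1 + y2 ~ group', data=df)
print(mod.mv_test())  # default: one hypothesis per model term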
def plot_scree(eigenvals, total_var, ncomp=None, x_label='factor'):
"""
Plot of the ordered eigenvalues and variance explained for the loadings
Parameters
----------
eigenvals : array_like
The eigenvalues
total_var : float
the total variance (for plotting percent variance explained)
ncomp : int, optional
        Number of factors to include in the plot. If None, the maximum
        possible number of factors is included.
x_label : str
label of x-axis
Returns
-------
Figure
Handle to the figure.
"""
fig = plt.figure()
ncomp = len(eigenvals) if ncomp is None else ncomp
vals = eigenvals
vals = vals[:ncomp]
# vals = np.cumsum(vals)
ax = fig.add_subplot(121)
ax.plot(np.arange(ncomp), vals[: ncomp], 'b-o')
ax.autoscale(tight=True)
xlim = np.array(ax.get_xlim())
sp = xlim[1] - xlim[0]
xlim += 0.02 * np.array([-sp, sp])
ax.set_xticks(np.arange(ncomp))
ax.set_xlim(xlim)
ylim = np.array(ax.get_ylim())
scale = 0.02
sp = ylim[1] - ylim[0]
ylim += scale * np.array([-sp, sp])
ax.set_ylim(ylim)
ax.set_title('Scree Plot')
ax.set_ylabel('Eigenvalue')
ax.set_xlabel(x_label)
per_variance = vals / total_var
cumper_variance = np.cumsum(per_variance)
ax = fig.add_subplot(122)
ax.plot(np.arange(ncomp), per_variance[: ncomp], 'b-o')
ax.plot(np.arange(ncomp), cumper_variance[: ncomp], 'g--o')
ax.autoscale(tight=True)
xlim = np.array(ax.get_xlim())
sp = xlim[1] - xlim[0]
xlim += 0.02 * np.array([-sp, sp])
ax.set_xticks(np.arange(ncomp))
ax.set_xlim(xlim)
ylim = np.array(ax.get_ylim())
scale = 0.02
sp = ylim[1] - ylim[0]
ylim += scale * np.array([-sp, sp])
ax.set_ylim(ylim)
ax.set_title('Variance Explained')
ax.set_ylabel('Proportion')
ax.set_xlabel(x_label)
ax.legend(['Proportion', 'Cumulative'], loc=5)
fig.tight_layout()
return fig | Plot of the ordered eigenvalues and variance explained for the loadings
Parameters
----------
eigenvals : array_like
The eigenvalues
total_var : float
the total variance (for plotting percent variance explained)
ncomp : int, optional
        Number of factors to include in the plot. If None, the maximum
        possible number of factors is included.
x_label : str
label of x-axis
Returns
-------
Figure
Handle to the figure. | plot_scree | python | statsmodels/statsmodels | statsmodels/multivariate/plots.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/plots.py | BSD-3-Clause |
def plot_loadings(loadings, col_names=None, row_names=None,
loading_pairs=None, percent_variance=None,
title='Factor patterns'):
"""
Plot factor loadings in 2-d plots
Parameters
----------
loadings : array like
Each column is a component (or factor)
col_names : a list of strings
column names of `loadings`
row_names : a list of strings
row names of `loadings`
loading_pairs : None or a list of tuples
Specify plots. Each tuple (i, j) represent one figure, i and j is
the loading number for x-axis and y-axis, respectively. If `None`,
all combinations of the loadings will be plotted.
percent_variance : array_like
The percent variance explained by each factor.
Returns
-------
figs : a list of figure handles
"""
k_var, n_factor = loadings.shape
if loading_pairs is None:
loading_pairs = []
for i in range(n_factor):
for j in range(i + 1,n_factor):
loading_pairs.append([i, j])
if col_names is None:
col_names = ["factor %d" % i for i in range(n_factor)]
if row_names is None:
row_names = ["var %d" % i for i in range(k_var)]
figs = []
for item in loading_pairs:
i = item[0]
j = item[1]
fig = plt.figure(figsize=(7, 7))
figs.append(fig)
ax = fig.add_subplot(111)
for k in range(loadings.shape[0]):
plt.text(loadings[k, i], loadings[k, j],
row_names[k], fontsize=12)
ax.plot(loadings[:, i], loadings[:, j], 'bo')
ax.set_title(title)
if percent_variance is not None:
x_str = f'{col_names[i]} ({percent_variance[i]:.1f}%)'
y_str = f'{col_names[j]} ({percent_variance[j]:.1f}%)'
ax.set_xlabel(x_str)
ax.set_ylabel(y_str)
else:
ax.set_xlabel(col_names[i])
ax.set_ylabel(col_names[j])
v = 1.05
xlim = np.array([-v, v])
ylim = np.array([-v, v])
ax.plot(xlim, [0, 0], 'k--')
ax.plot([0, 0], ylim, 'k--')
ax.set_aspect('equal', 'datalim')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
fig.tight_layout()
return figs | Plot factor loadings in 2-d plots
Parameters
----------
loadings : array like
Each column is a component (or factor)
col_names : a list of strings
column names of `loadings`
row_names : a list of strings
row names of `loadings`
loading_pairs : None or a list of tuples
Specify plots. Each tuple (i, j) represent one figure, i and j is
the loading number for x-axis and y-axis, respectively. If `None`,
all combinations of the loadings will be plotted.
percent_variance : array_like
The percent variance explained by each factor.
Returns
-------
figs : a list of figure handles | plot_loadings | python | statsmodels/statsmodels | statsmodels/multivariate/plots.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/plots.py | BSD-3-Clause |
def _fit(self, tolerance=1e-8):
"""Fit the model
A ValueError is raised if there are singular values smaller than the
tolerance. The treatment of singular arrays might change in future.
Parameters
----------
tolerance : float
            eigenvalue tolerance; values smaller than this are considered 0
"""
nobs, k_yvar = self.endog.shape
nobs, k_xvar = self.exog.shape
k = np.min([k_yvar, k_xvar])
x = np.array(self.exog)
x = x - x.mean(0)
y = np.array(self.endog)
y = y - y.mean(0)
ux, sx, vx = svd(x, 0)
# vx_ds = vx.T divided by sx
vx_ds = vx.T
mask = sx > tolerance
if mask.sum() < len(mask):
raise ValueError('exog is collinear.')
vx_ds[:, mask] /= sx[mask]
uy, sy, vy = svd(y, 0)
# vy_ds = vy.T divided by sy
vy_ds = vy.T
mask = sy > tolerance
if mask.sum() < len(mask):
raise ValueError('endog is collinear.')
vy_ds[:, mask] /= sy[mask]
u, s, v = svd(ux.T.dot(uy), 0)
# Correct any roundoff
self.cancorr = np.array([max(0, min(s[i], 1)) for i in range(len(s))])
self.x_cancoef = vx_ds.dot(u[:, :k])
self.y_cancoef = vy_ds.dot(v.T[:, :k]) | Fit the model
A ValueError is raised if there are singular values smaller than the
tolerance. The treatment of singular arrays might change in future.
Parameters
----------
tolerance : float
eigenvalue tolerance, values smaller than which is considered 0 | _fit | python | statsmodels/statsmodels | statsmodels/multivariate/cancorr.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/cancorr.py | BSD-3-Clause |
def corr_test(self):
"""Approximate F test
Perform multivariate statistical tests of the hypothesis that
there is no canonical correlation between endog and exog.
For each canonical correlation, testing its significance based on
Wilks' lambda.
Returns
-------
CanCorrTestResults instance
"""
nobs, k_yvar = self.endog.shape
nobs, k_xvar = self.exog.shape
eigenvals = np.power(self.cancorr, 2)
stats = pd.DataFrame(columns=['Canonical Correlation', "Wilks' lambda",
'Num DF','Den DF', 'F Value','Pr > F'],
index=list(range(len(eigenvals) - 1, -1, -1)))
prod = 1
for i in range(len(eigenvals) - 1, -1, -1):
prod *= 1 - eigenvals[i]
p = k_yvar - i
q = k_xvar - i
r = (nobs - k_yvar - 1) - (p - q + 1) / 2
u = (p * q - 2) / 4
df1 = p * q
if p ** 2 + q ** 2 - 5 > 0:
t = np.sqrt(((p * q) ** 2 - 4) / (p ** 2 + q ** 2 - 5))
else:
t = 1
df2 = r * t - 2 * u
lmd = np.power(prod, 1 / t)
F = (1 - lmd) / lmd * df2 / df1
stats.loc[i, 'Canonical Correlation'] = self.cancorr[i]
stats.loc[i, "Wilks' lambda"] = prod
stats.loc[i, 'Num DF'] = df1
stats.loc[i, 'Den DF'] = df2
stats.loc[i, 'F Value'] = F
pval = scipy.stats.f.sf(F, df1, df2)
stats.loc[i, 'Pr > F'] = pval
'''
# Wilk's Chi square test of each canonical correlation
df = (p - i + 1) * (q - i + 1)
chi2 = a * np.log(prod)
pval = stats.chi2.sf(chi2, df)
stats.loc[i, 'Canonical correlation'] = self.cancorr[i]
stats.loc[i, 'Chi-square'] = chi2
stats.loc[i, 'DF'] = df
stats.loc[i, 'Pr > ChiSq'] = pval
'''
ind = stats.index.values[::-1]
stats = stats.loc[ind, :]
# Multivariate tests (remember x has mean removed)
stats_mv = multivariate_stats(eigenvals,
k_yvar, k_xvar, nobs - k_xvar - 1)
return CanCorrTestResults(stats, stats_mv) | Approximate F test
Perform multivariate statistical tests of the hypothesis that
there is no canonical correlation between endog and exog.
For each canonical correlation, testing its significance based on
Wilks' lambda.
Returns
-------
CanCorrTestResults instance | corr_test | python | statsmodels/statsmodels | statsmodels/multivariate/cancorr.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/cancorr.py | BSD-3-Clause |
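A hedged sketch of the canonical correlation workflow that ends in the corr_test shown above; the data and seed are illustrative.

import numpy as np
from statsmodels.multivariate.cancorr import CanCorr

rng = np.random.default_rng(5)
x = rng.standard_normal((100, 3))
y = x @ rng.standard_normal((3, 2)) + 0.5 * rng.standard_normal((100, 2))
mod = CanCorr(y, x)    # endog, exog
print(mod.cancorr)     # canonical correlations, one per min(k_yvar, k_xvar)
res = mod.corr_test()  # CanCorrTestResults with Wilks' lambda based F tests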
def _multivariate_ols_fit(endog, exog, method='svd', tolerance=1e-8):
"""
Solve multivariate linear model y = x * params
where y is dependent variables, x is independent variables
Parameters
----------
endog : array_like
each column is a dependent variable
exog : array_like
each column is a independent variable
method : str
'svd' - Singular value decomposition
'pinv' - Moore-Penrose pseudoinverse
tolerance : float, a small positive number
        Tolerance for eigenvalue. Values smaller than the tolerance are considered
zero.
Returns
-------
a tuple of matrices or values necessary for hypotheses testing
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm
Notes
-----
Status: experimental and incomplete
"""
y = endog
x = exog
nobs, k_endog = y.shape #noqa: F841
nobs1, k_exog= x.shape
if nobs != nobs1:
raise ValueError('x(n=%d) and y(n=%d) should have the same number of '
'rows!' % (nobs1, nobs))
# Calculate the matrices necessary for hypotheses testing
df_resid = nobs - k_exog
if method == 'pinv':
# Regression coefficients matrix
pinv_x = pinv(x)
params = pinv_x.dot(y)
# inverse of x'x
inv_cov = pinv_x.dot(pinv_x.T)
if matrix_rank(inv_cov,tol=tolerance) < k_exog:
raise ValueError('Covariance of x singular!')
# Sums of squares and cross-products of residuals
        # Y'Y - (X * params)'(X * params)
t = x.dot(params)
sscpr = np.subtract(y.T.dot(y), t.T.dot(t))
return (params, df_resid, inv_cov, sscpr)
elif method == 'svd':
u, s, v = svd(x, 0)
if (s > tolerance).sum() < len(s):
raise ValueError('Covariance of x singular!')
invs = 1. / s
params = v.T.dot(np.diag(invs)).dot(u.T).dot(y)
inv_cov = v.T.dot(np.diag(np.power(invs, 2))).dot(v)
t = np.diag(s).dot(v).dot(params)
sscpr = np.subtract(y.T.dot(y), t.T.dot(t))
return (params, df_resid, inv_cov, sscpr)
else:
raise ValueError('%s is not a supported method!' % method) | Solve multivariate linear model y = x * params
where y is dependent variables, x is independent variables
Parameters
----------
endog : array_like
each column is a dependent variable
exog : array_like
each column is a independent variable
method : str
'svd' - Singular value decomposition
'pinv' - Moore-Penrose pseudoinverse
tolerance : float, a small positive number
        Tolerance for eigenvalue. Values smaller than the tolerance are considered
zero.
Returns
-------
a tuple of matrices or values necessary for hypotheses testing
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm
Notes
-----
Status: experimental and incomplete | _multivariate_ols_fit | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
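A small numpy check of the 'pinv' branch above: the residual sums of squares and cross-products matrix equals the SSCP of the OLS residuals. The data and seed are illustrative.

import numpy as np

rng = np.random.default_rng(6)
x = rng.standard_normal((50, 3))
y = x @ rng.standard_normal((3, 2)) + rng.standard_normal((50, 2))

pinv_x = np.linalg.pinv(x)
params = pinv_x @ y                  # coefficient matrix B in Y = X B + E
fitted = x @ params
sscpr = y.T @ y - fitted.T @ fitted  # as computed in the 'pinv' branch
resid = y - fitted
print(np.allclose(sscpr, resid.T @ resid))  # True: same matrix, computed directly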
def multivariate_stats(eigenvals,
r_err_sscp,
r_contrast, df_resid, tolerance=1e-8):
"""
For multivariate linear model Y = X * B
Testing hypotheses
L*B*M = 0
where L is contrast matrix, B is the parameters of the
multivariate linear model and M is dependent variable transform matrix.
T = L*inv(X'X)*L'
H = M'B'L'*inv(T)*LBM
E = M'(Y'Y - B'X'XB)M
Parameters
----------
eigenvals : ndarray
The eigenvalues of inv(E + H)*H
r_err_sscp : int
Rank of E + H
r_contrast : int
Rank of T matrix
df_resid : int
Residual degree of freedom (n_samples minus n_variables of X)
tolerance : float
smaller than which eigenvalue is considered 0
Returns
-------
A DataFrame
References
----------
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm
"""
v = df_resid
p = r_err_sscp
q = r_contrast
s = np.min([p, q])
ind = eigenvals > tolerance
# n_e = ind.sum()
eigv2 = eigenvals[ind]
eigv1 = np.array([i / (1 - i) for i in eigv2])
m = (np.abs(p - q) - 1) / 2
n = (v - p - 1) / 2
cols = ['Value', 'Num DF', 'Den DF', 'F Value', 'Pr > F']
index = ["Wilks' lambda", "Pillai's trace",
"Hotelling-Lawley trace", "Roy's greatest root"]
results = pd.DataFrame(columns=cols,
index=index)
def fn(x):
return np.real([x])[0]
results.loc["Wilks' lambda", 'Value'] = fn(np.prod(1 - eigv2))
results.loc["Pillai's trace", 'Value'] = fn(eigv2.sum())
results.loc["Hotelling-Lawley trace", 'Value'] = fn(eigv1.sum())
results.loc["Roy's greatest root", 'Value'] = fn(eigv1.max())
r = v - (p - q + 1)/2
u = (p*q - 2) / 4
df1 = p * q
if p*p + q*q - 5 > 0:
t = np.sqrt((p*p*q*q - 4) / (p*p + q*q - 5))
else:
t = 1
df2 = r*t - 2*u
lmd = results.loc["Wilks' lambda", 'Value']
lmd = np.power(lmd, 1 / t)
F = (1 - lmd) / lmd * df2 / df1
results.loc["Wilks' lambda", 'Num DF'] = df1
results.loc["Wilks' lambda", 'Den DF'] = df2
results.loc["Wilks' lambda", 'F Value'] = F
pval = stats.f.sf(F, df1, df2)
results.loc["Wilks' lambda", 'Pr > F'] = pval
V = results.loc["Pillai's trace", 'Value']
df1 = s * (2*m + s + 1)
df2 = s * (2*n + s + 1)
F = df2 / df1 * V / (s - V)
results.loc["Pillai's trace", 'Num DF'] = df1
results.loc["Pillai's trace", 'Den DF'] = df2
results.loc["Pillai's trace", 'F Value'] = F
pval = stats.f.sf(F, df1, df2)
results.loc["Pillai's trace", 'Pr > F'] = pval
U = results.loc["Hotelling-Lawley trace", 'Value']
if n > 0:
b = (p + 2*n) * (q + 2*n) / 2 / (2*n + 1) / (n - 1)
df1 = p * q
df2 = 4 + (p*q + 2) / (b - 1)
c = (df2 - 2) / 2 / n
F = df2 / df1 * U / c
else:
df1 = s * (2*m + s + 1)
df2 = s * (s*n + 1)
F = df2 / df1 / s * U
results.loc["Hotelling-Lawley trace", 'Num DF'] = df1
results.loc["Hotelling-Lawley trace", 'Den DF'] = df2
results.loc["Hotelling-Lawley trace", 'F Value'] = F
pval = stats.f.sf(F, df1, df2)
results.loc["Hotelling-Lawley trace", 'Pr > F'] = pval
sigma = results.loc["Roy's greatest root", 'Value']
r = np.max([p, q])
df1 = r
df2 = v - r + q
F = df2 / df1 * sigma
results.loc["Roy's greatest root", 'Num DF'] = df1
results.loc["Roy's greatest root", 'Den DF'] = df2
results.loc["Roy's greatest root", 'F Value'] = F
pval = stats.f.sf(F, df1, df2)
results.loc["Roy's greatest root", 'Pr > F'] = pval
return results | For multivariate linear model Y = X * B
Testing hypotheses
L*B*M = 0
where L is contrast matrix, B is the parameters of the
multivariate linear model and M is dependent variable transform matrix.
T = L*inv(X'X)*L'
H = M'B'L'*inv(T)*LBM
E = M'(Y'Y - B'X'XB)M
Parameters
----------
eigenvals : ndarray
The eigenvalues of inv(E + H)*H
r_err_sscp : int
Rank of E + H
r_contrast : int
Rank of T matrix
df_resid : int
Residual degree of freedom (n_samples minus n_variables of X)
tolerance : float
smaller than which eigenvalue is considered 0
Returns
-------
A DataFrame
References
----------
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm | multivariate_stats | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
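In terms of the eigenvalues \theta_i of (E + H)^{-1} H passed to multivariate_stats above (so \theta_i / (1 - \theta_i) are the eigenvalues of E^{-1} H), the four statistics computed are

\Lambda = \prod_i (1 - \theta_i) \quad \text{(Wilks' lambda)}
V = \sum_i \theta_i \quad \text{(Pillai's trace)}
U = \sum_i \frac{\theta_i}{1 - \theta_i} \quad \text{(Hotelling-Lawley trace)}
\max_i \frac{\theta_i}{1 - \theta_i} \quad \text{(Roy's greatest root)}

The F approximations and degrees of freedom then follow the SAS formulas cited in the reference above.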
def _multivariate_test(hypotheses, exog_names, endog_names, fn):
"""
Multivariate linear model hypotheses testing
For y = x * params, where y are the dependent variables and x are the
independent variables, testing L * params * M = 0 where L is the contrast
matrix for hypotheses testing and M is the transformation matrix for
transforming the dependent variables in y.
Algorithm:
T = L*inv(X'X)*L'
H = M'B'L'*inv(T)*LBM
E = M'(Y'Y - B'X'XB)M
where H and E correspond to the numerator and denominator of a univariate
F-test. Then find the eigenvalues of inv(H + E)*H from which the
multivariate test statistics are calculated.
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML
/default/viewer.htm#statug_introreg_sect012.htm
Parameters
----------
%(hypotheses_doc)s
k_xvar : int
The number of independent variables
k_yvar : int
The number of dependent variables
fn : function
a function fn(contrast_L, transform_M) that returns E, H, q, df_resid
where q is the rank of T matrix
Returns
-------
results : MANOVAResults
"""
k_xvar = len(exog_names)
k_yvar = len(endog_names)
results = {}
for hypo in hypotheses:
if len(hypo) ==2:
name, L = hypo
M = None
C = None
elif len(hypo) == 3:
name, L, M = hypo
C = None
elif len(hypo) == 4:
name, L, M, C = hypo
else:
            raise ValueError('each hypothesis must be a tuple of length 2, 3 '
                             'or 4. len(hypothesis)=%d' % len(hypo))
mgr = FormulaManager()
if any(isinstance(j, str) for j in L):
L = mgr.get_linear_constraints(L, variable_names=exog_names).constraint_matrix
else:
if not isinstance(L, np.ndarray) or len(L.shape) != 2:
raise ValueError('Contrast matrix L must be a 2-d array!')
if L.shape[1] != k_xvar:
raise ValueError('Contrast matrix L should have the same '
'number of columns as exog! %d != %d' %
(L.shape[1], k_xvar))
if M is None:
M = np.eye(k_yvar)
elif any(isinstance(j, str) for j in M):
M = mgr.get_linear_constraints(M, variable_names=endog_names).constraint_matrix.T
else:
if M is not None:
if not isinstance(M, np.ndarray) or len(M.shape) != 2:
raise ValueError('Transform matrix M must be a 2-d array!')
if M.shape[0] != k_yvar:
raise ValueError('Transform matrix M should have the same '
'number of rows as the number of columns '
'of endog! %d != %d' %
(M.shape[0], k_yvar))
if C is None:
C = np.zeros([L.shape[0], M.shape[1]])
elif not isinstance(C, np.ndarray):
raise ValueError('Constant matrix C must be a 2-d array!')
if C.shape[0] != L.shape[0]:
raise ValueError('contrast L and constant C must have the same '
'number of rows! %d!=%d'
% (L.shape[0], C.shape[0]))
if C.shape[1] != M.shape[1]:
raise ValueError('transform M and constant C must have the same '
'number of columns! %d!=%d'
% (M.shape[1], C.shape[1]))
E, H, q, df_resid = fn(L, M, C)
EH = np.add(E, H)
p = matrix_rank(EH)
# eigenvalues of inv(E + H)H
eigv2 = np.sort(eigvals(solve(EH, H)))
stat_table = multivariate_stats(eigv2, p, q, df_resid)
results[name] = {'stat': stat_table, 'contrast_L': L,
'transform_M': M, 'constant_C': C,
'E': E, 'H': H}
return results | Multivariate linear model hypotheses testing
For y = x * params, where y are the dependent variables and x are the
independent variables, testing L * params * M = 0 where L is the contrast
matrix for hypotheses testing and M is the transformation matrix for
transforming the dependent variables in y.
Algorithm:
T = L*inv(X'X)*L'
H = M'B'L'*inv(T)*LBM
E = M'(Y'Y - B'X'XB)M
where H and E correspond to the numerator and denominator of a univariate
F-test. Then find the eigenvalues of inv(H + E)*H from which the
multivariate test statistics are calculated.
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML
/default/viewer.htm#statug_introreg_sect012.htm
Parameters
----------
%(hypotheses_doc)s
k_xvar : int
The number of independent variables
k_yvar : int
The number of dependent variables
fn : function
a function fn(contrast_L, transform_M) that returns E, H, q, df_resid
where q is the rank of T matrix
Returns
-------
results : MANOVAResults | _multivariate_test | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
def mv_test(self, hypotheses=None, skip_intercept_test=False):
"""
Linear hypotheses testing
Parameters
----------
%(hypotheses_doc)s
skip_intercept_test : bool
If true, then testing the intercept is skipped, the model is not
changed.
Note: If a term has a numerically insignificant effect, then
            an exception because of empty arrays may be raised. This can
happen for the intercept if the data has been demeaned.
Returns
-------
results: _MultivariateOLSResults
Notes
-----
Tests hypotheses of the form
L * params * M = C
where `params` is the regression coefficient matrix for the
linear model y = x * params, `L` is the contrast matrix, `M` is the
dependent variable transform matrix and C is the constant matrix.
"""
mgr = FormulaManager()
k_xvar = len(self.exog_names)
if hypotheses is None:
if self.model_spec is not None:
terms = mgr.get_term_name_slices(self.model_spec)
hypotheses = []
for key in terms:
if skip_intercept_test and (key == 'Intercept' or key == mgr.intercept_term):
continue
L_contrast = np.eye(k_xvar)[terms[key], :]
test_name = str(key)
if key == mgr.intercept_term:
test_name = 'Intercept'
hypotheses.append([test_name, L_contrast, None])
else:
hypotheses = []
for i in range(k_xvar):
name = 'x%d' % (i)
L = np.zeros([1, k_xvar])
L[0, i] = 1
hypotheses.append([name, L, None])
results = _multivariate_ols_test(hypotheses, self._fittedmod,
self.exog_names, self.endog_names)
return MultivariateTestResults(results,
self.endog_names,
self.exog_names) | Linear hypotheses testing
Parameters
----------
%(hypotheses_doc)s
skip_intercept_test : bool
If true, then testing the intercept is skipped, the model is not
changed.
Note: If a term has a numerically insignificant effect, then
            an exception because of empty arrays may be raised. This can
happen for the intercept if the data has been demeaned.
Returns
-------
results: _MultivariateOLSResults
Notes
-----
Tests hypotheses of the form
L * params * M = C
where `params` is the regression coefficient matrix for the
linear model y = x * params, `L` is the contrast matrix, `M` is the
dependent variable transform matrix and C is the constant matrix. | mv_test | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
def _hat_matrix_diag(self):
"""Diagonal of the hat_matrix for OLS
Notes
-----
temporarily calculated here, this should go to model or influence class
"""
# computation base on OLSInfluence method
exog = self.model.exog
pinv_wexog = np.linalg.pinv(exog)
return (exog * pinv_wexog.T).sum(1) | Diagonal of the hat_matrix for OLS
Notes
-----
temporarily calculated here, this should go to model or influence class | _hat_matrix_diag | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
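The quantity computed above is the OLS leverage h_ii = x_i' (X'X)^{-1} x_i; a small numpy check with illustrative data:

import numpy as np

rng = np.random.default_rng(7)
X = rng.standard_normal((30, 4))
pinv_X = np.linalg.pinv(X)
h_fast = (X * pinv_X.T).sum(1)                      # as in _hat_matrix_diag above
h_full = np.diag(X @ np.linalg.inv(X.T @ X) @ X.T)  # textbook hat matrix diagonal
print(np.allclose(h_fast, h_full))                  # True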
def mv_test(self, hypotheses=None, skip_intercept_test=False):
"""
Linear hypotheses testing
Parameters
----------
%(hypotheses_doc)s
skip_intercept_test : bool
If true, then testing the intercept is skipped, the model is not
changed.
Note: If a term has a numerically insignificant effect, then
            an exception because of empty arrays may be raised. This can
happen for the intercept if the data has been demeaned.
Returns
-------
results: _MultivariateOLSResults
Notes
-----
Tests hypotheses of the form
L * params * M = C
where `params` is the regression coefficient matrix for the
linear model y = x * params, `L` is the contrast matrix, `M` is the
dependent variable transform matrix and C is the constant matrix.
"""
k_xvar = len(self.model.exog_names)
if hypotheses is None:
if self.model.data.model_spec is not None:
mgr = FormulaManager()
terms = mgr.get_term_name_slices(self.model.data.model_spec)
hypotheses = []
for key in terms:
if skip_intercept_test and (key == 'Intercept' or key == mgr.intercept_term):
continue
L_contrast = np.eye(k_xvar)[terms[key], :]
test_name = str(key)
if key == mgr.intercept_term:
test_name = 'Intercept'
hypotheses.append([test_name, L_contrast, None])
else:
hypotheses = []
for i in range(k_xvar):
name = f'x{i:d}'
L = np.zeros([1, k_xvar])
L[0, i] = 1
hypotheses.append([name, L, None])
results = _multivariate_ols_test(hypotheses, self._fittedmod,
self.model.exog_names, self.model.endog_names)
return MultivariateTestResults(results,
self.model.endog_names,
self.model.exog_names) | Linear hypotheses testing
Parameters
----------
%(hypotheses_doc)s
skip_intercept_test : bool
If True, then testing the intercept is skipped and the model is not
changed.
Note: If a term has a numerically insignificant effect, then
an exception because of empty arrays may be raised. This can
happen for the intercept if the data has been demeaned.
Returns
-------
results : MultivariateTestResults
Notes
-----
Tests hypotheses of the form
L * params * M = C
where `params` is the regression coefficient matrix for the
linear model y = x * params, `L` is the contrast matrix, `M` is the
dependent variable transform matrix and C is the constant matrix. | mv_test | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
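To illustrate how the default per-term hypotheses above are assembled: each term's contrast matrix L is just the rows of an identity matrix picked out by that term's column slice in exog. The column names and slices below are hypothetical, not taken from this row:
import numpy as np

exog_names = ['Intercept', 'Loc[T.B]', 'Loc[T.C]']            # assumed design columns
term_slices = {'Intercept': slice(0, 1), 'Loc': slice(1, 3)}  # assumed term -> columns map

k_xvar = len(exog_names)
for name, sl in term_slices.items():
    L = np.eye(k_xvar)[sl, :]   # rows of the identity selecting this term's coefficients
    print(name, L)
# Intercept -> [[1. 0. 0.]]; Loc -> [[0. 1. 0.], [0. 0. 1.]]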
def summary(self, yname=None, xname=None, title=None, alpha=.05,
yname_list=None):
"""
Summarize the Regression Results.
Parameters
----------
yname : str, optional
The name of the endog variable in the tables. The default is `y`.
xname : list[str], optional
The names for the exogenous variables, default is "var_xx".
Must match the number of parameters in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
Returns
-------
Summary
Class that holds the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : Class that holds summary results.
"""
# used in generic part of io summary
self.nobs = self.model.nobs
self.df_model = self.model.k_endog * (self.model.k_exog - 1)
top_left = [('Dep. Variable:', None),
('Model:', [self.model.__class__.__name__]),
('Method:', [self.method]),
('Date:', None),
('Time:', None),
# ('converged:', ["%s" % self.mle_retvals['converged']]),
]
top_right = [('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None),
# ('Pseudo R-squ.:', ["%#6.4g" % self.prsquared]),
# ('Log-Likelihood:', None),
# ('LL-Null:', ["%#8.5g" % self.llnull]),
# ('LLR p-value:', ["%#6.4g" % self.llr_pvalue])
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
# boiler plate
from statsmodels.iolib.summary import Summary
smry = Summary()
yname, yname_list = self._get_endog_name(yname, yname_list)
# for top of table
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
# for parameters, etc
smry.add_table_params(self, yname=yname_list, xname=xname, alpha=alpha,
use_t=self.use_t)
if hasattr(self, 'constraints'):
smry.add_extra_txt(['Model has been estimated subject to linear '
'equality constraints.'])
return smry | Summarize the Regression Results.
Parameters
----------
yname : str, optional
The name of the endog variable in the tables. The default is `y`.
xname : list[str], optional
The names for the exogenous variables, default is "var_xx".
Must match the number of parameters in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
Returns
-------
Summary
Class that holds the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : Class that holds summary results. | summary | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
def summary_frame(self):
"""
Return results as a multiindex dataframe
"""
df = []
for key in self.results:
tmp = self.results[key]['stat'].copy()
tmp.loc[:, 'Effect'] = key
df.append(tmp.reset_index())
df = pd.concat(df, axis=0)
df = df.set_index(['Effect', 'index'])
df.index.set_names(['Effect', 'Statistic'], inplace=True)
return df | Return results as a multiindex dataframe | summary_frame | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
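A pandas-only sketch of the reshaping done in summary_frame above: the per-effect stat tables are stacked and re-indexed by (Effect, Statistic). The effect names, statistic label, column layout, and numbers below are placeholders chosen only for illustration:
import pandas as pd

cols = ['Value', 'Num DF', 'Den DF', 'F Value', 'Pr > F']      # assumed column layout
results = {
    'Intercept': {'stat': pd.DataFrame([[0.1, 4, 6, 13.5, 0.003]], columns=cols,
                                        index=["Wilks' lambda"])},
    'Drug': {'stat': pd.DataFrame([[0.5, 8, 12, 1.2, 0.35]], columns=cols,
                                  index=["Wilks' lambda"])},
}

frames = []
for key, res in results.items():
    tmp = res['stat'].copy()
    tmp.loc[:, 'Effect'] = key
    frames.append(tmp.reset_index())
df = pd.concat(frames, axis=0).set_index(['Effect', 'index'])
df.index = df.index.set_names(['Effect', 'Statistic'])
print(df.loc[('Drug', "Wilks' lambda"), 'F Value'])            # 1.2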
def summary(self, show_contrast_L=False, show_transform_M=False,
show_constant_C=False):
"""
Summary of test results
Parameters
----------
show_contrast_L : bool
Whether to show contrast_L matrix
show_transform_M : bool
Whether to show transform_M matrix
show_constant_C : bool
Whether to show the constant_C
"""
summ = summary2.Summary()
summ.add_title('Multivariate linear model')
for key in self.results:
summ.add_dict({'': ''})
df = self.results[key]['stat'].copy()
df = df.reset_index()
c = list(df.columns)
c[0] = key
df.columns = c
df.index = ['', '', '', '']
summ.add_df(df)
if show_contrast_L:
summ.add_dict({key: ' contrast L='})
df = pd.DataFrame(self.results[key]['contrast_L'],
columns=self.exog_names)
summ.add_df(df)
if show_transform_M:
summ.add_dict({key: ' transform M='})
df = pd.DataFrame(self.results[key]['transform_M'],
index=self.endog_names)
summ.add_df(df)
if show_constant_C:
summ.add_dict({key: ' constant C='})
df = pd.DataFrame(self.results[key]['constant_C'])
summ.add_df(df)
return summ | Summary of test results
Parameters
----------
show_contrast_L : bool
Whether to show contrast_L matrix
show_transform_M : bool
Whether to show transform_M matrix
show_constant_C : bool
Whether to show the constant_C | summary | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
def test_1factor():
"""
# R code:
r = 0.4
p = 4
ii = seq(0, p-1)
ii = outer(ii, ii, "-")
ii = abs(ii)
cm = r^ii
fa = factanal(covmat=cm, factors=1)
print(fa, digits=10)
"""
r = 0.4
p = 4
ii = np.arange(p)
cm = r ** np.abs(np.subtract.outer(ii, ii))
fa = Factor(corr=cm, n_factor=1, method='ml')
rslt = fa.fit()
if rslt.loadings[0, 0] < 0:
rslt.loadings[:, 0] *= -1
# R solution, but our likelihood is higher
# uniq = np.r_[0.8392472054, 0.5820958187, 0.5820958187, 0.8392472054]
# load = np.asarray([[0.4009399224, 0.6464550935, 0.6464550935,
# 0.4009399224]]).T
# l1 = fa.loglike(fa._pack(load, uniq))
# l2 = fa.loglike(fa._pack(rslt.loadings, rslt.uniqueness))
# So use a smoke test
uniq = np.r_[0.85290232, 0.60916033, 0.55382266, 0.82610666]
load = np.asarray([[0.38353316], [0.62517171], [0.66796508],
[0.4170052]])
assert_allclose(load, rslt.loadings, rtol=1e-3, atol=1e-3)
assert_allclose(uniq, rslt.uniqueness, rtol=1e-3, atol=1e-3)
assert_equal(rslt.df, 2) | # R code:
r = 0.4
p = 4
ii = seq(0, p-1)
ii = outer(ii, ii, "-")
ii = abs(ii)
cm = r^ii
fa = factanal(covmat=cm, factors=1)
print(fa, digits=10) | test_1factor | python | statsmodels/statsmodels | statsmodels/multivariate/tests/test_ml_factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/tests/test_ml_factor.py | BSD-3-Clause |
def test_2factor():
"""
# R code:
r = 0.4
p = 6
ii = seq(0, p-1)
ii = outer(ii, ii, "-")
ii = abs(ii)
cm = r^ii
factanal(covmat=cm, factors=2)
"""
r = 0.4
p = 6
ii = np.arange(p)
cm = r ** np.abs(np.subtract.outer(ii, ii))
fa = Factor(corr=cm, n_factor=2, nobs=100, method='ml')
rslt = fa.fit()
for j in 0, 1:
if rslt.loadings[0, j] < 0:
rslt.loadings[:, j] *= -1
uniq = np.r_[0.782, 0.367, 0.696, 0.696, 0.367, 0.782]
assert_allclose(uniq, rslt.uniqueness, rtol=1e-3, atol=1e-3)
loads = [np.r_[0.323, 0.586, 0.519, 0.519, 0.586, 0.323],
np.r_[0.337, 0.538, 0.187, -0.187, -0.538, -0.337]]
for k in 0, 1:
if np.dot(loads[k], rslt.loadings[:, k]) < 0:
loads[k] *= -1
assert_allclose(loads[k], rslt.loadings[:, k], rtol=1e-3, atol=1e-3)
assert_equal(rslt.df, 4)
# Smoke test for standard errors
e = np.asarray([0.11056836, 0.05191071, 0.09836349,
0.09836349, 0.05191071, 0.11056836])
assert_allclose(rslt.uniq_stderr, e, atol=1e-4)
e = np.asarray([[0.08842151, 0.08842151], [0.06058582, 0.06058582],
[0.08339874, 0.08339874], [0.08339874, 0.08339874],
[0.06058582, 0.06058582], [0.08842151, 0.08842151]])
assert_allclose(rslt.load_stderr, e, atol=1e-4) | # R code:
r = 0.4
p = 6
ii = seq(0, p-1)
ii = outer(ii, ii, "-")
ii = abs(ii)
cm = r^ii
factanal(covmat=cm, factors=2) | test_2factor | python | statsmodels/statsmodels | statsmodels/multivariate/tests/test_ml_factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/tests/test_ml_factor.py | BSD-3-Clause |
def test_manova_test_input_validation():
mod = MANOVA.from_formula('Basal + Occ + Max ~ Loc', data=X)
hypothesis = [('test', np.array([[1, 1, 1]]), None)]
mod.mv_test(hypothesis)
hypothesis = [('test', np.array([[1, 1]]), None)]
assert_raises(ValueError, mod.mv_test, hypothesis)
"""
assert_raises_regex(ValueError,
('Contrast matrix L should have the same number of '
'columns as exog! 2 != 3'),
mod.mv_test, hypothesis)
"""
hypothesis = [('test', np.array([[1, 1, 1]]), np.array([[1], [1], [1]]))]
mod.mv_test(hypothesis)
hypothesis = [('test', np.array([[1, 1, 1]]), np.array([[1], [1]]))]
assert_raises(ValueError, mod.mv_test, hypothesis)
"""
assert_raises_regex(ValueError,
('Transform matrix M should have the same number of '
'rows as the number of columns of endog! 2 != 3'),
mod.mv_test, hypothesis)
""" | assert_raises_regex(ValueError,
('Contrast matrix L should have the same number of '
'columns as exog! 2 != 3'),
mod.mv_test, hypothesis) | test_manova_test_input_validation | python | statsmodels/statsmodels | statsmodels/multivariate/tests/test_manova.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/tests/test_manova.py | BSD-3-Clause |
def compare_r_output_dogs_data(method, model):
''' Testing a within-subject effect interacting with 2 between-subject effects
Compares with R car library Anova(, type=3) output
Note: The test statistics Pillai, Wilks, Hotelling-Lawley
and Roy are the same as the R output, but the approximate F and degrees
of freedom can differ. This is because this
implementation is based on the SAS formula [1]_.
.. [1] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/
viewer.htm#statug_introreg_sect012.htm
'''
# Repeated measures with orthogonal polynomial contrasts coding
mod = model.from_formula(
'Histamine0 + Histamine1 + Histamine3 + Histamine5 ~ Drug * Depleted',
data)
r = mod.fit(method=method)
r = r.mv_test()
a = [[2.68607660e-02, 4, 6, 5.43435304e+01, 7.59585610e-05],
[9.73139234e-01, 4, 6, 5.43435304e+01, 7.59585610e-05],
[3.62290202e+01, 4, 6, 5.43435304e+01, 7.59585610e-05],
[3.62290202e+01, 4, 6, 5.43435304e+01, 7.59585610e-05]]
assert_array_almost_equal(r['Intercept']['stat'].values, a, decimal=6)
a = [[8.39646619e-02, 8, 1.20000000e+01, 3.67658068e+00, 2.12614444e-02],
[1.18605382e+00, 8, 1.40000000e+01, 2.55003861e+00, 6.01270701e-02],
[7.69391362e+00, 8, 6.63157895e+00, 5.50814270e+00, 2.07392260e-02],
[7.25036952e+00, 4, 7.00000000e+00, 1.26881467e+01, 2.52669877e-03]]
assert_array_almost_equal(r['Drug']['stat'].values, a, decimal=6)
a = [[0.32048892, 4., 6., 3.18034906, 0.10002373],
[0.67951108, 4., 6., 3.18034906, 0.10002373],
[2.12023271, 4., 6., 3.18034906, 0.10002373],
[2.12023271, 4., 6., 3.18034906, 0.10002373]]
assert_array_almost_equal(r['Depleted']['stat'].values, a, decimal=6)
a = [[0.15234366, 8., 12., 2.34307678, 0.08894239],
[1.13013353, 8., 14., 2.27360606, 0.08553213],
[3.70989596, 8., 6.63157895, 2.65594824, 0.11370285],
[3.1145597, 4., 7., 5.45047947, 0.02582767]]
assert_array_almost_equal(r['Drug:Depleted']['stat'].values, a, decimal=6) | Testing a within-subject effect interacting with 2 between-subject effects
Compares with R car library Anova(, type=3) output
Note: The test statistics Pillai, Wilks, Hotelling-Lawley
and Roy are the same as the R output, but the approximate F and degrees
of freedom can differ. This is because this
implementation is based on the SAS formula [1]_.
.. [1] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/
viewer.htm#statug_introreg_sect012.htm | compare_r_output_dogs_data | python | statsmodels/statsmodels | statsmodels/multivariate/tests/test_multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/tests/test_multivariate_ols.py | BSD-3-Clause |
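To make the note about the SAS-based F approximation concrete, here is a sketch of Rao's transformation of Wilks' lambda as recalled from the SAS documentation cited above (treat the formula as an assumption, not a quote). With an assumed error df of v = 9, plugging in the Drug row (lambda ≈ 0.083965, p = 4 responses, q = 2 hypothesis df) reproduces the tabulated F ≈ 3.6766 on (8, 12) df:
import numpy as np

def wilks_to_f(lmbda, p, q, v):
    # p: responses involved, q: hypothesis df (rank of L), v: error df of the fit
    if p**2 + q**2 - 5 > 0:
        t = np.sqrt((p**2 * q**2 - 4) / (p**2 + q**2 - 5))
    else:
        t = 1.0
    r = v - (p - q + 1) / 2
    u = (p * q - 2) / 4
    df1 = p * q
    df2 = r * t - 2 * u
    lam_t = lmbda ** (1 / t)
    return (1 - lam_t) / lam_t * df2 / df1, df1, df2

print(wilks_to_f(8.39646619e-02, p=4, q=2, v=9))   # approx (3.6766, 8, 12.0)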
def Gf(T, ff):
"""
Subroutine for the gradient of f using numerical derivatives.
"""
k = T.shape[0]
ep = 1e-4
G = np.zeros((k, k))
for r in range(k):
for s in range(k):
dT = np.zeros((k, k))
dT[r, s] = ep
G[r, s] = (ff(T+dT)-ff(T-dT))/(2*ep)
return G | Subroutine for the gradient of f using numerical derivatives. | Gf | python | statsmodels/statsmodels | statsmodels/multivariate/factor_rotation/_gpa_rotation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/_gpa_rotation.py | BSD-3-Clause |
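A quick sanity check of the central-difference helper above against a criterion with a known gradient, f(T) = sum(T**2), whose exact gradient is 2*T (the redefinition below just makes the snippet self-contained):
import numpy as np

def Gf(T, ff):
    # numerical gradient by central differences, mirroring the routine above
    k = T.shape[0]
    ep = 1e-4
    G = np.zeros((k, k))
    for r in range(k):
        for s in range(k):
            dT = np.zeros((k, k))
            dT[r, s] = ep
            G[r, s] = (ff(T + dT) - ff(T - dT)) / (2 * ep)
    return G

T = np.array([[1.0, 2.0], [3.0, 4.0]])
G = Gf(T, lambda M: np.sum(M**2))
assert np.allclose(G, 2 * T, atol=1e-6)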
def test_target_rotation(self):
"""
Rotation towards target matrix example
http://www.stat.ucla.edu/research/gpa
"""
A = self.str2matrix("""
.830 -.396
.818 -.469
.777 -.470
.798 -.401
.786 .500
.672 .458
.594 .444
.647 .333
""")
H = self.str2matrix("""
.8 -.3
.8 -.4
.7 -.4
.9 -.4
.8 .5
.6 .4
.5 .4
.6 .3
""")
T = target_rotation(A, H)
L = A.dot(T)
L_required = self.str2matrix("""
0.84168 -0.37053
0.83191 -0.44386
0.79096 -0.44611
0.80985 -0.37650
0.77040 0.52371
0.65774 0.47826
0.58020 0.46189
0.63656 0.35255
""")
self.assertTrue(np.allclose(L, L_required, atol=1e-05))
T = target_rotation(A, H, full_rank=True)
L = A.dot(T)
self.assertTrue(np.allclose(L, L_required, atol=1e-05)) | Rotation towards target matrix example
http://www.stat.ucla.edu/research/gpa | test_target_rotation | python | statsmodels/statsmodels | statsmodels/multivariate/factor_rotation/tests/test_rotation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py | BSD-3-Clause |
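For reference, the target rotation exercised above is the orthogonal Procrustes problem: minimize ||A @ T - H||_F over orthogonal T, whose textbook solution is T = U @ Vt from the SVD of A.T @ H. The sketch below uses that standard construction and is not necessarily identical to the library's internal implementation:
import numpy as np

def procrustes_target_rotation(A, H):
    # closed-form orthogonal Procrustes solution
    U, _, Vt = np.linalg.svd(A.T @ H)
    return U @ Vt

rng = np.random.default_rng(0)
A = rng.normal(size=(8, 2))
H = rng.normal(size=(8, 2))
T = procrustes_target_rotation(A, H)
assert np.allclose(T.T @ T, np.eye(2), atol=1e-10)   # T is orthogonal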