# Source: statsmodels/statsmodels (BSD-3-Clause)
# --- statsmodels/tsa/vector_ar/var_model.py ---
def to_vecm(self):
"""to_vecm"""
k = self.coefs.shape[1]
p = self.coefs.shape[0]
A = self.coefs
pi = -(np.identity(k) - np.sum(A, 0))
gamma = np.zeros((p - 1, k, k))
for i in range(p - 1):
gamma[i] = -(np.sum(A[i + 1 :], 0))
gamma = np.concatenate(gamma, 1)
return {"Gamma": gamma, "Pi": pi} | to_vecm | to_vecm | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
def plot(self):
"""Plot input time series"""
    return plotting.plot_mts(self.endog, names=self.names, index=self.dates)

def df_model(self):
"""
Number of estimated parameters per variable, including the intercept / trends
"""
    return self.neqs * self.k_ar + self.k_exog

def df_resid(self):
"""Number of observations minus number of estimated parameters"""
    return self.nobs - self.df_model

def fittedvalues(self):
"""
    The predicted in-sample values of the response variables of the model.
"""
    return np.dot(self.endog_lagged, self.params)

def resid(self):
"""
Residuals of response variable resulting from estimated coefficients
"""
    return self.endog[self.k_ar :] - self.fittedvalues

def sample_acov(self, nlags=1):
    """Sample autocovariances of the endogenous variables for lags 0 through `nlags`."""
    return _compute_acov(self.endog[self.k_ar :], nlags=nlags)

def sample_acorr(self, nlags=1):
    """Sample autocorrelations of the endogenous variables for lags 0 through `nlags`."""
    acovs = self.sample_acov(nlags=nlags)
    return _acovs_to_acorrs(acovs)

def plot_sample_acorr(self, nlags=10, linewidth=8):
"""
Plot sample autocorrelation function
Parameters
----------
nlags : int
        The number of lags to use in computing the autocorrelation. Does
not count the zero lag, which will be returned.
linewidth : int
The linewidth for the plots.
Returns
-------
Figure
The figure that contains the plot axes.
"""
fig = plotting.plot_full_acorr(
self.sample_acorr(nlags=nlags), linewidth=linewidth
)
    return fig

def resid_acov(self, nlags=1):
"""
Compute centered sample autocovariance (including lag 0)
Parameters
----------
nlags : int
Returns
-------
"""
    return _compute_acov(self.resid, nlags=nlags)

def resid_acorr(self, nlags=1):
"""
Compute sample autocorrelation (including lag 0)
Parameters
----------
nlags : int
Returns
-------
"""
acovs = self.resid_acov(nlags=nlags)
    return _acovs_to_acorrs(acovs)

def resid_corr(self):
"""
Centered residual correlation matrix
"""
    return self.resid_acorr(0)[0]

def sigma_u_mle(self):
"""(Biased) maximum likelihood estimate of noise process covariance"""
if not self.df_resid:
return np.zeros_like(self.sigma_u)
    return self.sigma_u * self.df_resid / self.nobs

def cov_params(self):
"""Estimated variance-covariance of model coefficients
Notes
-----
Covariance of vec(B), where B is the matrix
[params_for_deterministic_terms, A_1, ..., A_p] with the shape
(K x (Kp + number_of_deterministic_terms))
Adjusted to be an unbiased estimator
Ref: Lütkepohl p.74-75
"""
z = self.endog_lagged
    return np.kron(np.linalg.inv(z.T @ z), self.sigma_u)

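
# The Kronecker structure above, spelled out on toy data: with Z the
# (T x m) regressor matrix and Sigma_u the (K x K) residual covariance,
# cov(vec(B)) = (Z'Z)^{-1} kron Sigma_u. All numbers here are made up.
import numpy as np

rng = np.random.default_rng(0)
Z = rng.standard_normal((50, 3))               # T = 50, m = 3 regressors
sigma_u = np.array([[1.0, 0.3], [0.3, 2.0]])   # K = 2 equations
cov_vec_b = np.kron(np.linalg.inv(Z.T @ Z), sigma_u)
assert cov_vec_b.shape == (3 * 2, 3 * 2)
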
def _cov_alpha(self):
"""
Estimated covariance matrix of model coefficients w/o exog
"""
# drop exog
kn = self.k_exog * self.neqs
    return self.cov_params()[kn:, kn:]

def _cov_sigma(self):
"""
Estimated covariance matrix of vech(sigma_u)
"""
D_K = tsa.duplication_matrix(self.neqs)
D_Kinv = np.linalg.pinv(D_K)
sigxsig = np.kron(self.sigma_u, self.sigma_u)
    return 2 * D_Kinv @ sigxsig @ D_Kinv.T

def stderr(self):
"""Standard errors of coefficients, reshaped to match in size"""
stderr = np.sqrt(np.diag(self.cov_params()))
    return stderr.reshape((self.df_model, self.neqs), order="C")

def stderr_endog_lagged(self):
"""Stderr_endog_lagged"""
start = self.k_exog
    return self.stderr[start:]

def stderr_dt(self):
"""Stderr_dt"""
end = self.k_exog
    return self.stderr[:end]

def tvalues(self):
"""
Compute t-statistics. Use Student-t(T - Kp - 1) = t(df_resid) to
test significance.
"""
    return self.params / self.stderr

def tvalues_endog_lagged(self):
"""tvalues_endog_lagged"""
start = self.k_exog
    return self.tvalues[start:]

def tvalues_dt(self):
"""tvalues_dt"""
end = self.k_exog
    return self.tvalues[:end]

def pvalues(self):
"""
    Two-sided p-values for model coefficients, based on the asymptotic
    normal distribution (a t-distribution variant is commented out below)
"""
# return stats.t.sf(np.abs(self.tvalues), self.df_resid)*2
    return 2 * stats.norm.sf(np.abs(self.tvalues))

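
# The two-sided p-value computation above, on toy t-statistics:
# p = 2 * P(Z > |t|) under the asymptotic standard-normal approximation.
import numpy as np
from scipy import stats

tvals = np.array([0.5, 1.96, 3.0])
print(2 * stats.norm.sf(np.abs(tvals)))   # -> approx. [0.617, 0.050, 0.003]
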
def pvalues_endog_lagged(self):
"""pvalues_endog_laggd"""
start = self.k_exog
    return self.pvalues[start:]

def pvalues_dt(self):
"""pvalues_dt"""
end = self.k_exog
    return self.pvalues[:end]

def plot_forecast(self, steps, alpha=0.05, plot_stderr=True):
"""
Plot forecast
"""
mid, lower, upper = self.forecast_interval(
self.endog[-self.k_ar :], steps, alpha=alpha
)
fig = plotting.plot_var_forc(
self.endog,
mid,
lower,
upper,
names=self.names,
plot_stderr=plot_stderr,
)
    return fig

def irf_errband_mc(
self,
orth=False,
repl=1000,
steps=10,
signif=0.05,
seed=None,
burn=100,
cum=False,
):
"""
    Compute Monte Carlo integrated error bands for impulse response
    functions, assuming normally distributed innovations.
Parameters
----------
orth : bool, default False
Compute orthogonalized impulse response error bands
repl : int
number of Monte Carlo replications to perform
steps : int, default 10
number of impulse response periods
    signif : float (0 < signif < 1)
Significance level for error bars, defaults to 95% CI
seed : int
np.random.seed for replications
burn : int
number of initial observations to discard for simulation
cum : bool, default False
produce cumulative irf error bands
Notes
-----
Lütkepohl (2005) Appendix D
Returns
-------
Tuple of lower and upper arrays of ma_rep monte carlo standard errors
"""
ma_coll = self.irf_resim(
orth=orth, repl=repl, steps=steps, seed=seed, burn=burn, cum=cum
)
ma_sort = np.sort(ma_coll, axis=0) # sort to get quantiles
# python 2: round returns float
low_idx = int(round(signif / 2 * repl) - 1)
upp_idx = int(round((1 - signif / 2) * repl) - 1)
lower = ma_sort[low_idx, :, :, :]
upper = ma_sort[upp_idx, :, :, :]
    return lower, upper

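
# A hedged usage sketch: fit a small VAR on simulated data and request the
# Monte Carlo IRF error bands. The data are random, purely for illustration.
import numpy as np
from statsmodels.tsa.api import VAR

rng = np.random.default_rng(12345)
data = rng.standard_normal((200, 2)).cumsum(axis=0)
res = VAR(np.diff(data, axis=0)).fit(maxlags=2)
lower, upper = res.irf_errband_mc(orth=False, repl=100, steps=10, seed=1)
# lower/upper each have shape (steps + 1, neqs, neqs)
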
def irf_resim(
self, orth=False, repl=1000, steps=10, seed=None, burn=100, cum=False
):
"""
Simulates impulse response function, returning an array of simulations.
Used for Sims-Zha error band calculation.
Parameters
----------
orth : bool, default False
Compute orthogonalized impulse response error bands
repl : int
number of Monte Carlo replications to perform
steps : int, default 10
number of impulse response periods
seed : int
np.random.seed for replications
burn : int
number of initial observations to discard for simulation
cum : bool, default False
produce cumulative irf error bands
Notes
-----
    .. [*] Sims, Christopher A., and Tao Zha. 1999. "Error Bands for Impulse
       Response." Econometrica 67: 1113-1155.
Returns
-------
Array of simulated impulse response functions
"""
neqs = self.neqs
k_ar = self.k_ar
coefs = self.coefs
sigma_u = self.sigma_u
intercept = self.intercept
nobs = self.nobs
nobs_original = nobs + k_ar
ma_coll = np.zeros((repl, steps + 1, neqs, neqs))
def fill_coll(sim):
ret = VAR(sim, exog=self.exog).fit(maxlags=k_ar, trend=self.trend)
ret = ret.orth_ma_rep(maxn=steps) if orth else ret.ma_rep(maxn=steps)
return ret.cumsum(axis=0) if cum else ret
for i in range(repl):
        # discard the first `burn` observations to correct for starting bias
sim = util.varsim(
coefs,
intercept,
sigma_u,
seed=seed,
steps=nobs_original + burn,
)
sim = sim[burn:]
ma_coll[i, :, :, :] = fill_coll(sim)
    return ma_coll

def summary(self):
"""Compute console output summary of estimates
Returns
-------
summary : VARSummary
"""
    return VARSummary(self)

def irf(self, periods=10, var_decomp=None, var_order=None):
"""Analyze impulse responses to shocks in system
Parameters
----------
periods : int
var_decomp : ndarray (k x k), lower triangular
Must satisfy Omega = P P', where P is the passed matrix. Defaults
to Cholesky decomposition of Omega
var_order : sequence
Alternate variable order for Cholesky decomposition
Returns
-------
irf : IRAnalysis
"""
if var_order is not None:
        raise NotImplementedError("alternate variable order not implemented (yet)")
    return IRAnalysis(self, P=var_decomp, periods=periods)

def fevd(self, periods=10, var_decomp=None):
"""
Compute forecast error variance decomposition ("fevd")
Returns
-------
fevd : FEVD instance
"""
    return FEVD(self, P=var_decomp, periods=periods)

def reorder(self, order):
"""Reorder variables for structural specification"""
if len(order) != len(self.params[0, :]):
raise ValueError(
"Reorder specification length should match "
"number of endogenous variables"
)
    # This converts order to a list of integers if given as strings
    if isinstance(order[0], str):
        order = [self.names.index(name) for name in order]
    return _reordered(self, order)

def test_causality(self, caused, causing=None, kind="f", signif=0.05):
"""
Test Granger causality
Parameters
----------
caused : int or str or sequence of int or str
If int or str, test whether the variable specified via this index
(int) or name (str) is Granger-caused by the variable(s) specified
by `causing`.
If a sequence of int or str, test whether the corresponding
variables are Granger-caused by the variable(s) specified
by `causing`.
causing : int or str or sequence of int or str or None, default: None
If int or str, test whether the variable specified via this index
(int) or name (str) is Granger-causing the variable(s) specified by
`caused`.
If a sequence of int or str, test whether the corresponding
variables are Granger-causing the variable(s) specified by
`caused`.
If None, `causing` is assumed to be the complement of `caused`.
kind : {'f', 'wald'}
Perform F-test or Wald (chi-sq) test
signif : float, default 5%
Significance level for computing critical values for test,
defaulting to standard 0.05 level
Notes
-----
Null hypothesis is that there is no Granger-causality for the indicated
variables. The degrees of freedom in the F-test are based on the
number of variables in the VAR system, that is, degrees of freedom
are equal to the number of equations in the VAR times degree of freedom
of a single equation.
Test for Granger-causality as described in chapter 7.6.3 of [1]_.
Test H0: "`causing` does not Granger-cause the remaining variables of
the system" against H1: "`causing` is Granger-causal for the
remaining variables".
Returns
-------
results : CausalityTestResults
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series*
*Analysis*. Springer.
"""
if not (0 < signif < 1):
raise ValueError("signif has to be between 0 and 1")
allowed_types = (str, int)
if isinstance(caused, allowed_types):
caused = [caused]
if not all(isinstance(c, allowed_types) for c in caused):
raise TypeError(
"caused has to be of type string or int (or a "
"sequence of these types)."
)
caused = [self.names[c] if type(c) is int else c for c in caused]
caused_ind = [util.get_index(self.names, c) for c in caused]
if causing is not None:
if isinstance(causing, allowed_types):
causing = [causing]
if not all(isinstance(c, allowed_types) for c in causing):
raise TypeError(
"causing has to be of type string or int (or "
"a sequence of these types) or None."
)
causing = [self.names[c] if type(c) is int else c for c in causing]
causing_ind = [util.get_index(self.names, c) for c in causing]
else:
causing_ind = [i for i in range(self.neqs) if i not in caused_ind]
        causing = [self.names[c] for c in causing_ind]
k, p = self.neqs, self.k_ar
if p == 0:
err = "Cannot test Granger Causality in a model with 0 lags."
raise RuntimeError(err)
# number of restrictions
num_restr = len(causing) * len(caused) * p
num_det_terms = self.k_exog
# Make restriction matrix
C = np.zeros((num_restr, k * num_det_terms + k**2 * p), dtype=float)
cols_det = k * num_det_terms
row = 0
for j in range(p):
for ing_ind in causing_ind:
for ed_ind in caused_ind:
C[row, cols_det + ed_ind + k * ing_ind + k**2 * j] = 1
row += 1
# Lütkepohl 3.6.5
Cb = np.dot(C, vec(self.params.T))
middle = np.linalg.inv(C @ self.cov_params() @ C.T)
# wald statistic
lam_wald = statistic = Cb @ middle @ Cb
if kind.lower() == "wald":
df = num_restr
dist = stats.chi2(df)
elif kind.lower() == "f":
statistic = lam_wald / num_restr
df = (num_restr, k * self.df_resid)
dist = stats.f(*df)
else:
raise ValueError("kind %s not recognized" % kind)
pvalue = dist.sf(statistic)
crit_value = dist.ppf(1 - signif)
return CausalityTestResults(
causing,
caused,
statistic,
crit_value,
pvalue,
df,
signif,
test="granger",
method=kind,
    )

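
# A hedged usage sketch of the Granger-causality test on the macro dataset
# shipped with statsmodels (log-differenced to obtain stationary series).
import numpy as np
import statsmodels.api as sm
from statsmodels.tsa.api import VAR

macro = sm.datasets.macrodata.load_pandas().data[["realgdp", "realcons", "realinv"]]
res = VAR(np.log(macro).diff().dropna()).fit(maxlags=2)
ct = res.test_causality(caused="realgdp", causing=["realcons"], kind="f")
print(ct.summary())
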
def test_inst_causality(self, causing, signif=0.05):
"""
Test for instantaneous causality
Parameters
----------
    causing : int or str or sequence of int or str
If int or str, test whether the corresponding variable is causing
the variable(s) specified in caused.
If sequence of int or str, test whether the corresponding
variables are causing the variable(s) specified in caused.
signif : float between 0 and 1, default 5 %
Significance level for computing critical values for test,
defaulting to standard 0.05 level
    Returns
    -------
    results : CausalityTestResults
        Holds the test statistic, the Chi^2 critical value, the p-value,
        the degrees of freedom, the significance level, and the conclusion
        ("reject" or "fail to reject" H0).
Notes
-----
Test for instantaneous causality as described in chapters 3.6.3 and
7.6.4 of [1]_.
Test H0: "No instantaneous causality between caused and causing"
against H1: "Instantaneous causality between caused and causing
exists".
Instantaneous causality is a symmetric relation (i.e. if causing is
"instantaneously causing" caused, then also caused is "instantaneously
causing" causing), thus the naming of the parameters (which is chosen
to be in accordance with test_granger_causality()) may be misleading.
    This method does not return the same result as JMulTi. This is
    because the test is based on a VAR(k_ar) model in statsmodels
    (in accordance with pp. 104, 320-321 in [1]_) whereas JMulTi seems
to be using a VAR(k_ar+1) model.
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series*
*Analysis*. Springer.
"""
if not (0 < signif < 1):
raise ValueError("signif has to be between 0 and 1")
allowed_types = (str, int)
if isinstance(causing, allowed_types):
causing = [causing]
if not all(isinstance(c, allowed_types) for c in causing):
        raise TypeError(
            "causing has to be of type string or int (or a "
            "sequence of these types)."
        )
causing = [self.names[c] if type(c) is int else c for c in causing]
causing_ind = [util.get_index(self.names, c) for c in causing]
caused_ind = [i for i in range(self.neqs) if i not in causing_ind]
caused = [self.names[c] for c in caused_ind]
# Note: JMulTi seems to be using k_ar+1 instead of k_ar
k, t = self.neqs, self.nobs
num_restr = len(causing) * len(caused) # called N in Lütkepohl
sigma_u = self.sigma_u
vech_sigma_u = util.vech(sigma_u)
sig_mask = np.zeros(sigma_u.shape)
    # set to 1 twice to ensure that all the needed entries end up below the
    # main diagonal:
sig_mask[causing_ind, caused_ind] = 1
sig_mask[caused_ind, causing_ind] = 1
vech_sig_mask = util.vech(sig_mask)
inds = np.nonzero(vech_sig_mask)[0]
# Make restriction matrix
C = np.zeros((num_restr, len(vech_sigma_u)), dtype=float)
for row in range(num_restr):
C[row, inds[row]] = 1
Cs = np.dot(C, vech_sigma_u)
d = np.linalg.pinv(duplication_matrix(k))
Cd = np.dot(C, d)
middle = np.linalg.inv(Cd @ np.kron(sigma_u, sigma_u) @ Cd.T) / 2
wald_statistic = t * (Cs.T @ middle @ Cs)
df = num_restr
dist = stats.chi2(df)
pvalue = dist.sf(wald_statistic)
crit_value = dist.ppf(1 - signif)
return CausalityTestResults(
causing,
caused,
wald_statistic,
crit_value,
pvalue,
df,
signif,
test="inst",
method="wald",
    )

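
# Reusing the fitted `res` from the Granger-causality sketch above: the
# instantaneous test only takes the `causing` side, the relation being
# symmetric.
inst = res.test_inst_causality(causing="realcons")
print(inst.summary())
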
def test_whiteness(self, nlags=10, signif=0.05, adjusted=False):
"""
Residual whiteness tests using Portmanteau test
Parameters
----------
nlags : int > 0
The number of lags tested must be larger than the number of lags
included in the VAR model.
signif : float, between 0 and 1
The significance level of the test.
adjusted : bool, default False
Flag indicating to apply small-sample adjustments.
Returns
-------
WhitenessTestResults
The test results.
Notes
-----
Test the whiteness of the residuals using the Portmanteau test as
described in [1]_, chapter 4.4.3.
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series*
*Analysis*. Springer.
"""
if nlags - self.k_ar <= 0:
raise ValueError(
"The whiteness test can only be used when nlags "
"is larger than the number of lags included in "
f"the model ({self.k_ar})."
)
statistic = 0
u = np.asarray(self.resid)
acov_list = _compute_acov(u, nlags)
cov0_inv = np.linalg.inv(acov_list[0])
for t in range(1, nlags + 1):
ct = acov_list[t]
to_add = np.trace(ct.T @ cov0_inv @ ct @ cov0_inv)
if adjusted:
to_add /= self.nobs - t
statistic += to_add
statistic *= self.nobs**2 if adjusted else self.nobs
df = self.neqs**2 * (nlags - self.k_ar)
dist = stats.chi2(df)
pvalue = dist.sf(statistic)
crit_value = dist.ppf(1 - signif)
return WhitenessTestResults(
statistic, crit_value, pvalue, df, signif, nlags, adjusted
    )

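
# Again reusing `res` from the sketch above: the Portmanteau test requires
# nlags strictly larger than the VAR order (k_ar == 2 there).
wt = res.test_whiteness(nlags=12, signif=0.05, adjusted=False)
print(wt.summary())
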
def test_normality(self, signif=0.05):
"""
Test assumption of normal-distributed errors using Jarque-Bera-style
omnibus Chi^2 test.
Parameters
----------
signif : float
Test significance level.
Returns
-------
result : NormalityTestResults
Notes
-----
H0 (null) : data are generated by a Gaussian-distributed process
"""
    return test_normality(self, signif=signif)

def aic(self):
"""Akaike information criterion"""
    return self.info_criteria["aic"]

def fpe(self):
"""Final Prediction Error (FPE)
Lütkepohl p. 147, see info_criteria
"""
    return self.info_criteria["fpe"]

def hqic(self):
"""Hannan-Quinn criterion"""
    return self.info_criteria["hqic"]

def bic(self):
"""Bayesian a.k.a. Schwarz info criterion"""
    return self.info_criteria["bic"]

def roots(self):
"""
The roots of the VAR process are the solution to
(I - coefs[0]*z - coefs[1]*z**2 ... - coefs[p-1]*z**k_ar) = 0.
    These roots are the inverses of the companion-matrix eigenvalues;
    stability requires that all of them lie outside the unit circle.
"""
neqs = self.neqs
k_ar = self.k_ar
p = neqs * k_ar
arr = np.zeros((p, p))
arr[:neqs, :] = np.column_stack(self.coefs)
arr[neqs:, :-neqs] = np.eye(p - neqs)
roots = np.linalg.eig(arr)[0] ** -1
idx = np.argsort(np.abs(roots))[::-1] # sort by reverse modulus
    return roots[idx]

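
# Stability check via the roots, reusing `res` from above: a stable VAR has
# all polynomial roots outside the unit circle.
import numpy as np

print("stable:", np.all(np.abs(res.roots) > 1))
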
def cov(self):
"""Compute asymptotic standard errors
Returns
-------
"""
    raise NotImplementedError

def plot(self, periods=None, figsize=(10, 10), **plot_kwds):
"""Plot graphical display of FEVD
Parameters
----------
periods : int, default None
Defaults to number originally specified. Can be at most that number
"""
import matplotlib.pyplot as plt
k = self.neqs
periods = periods or self.periods
fig, axes = plt.subplots(nrows=k, figsize=figsize)
fig.suptitle("Forecast error variance decomposition (FEVD)")
colors = [str(c) for c in np.arange(k, dtype=float) / k]
ticks = np.arange(periods)
limits = self.decomp.cumsum(2)
for i in range(k):
ax = axes[i]
this_limits = limits[i].T
handles = []
for j in range(k):
lower = this_limits[j - 1] if j > 0 else 0
upper = this_limits[j]
handle = ax.bar(
ticks,
upper - lower,
bottom=lower,
color=colors[j],
label=self.names[j],
**plot_kwds,
)
handles.append(handle)
ax.set_title(self.names[i])
# just use the last axis to get handles for plotting
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc="upper right")
plotting.adjust_subplots(right=0.85)
    return fig


# --- statsmodels/tsa/vector_ar/util.py ---
def get_var_endog(y, lags, trend='c', has_constant='skip'):
"""
Make predictor matrix for VAR(p) process
Z := (Z_0, ..., Z_T).T (T x Kp)
Z_t = [1 y_t y_{t-1} ... y_{t - p + 1}] (Kp x 1)
Ref: Lütkepohl p.70 (transposed)
has_constant can be 'raise', 'add', or 'skip'. See add_constant.
"""
nobs = len(y)
# Ravel C order, need to put in descending order
Z = np.array([y[t-lags : t][::-1].ravel() for t in range(lags, nobs)])
# Add constant, trend, etc.
if trend != 'n':
Z = tsa.add_trend(Z, prepend=True, trend=trend,
has_constant=has_constant)
    return Z

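
# A worked illustration of the Z matrix defined above, built by hand for a
# univariate series with p = 2 lags and a constant (values are made up).
import numpy as np

y = np.arange(1.0, 7.0).reshape(-1, 1)   # y_1, ..., y_6 with K = 1
lags = 2
Z = np.array([y[t - lags:t][::-1].ravel() for t in range(lags, len(y))])
Z = np.column_stack([np.ones(len(Z)), Z])
# each row is [1, y_{t-1}, y_{t-2}], matching the descending lag order above
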
def make_lag_names(names, lag_order, trendorder=1, exog=None):
"""
Produce list of lag-variable names. Constant / trends go at the beginning
Examples
--------
>>> make_lag_names(['foo', 'bar'], 2, 1)
['const', 'L1.foo', 'L1.bar', 'L2.foo', 'L2.bar']
"""
lag_names = []
if isinstance(names, str):
names = [names]
# take care of lagged endogenous names
for i in range(1, lag_order + 1):
for name in names:
if not isinstance(name, str):
name = str(name) # will need consistent unicode handling
            lag_names.append(f"L{i}.{name}")
# handle the constant name
if trendorder != 0:
lag_names.insert(0, 'const')
if trendorder > 1:
lag_names.insert(1, 'trend')
if trendorder > 2:
lag_names.insert(2, 'trend**2')
if exog is not None:
if isinstance(exog, pd.Series):
exog = pd.DataFrame(exog)
elif not hasattr(exog, 'ndim'):
exog = np.asarray(exog)
if exog.ndim == 1:
exog = exog[:, None]
for i in range(exog.shape[1]):
if isinstance(exog, pd.DataFrame):
exog_name = str(exog.columns[i])
else:
exog_name = "exog" + str(i)
lag_names.insert(trendorder + i, exog_name)
    return lag_names

def comp_matrix(coefs):
"""
    Return the companion matrix for the VAR(1) representation of a VAR(p) process
(companion form)
A = [A_1 A_2 ... A_p-1 A_p
I_K 0 0 0
0 I_K ... 0 0
0 ... I_K 0]
"""
p, k1, k2 = coefs.shape
if k1 != k2:
raise ValueError('coefs must be 3-d with shape (p, k, k).')
kp = k1 * p
result = np.zeros((kp, kp))
result[:k1] = np.concatenate(coefs, axis=1)
# Set I_K matrices
if p > 1:
result[np.arange(k1, kp), np.arange(kp-k1)] = 1
    return result

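
# Companion form of a bivariate VAR(2) with made-up coefficients: the top
# block row holds [A_1 A_2], with an identity block on the subdiagonal.
import numpy as np

coefs = np.array([[[0.5, 0.1], [0.0, 0.4]],
                  [[0.2, 0.0], [0.1, 0.1]]])   # shape (p, k, k) = (2, 2, 2)
p, k, _ = coefs.shape
A = np.zeros((k * p, k * p))
A[:k] = np.concatenate(coefs, axis=1)
A[np.arange(k, k * p), np.arange(k * p - k)] = 1
# eigenvalues of A inside the unit circle <=> the VAR(2) is stable
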
def parse_lutkepohl_data(path): # pragma: no cover
"""
Parse data files from Lütkepohl (2005) book
Source for data files: www.jmulti.de
"""
from collections import deque
from datetime import datetime
import re
regex = re.compile(asbytes(r'<(.*) (\w)([\d]+)>.*'))
with open(path, 'rb') as f:
lines = deque(f)
to_skip = 0
while asbytes('*/') not in lines.popleft():
to_skip += 1
while True:
to_skip += 1
line = lines.popleft()
m = regex.match(line)
if m:
year, freq, start_point = m.groups()
break
data = (pd.read_csv(path, delimiter=r"\s+", header=to_skip+1)
.to_records(index=False))
n = len(data)
# generate the corresponding date range (using pandas for now)
start_point = int(start_point)
year = int(year)
offsets = {
asbytes('Q'): frequencies.BQuarterEnd(),
asbytes('M'): frequencies.BMonthEnd(),
asbytes('A'): frequencies.BYearEnd()
}
# create an instance
offset = offsets[freq]
inc = offset * (start_point - 1)
start_date = offset.rollforward(datetime(year, 1, 1)) + inc
date_range = pd.date_range(start=start_date, freq=offset, periods=n)
    return data, date_range

def varsim(coefs, intercept, sig_u, steps=100, initial_values=None, seed=None, nsimulations=None):
"""
Simulate VAR(p) process, given coefficients and assuming Gaussian noise
Parameters
----------
coefs : ndarray
Coefficients for the VAR lags of endog.
intercept : None or ndarray 1-D (neqs,) or (steps, neqs)
This can be either the intercept for each equation or an offset.
If None, then the VAR process has a zero intercept.
If intercept is 1-D, then the same (endog specific) intercept is added
to all observations.
If intercept is 2-D, then it is treated as an offset and is added as
an observation specific intercept to the autoregression. In this case,
the intercept/offset should have same number of rows as steps, and the
same number of columns as endogenous variables (neqs).
sig_u : ndarray
Covariance matrix of the residuals or innovations.
If sig_u is None, then an identity matrix is used.
steps : {None, int}
number of observations to simulate, this includes the initial
observations to start the autoregressive process.
If offset is not None, then exog of the model are used if they were
provided in the model
initial_values : array_like, optional
        Initial values for use in the simulation. Shape should be
        (nlags, neqs) or (neqs,). Values should be ordered from least to
        most recent. Note that these values will be returned by the
        simulation as the first values of `endog_simulated` and they
        count toward the total number of steps.
seed : {None, int}
        If seed is not None, then it will be used to seed the random
        variables generated by numpy.random.
nsimulations : {None, int}
Number of simulations to perform. If `nsimulations` is None it will
perform one simulation and return value will have shape (steps, neqs).
Returns
-------
endog_simulated : nd_array
Endog of the simulated VAR process. Shape will be (nsimulations, steps, neqs)
or (steps, neqs) if `nsimulations` is None.
"""
rs = np.random.RandomState(seed=seed)
rmvnorm = rs.multivariate_normal
    p, k, _ = coefs.shape
    nsimulations = int_like(nsimulations, "nsimulations", optional=True)
if isinstance(nsimulations, int) and nsimulations <= 0:
raise ValueError("nsimulations must be a positive integer if provided")
if nsimulations is None:
result_shape = (steps, k)
nsimulations = 1
else:
result_shape = (nsimulations, steps, k)
if sig_u is None:
sig_u = np.eye(k)
ugen = rmvnorm(np.zeros(len(sig_u)), sig_u, steps*nsimulations).reshape(nsimulations, steps, k)
result = np.zeros((nsimulations, steps, k))
if intercept is not None:
# intercept can be 2-D like an offset variable
if np.ndim(intercept) > 1:
if not len(intercept) == ugen.shape[1]:
raise ValueError('2-D intercept needs to have length `steps`')
        # add intercept/offset also to initial values
result += intercept
result[:,p:] += ugen[:,p:]
else:
result[:,p:] = ugen[:,p:]
initial_values = array_like(initial_values, "initial_values", optional=True, maxdim=2)
if initial_values is not None:
if not (initial_values.shape == (p, k) or initial_values.shape == (k,)):
raise ValueError("initial_values should have shape (p, k) or (k,) where p is the number of lags and k is the number of equations.")
result[:,:p] = initial_values
# add in AR terms
for t in range(p, steps):
ygen = result[:,t]
for j in range(p):
ygen += np.dot(coefs[j], result[:,t-j-1].T).T
    return result.reshape(result_shape)

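
# A hedged usage sketch of varsim: simulate a stable bivariate VAR(1) with
# a constant intercept. Coefficients and covariance are made up.
import numpy as np
from statsmodels.tsa.vector_ar.util import varsim

coefs = np.array([[[0.5, 0.1], [0.0, 0.4]]])   # one lag, two equations
sim = varsim(coefs, np.array([1.0, 2.0]), np.eye(2), steps=250, seed=42)
print(sim.shape)   # (250, 2)
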
def eigval_decomp(sym_array):
"""
Returns
-------
W: array of eigenvectors
eigva: list of eigenvalues
    k: index of the largest eigenvalue
"""
#check if symmetric, do not include shock period
eigva, W = linalg.eig(sym_array, left=True, right=False)
k = np.argmax(eigva)
    return W, eigva, k

def vech(A):
"""
Simple vech operator
Returns
-------
vechvec: vector of all elements on and below diagonal
"""
    length = A.shape[1]
    vechvec = []
    for i in range(length):
        for b in range(i, length):
            vechvec.append(A[b, i])
    vechvec = np.asarray(vechvec)
    return vechvec

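
# vech of a symmetric 2x2 matrix stacks the on- and below-diagonal entries
# column by column, using the function defined above.
import numpy as np

A = np.array([[1.0, 2.0], [2.0, 3.0]])
print(vech(A))   # -> [1. 2. 3.], i.e. A[0, 0], A[1, 0], A[1, 1]
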
def seasonal_dummies(n_seasons, len_endog, first_period=0, centered=False):
"""
Parameters
----------
n_seasons : int >= 0
Number of seasons (e.g. 12 for monthly data and 4 for quarterly data).
len_endog : int >= 0
Total number of observations.
first_period : int, default: 0
Season of the first observation. As an example, suppose we have monthly
data and the first observation is in March (third month of the year).
In this case we pass 2 as first_period. (0 for the first season,
1 for the second, ..., n_seasons-1 for the last season).
        An integer greater than n_seasons-1 is treated in the same way as
        the integer modulo n_seasons.
centered : bool, default: False
If True, center (demean) the dummy variables. That is useful in order
to get seasonal dummies that are orthogonal to the vector of constant
dummy variables (a vector of ones).
Returns
-------
seasonal_dummies : ndarray (len_endog x n_seasons-1)
"""
    if n_seasons == 0:
        return np.empty((len_endog, 0))
    season_exog = np.zeros((len_endog, n_seasons - 1))
    for i in range(n_seasons - 1):
        season_exog[(i - first_period) % n_seasons::n_seasons, i] = 1
    if centered:
        season_exog -= 1 / n_seasons
    return season_exog

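
# Quarterly example of the seasonal dummies above: 4 seasons give 3 centered
# dummy columns, each summing to zero and hence orthogonal to a constant.
dummies = seasonal_dummies(4, 8, first_period=0, centered=True)
print(dummies.shape)        # (8, 3)
print(dummies.sum(axis=0))  # [0. 0. 0.]


# --- statsmodels/tsa/vector_ar/vecm.py ---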
def select_order(
data,
maxlags: int,
deterministic: str = "n",
seasons: int = 0,
exog=None,
exog_coint=None,
):
"""
Compute lag order selections based on each of the available information
criteria.
Parameters
----------
data : array_like (nobs_tot x neqs)
The observed data.
maxlags : int
All orders until maxlag will be compared according to the information
criteria listed in the Results-section of this docstring.
deterministic : str {"n", "co", "ci", "lo", "li"}
* ``"n"`` - no deterministic terms
* ``"co"`` - constant outside the cointegration relation
* ``"ci"`` - constant within the cointegration relation
* ``"lo"`` - linear trend outside the cointegration relation
* ``"li"`` - linear trend within the cointegration relation
Combinations of these are possible (e.g. ``"cili"`` or ``"colo"`` for
linear trend with intercept). See the docstring of the
:class:`VECM`-class for more information.
seasons : int, default: 0
Number of periods in a seasonal cycle.
exog : ndarray (nobs_tot x neqs) or `None`, default: `None`
Deterministic terms outside the cointegration relation.
exog_coint : ndarray (nobs_tot x neqs) or `None`, default: `None`
Deterministic terms inside the cointegration relation.
Returns
-------
selected_orders : :class:`statsmodels.tsa.vector_ar.var_model.LagOrderResults`
"""
ic = defaultdict(list)
deterministic = string_like(deterministic, "deterministic")
for p in range(1, maxlags + 2): # +2 because k_ar_VECM == k_ar_VAR - 1
exogs = []
if "co" in deterministic or "ci" in deterministic:
exogs.append(np.ones(len(data)).reshape(-1, 1))
if "lo" in deterministic or "li" in deterministic:
exogs.append(1 + np.arange(len(data)).reshape(-1, 1))
if exog_coint is not None:
exogs.append(exog_coint)
if seasons > 0:
exogs.append(
seasonal_dummies(seasons, len(data)).reshape(-1, seasons - 1)
)
if exog is not None:
exogs.append(exog)
exogs = hstack(exogs) if exogs else None
var_model = VAR(data, exogs)
# exclude some periods ==> same amount of data used for each lag order
var_result = var_model._estimate_var(lags=p, offset=maxlags + 1 - p)
for k, v in var_result.info_criteria.items():
ic[k].append(v)
# -1+1 in the following line is only here for clarification.
# -1 because k_ar_VECM == k_ar_VAR - 1
# +1 because p == index +1 (we start with p=1, not p=0)
selected_orders = {
ic_name: np.array(ic_value).argmin() - 1 + 1
for ic_name, ic_value in ic.items()
}
    return LagOrderResults(ic, selected_orders, True)

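
# A hedged usage sketch of select_order on simulated random-walk data:
# compare VECM lag orders across the information criteria.
import numpy as np
from statsmodels.tsa.vector_ar.vecm import select_order

rng = np.random.default_rng(0)
data = rng.standard_normal((250, 3)).cumsum(axis=0)
orders = select_order(data, maxlags=5, deterministic="ci", seasons=0)
print(orders.summary())
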
def _linear_trend(nobs, k_ar, coint=False):
"""
Construct an ndarray representing a linear trend in a VECM.
Parameters
----------
nobs : int
Number of observations excluding the presample.
k_ar : int
Number of lags in levels.
coint : bool, default: False
If True (False), the returned array represents a linear trend inside
(outside) the cointegration relation.
Returns
-------
ret : ndarray (nobs)
An ndarray representing a linear trend in a VECM
Notes
-----
The returned array's size is nobs and not nobs_tot so it cannot be used to
construct the exog-argument of VECM's __init__ method.
"""
ret = np.arange(nobs) + k_ar
if not coint:
ret += 1
    return ret

def _num_det_vars(det_string, seasons=0):
"""Gives the number of deterministic variables specified by det_string and
seasons.
Parameters
----------
det_string : str {"n", "co", "ci", "lo", "li"}
* "n" - no deterministic terms
* "co" - constant outside the cointegration relation
* "ci" - constant within the cointegration relation
* "lo" - linear trend outside the cointegration relation
* "li" - linear trend within the cointegration relation
Combinations of these are possible (e.g. "cili" or "colo" for linear
trend with intercept). See the docstring of the :class:`VECM`-class for
more information.
seasons : int
Number of periods in a seasonal cycle.
Returns
-------
num : int
Number of deterministic terms and number dummy variables for seasonal
terms.
"""
num = 0
det_string = string_like(det_string, "det_string")
if "ci" in det_string or "co" in det_string:
num += 1
if "li" in det_string or "lo" in det_string:
num += 1
if seasons > 0:
num += seasons - 1
return num | Gives the number of deterministic variables specified by det_string and
seasons.
Parameters
----------
det_string : str {"n", "co", "ci", "lo", "li"}
* "n" - no deterministic terms
* "co" - constant outside the cointegration relation
* "ci" - constant within the cointegration relation
* "lo" - linear trend outside the cointegration relation
* "li" - linear trend within the cointegration relation
Combinations of these are possible (e.g. "cili" or "colo" for linear
trend with intercept). See the docstring of the :class:`VECM`-class for
more information.
seasons : int
Number of periods in a seasonal cycle.
Returns
-------
num : int
Number of deterministic terms and number of dummy variables for seasonal
terms. | _num_det_vars | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause
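The counting rule is easy to verify by hand; a small illustrative check (private helper):

# Illustrative check of the counting rule (private helper).
from statsmodels.tsa.vector_ar.vecm import _num_det_vars

# "cili": one constant + one linear trend; four seasons add 4 - 1 = 3 dummies.
print(_num_det_vars("cili", seasons=4))  # 5
print(_num_det_vars("n"))                # 0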
def _deterministic_to_exog(
deterministic,
seasons,
nobs_tot,
first_season=0,
seasons_centered=False,
exog=None,
exog_coint=None,
):
"""
Translate all information about deterministic terms into a single array.
This information is taken from `deterministic` and `seasons` as well as
from the `exog` and `exog_coint` arrays. The resulting array can then
be used e.g. in VAR's __init__ method.
Parameters
----------
deterministic : str
A string specifying the deterministic terms in the model. See VECM's
docstring for more information.
seasons : int
Number of periods in a seasonal cycle.
nobs_tot : int
Number of observations including the presample.
first_season : int, default: 0
Season of the first observation.
seasons_centered : bool, default: False
If True, the seasonal dummy variables are demeaned such that they are
orthogonal to an intercept term.
exog : ndarray (nobs_tot x #det_terms) or None, default: None
An ndarray representing deterministic terms outside the cointegration
relation.
exog_coint : ndarray (nobs_tot x #det_terms_coint) or None, default: None
An ndarray representing deterministic terms inside the cointegration
relation.
Returns
-------
exog : ndarray or None
None, if the function's arguments do not contain deterministic terms.
Otherwise, an ndarray representing these deterministic terms.
"""
exogs = []
deterministic = string_like(deterministic, "deterministic")
if "co" in deterministic or "ci" in deterministic:
exogs.append(np.ones(nobs_tot))
if exog_coint is not None:
exogs.append(exog_coint)
if "lo" in deterministic or "li" in deterministic:
exogs.append(np.arange(nobs_tot))
if seasons > 0:
exogs.append(
seasonal_dummies(
seasons,
nobs_tot,
first_period=first_season,
centered=seasons_centered,
)
)
if exog is not None:
exogs.append(exog)
return np.column_stack(exogs) if exogs else None | Translate all information about deterministic terms into a single array.
This information is taken from `deterministic` and `seasons` as well as
from the `exog` and `exog_coint` arrays. The resulting array can then
be used e.g. in VAR's __init__ method.
Parameters
----------
deterministic : str
A string specifying the deterministic terms in the model. See VECM's
docstring for more information.
seasons : int
Number of periods in a seasonal cycle.
nobs_tot : int
Number of observations including the presample.
first_season : int, default: 0
Season of the first observation.
seasons_centered : bool, default: False
If True, the seasonal dummy variables are demeaned such that they are
orthogonal to an intercept term.
exog : ndarray (nobs_tot x #det_terms) or None, default: None
An ndarray representing deterministic terms outside the cointegration
relation.
exog_coint : ndarray (nobs_tot x #det_terms_coint) or None, default: None
An ndarray representing deterministic terms inside the cointegration
relation.
Returns
-------
exog : ndarray or None
None, if the function's arguments do not contain deterministic terms.
Otherwise, an ndarray representing these deterministic terms. | _deterministic_to_exog | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
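A sketch of how the translation looks for a constant plus quarterly dummies; the shapes are the point here, the inputs are made up (private helper).

# Illustrative call (private helper); only the shape of the result matters.
from statsmodels.tsa.vector_ar.vecm import _deterministic_to_exog

ex = _deterministic_to_exog("co", seasons=4, nobs_tot=100,
                            seasons_centered=True)
print(ex.shape)  # (100, 4): one constant column + 3 centered seasonal dummies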
def _mat_sqrt(_2darray):
"""Calculates the square root of a matrix.
Parameters
----------
_2darray : ndarray
A 2-dimensional ndarray representing a square matrix.
Returns
-------
result : ndarray
Square root of the matrix given as function argument.
"""
u_, s_, v_ = svd(_2darray, full_matrices=False)
s_ = np.sqrt(s_)
return u_.dot(s_[:, None] * v_) | Calculates the square root of a matrix.
Parameters
----------
_2darray : ndarray
A 2-dimensional ndarray representing a square matrix.
Returns
-------
result : ndarray
Square root of the matrix given as function argument. | _mat_sqrt | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
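Since the helper relies on the SVD, its output squares back to the input for symmetric positive (semi)definite matrices; a quick illustrative check:

# Illustrative check for a symmetric positive definite matrix.
import numpy as np
from statsmodels.tsa.vector_ar.vecm import _mat_sqrt

a = np.array([[4.0, 2.0], [2.0, 3.0]])
s = _mat_sqrt(a)
print(np.allclose(s @ s, a))  # True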
def _endog_matrices(
endog,
exog,
exog_coint,
diff_lags,
deterministic,
seasons=0,
first_season=0,
):
"""
Returns different matrices needed for parameter estimation.
Compare p. 286 in [1]_. The returned matrices consist of elements of the
data as well as elements representing deterministic terms. A tuple
consisting of these matrices is returned.
Parameters
----------
endog : ndarray (neqs x nobs_tot)
The whole sample including the presample.
exog : ndarray (nobs_tot x neqs) or None
Deterministic terms outside the cointegration relation.
exog_coint : ndarray (nobs_tot x neqs) or None
Deterministic terms inside the cointegration relation.
diff_lags : int
Number of lags in the VEC representation.
deterministic : str {``"n"``, ``"co"``, ``"ci"``, ``"lo"``, ``"li"``}
* ``"n"`` - no deterministic terms
* ``"co"`` - constant outside the cointegration relation
* ``"ci"`` - constant within the cointegration relation
* ``"lo"`` - linear trend outside the cointegration relation
* ``"li"`` - linear trend within the cointegration relation
Combinations of these are possible (e.g. ``"cili"`` or ``"colo"`` for
linear trend with intercept). See the docstring of the
:class:`VECM`-class for more information.
seasons : int, default: 0
Number of periods in a seasonal cycle. 0 (default) means no seasons.
first_season : int, default: 0
The season of the first observation. `0` means first season, `1` means
second season, ..., `seasons-1` means the last season.
Returns
-------
y_1_T : ndarray (neqs x nobs)
The (transposed) data without the presample.
.. math:: (y_1, \\ldots, y_T)
delta_y_1_T : ndarray (neqs x nobs)
The first differences of endog.
.. math:: (y_1, \\ldots, y_T) - (y_0, \\ldots, y_{T-1})
y_lag1 : ndarray (neqs x nobs)
(dimensions assuming no deterministic terms are given)
Endog of the previous period (lag 1).
.. math:: (y_0, \\ldots, y_{T-1})
delta_x : ndarray (k_ar_diff*neqs x nobs)
(dimensions assuming no deterministic terms are given)
Lagged differenced endog, used as regressor for the short term
equation.
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer.
"""
deterministic = string_like(deterministic, "deterministic")
# p. 286:
p = diff_lags + 1
y = endog
K = y.shape[0]
y_1_T = y[:, p:]
T = y_1_T.shape[1]
delta_y = np.diff(y)
delta_y_1_T = delta_y[:, p - 1 :]
y_lag1 = y[:, p - 1 : -1]
if "co" in deterministic and "ci" in deterministic:
raise ValueError(
"Both 'co' and 'ci' as deterministic terms given. "
+ "Please choose one of the two."
)
y_lag1_stack = [y_lag1]
if "ci" in deterministic: # pp. 257, 299, 306, 307
y_lag1_stack.append(np.ones(T))
if "li" in deterministic: # p. 299
y_lag1_stack.append(_linear_trend(T, p, coint=True))
if exog_coint is not None:
y_lag1_stack.append(exog_coint[-T - 1 : -1].T)
y_lag1 = np.vstack(y_lag1_stack)
# p. 286:
delta_x = np.zeros((diff_lags * K, T))
if diff_lags > 0:
for j in range(delta_x.shape[1]):
delta_x[:, j] = delta_y[
:, j + p - 2 : None if j - 1 < 0 else j - 1 : -1
].T.reshape(K * (p - 1))
delta_x_stack = [delta_x]
# p. 299, p. 303:
if "co" in deterministic:
delta_x_stack.append(np.ones(T))
if seasons > 0:
delta_x_stack.append(
seasonal_dummies(
seasons,
delta_x.shape[1],
first_period=first_season + diff_lags + 1,
centered=True,
).T
)
if "lo" in deterministic:
delta_x_stack.append(_linear_trend(T, p))
if exog is not None:
delta_x_stack.append(exog[-T:].T)
delta_x = np.vstack(delta_x_stack)
return y_1_T, delta_y_1_T, y_lag1, delta_x | Returns different matrices needed for parameter estimation.
Compare p. 286 in [1]_. The returned matrices consist of elements of the
data as well as elements representing deterministic terms. A tuple
consisting of these matrices is returned.
Parameters
----------
endog : ndarray (neqs x nobs_tot)
The whole sample including the presample.
exog : ndarray (nobs_tot x neqs) or None
Deterministic terms outside the cointegration relation.
exog_coint : ndarray (nobs_tot x neqs) or None
Deterministic terms inside the cointegration relation.
diff_lags : int
Number of lags in the VEC representation.
deterministic : str {``"n"``, ``"co"``, ``"ci"``, ``"lo"``, ``"li"``}
* ``"n"`` - no deterministic terms
* ``"co"`` - constant outside the cointegration relation
* ``"ci"`` - constant within the cointegration relation
* ``"lo"`` - linear trend outside the cointegration relation
* ``"li"`` - linear trend within the cointegration relation
Combinations of these are possible (e.g. ``"cili"`` or ``"colo"`` for
linear trend with intercept). See the docstring of the
:class:`VECM`-class for more information.
seasons : int, default: 0
Number of periods in a seasonal cycle. 0 (default) means no seasons.
first_season : int, default: 0
The season of the first observation. `0` means first season, `1` means
second season, ..., `seasons-1` means the last season.
Returns
-------
y_1_T : ndarray (neqs x nobs)
The (transposed) data without the presample.
.. math:: (y_1, \\ldots, y_T)
delta_y_1_T : ndarray (neqs x nobs)
The first differences of endog.
.. math:: (y_1, \\ldots, y_T) - (y_0, \\ldots, y_{T-1})
y_lag1 : ndarray (neqs x nobs)
(dimensions assuming no deterministic terms are given)
Endog of the previous period (lag 1).
.. math:: (y_0, \\ldots, y_{T-1})
delta_x : ndarray (k_ar_diff*neqs x nobs)
(dimensions assuming no deterministic terms are given)
Lagged differenced endog, used as regressor for the short term
equation.
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer. | _endog_matrices | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
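To make the shapes concrete, an illustrative call on simulated data (private helper; note the neqs x nobs_tot orientation of endog):

# Illustrative shape check (private helper); endog is neqs x nobs_tot here.
import numpy as np
from statsmodels.tsa.vector_ar.vecm import _endog_matrices

rng = np.random.default_rng(0)
y = rng.standard_normal((3, 100)).cumsum(axis=1)
y_1_T, dy_1_T, y_lag1, dx = _endog_matrices(
    y, exog=None, exog_coint=None, diff_lags=2, deterministic="ci")
# p = diff_lags + 1 = 3 presample values, so T = 100 - 3 = 97:
print(y_1_T.shape, dy_1_T.shape)  # (3, 97) (3, 97)
print(y_lag1.shape)               # (4, 97): 3 endog rows + constant row ("ci")
print(dx.shape)                   # (6, 97): diff_lags * neqs regressor rows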
def _r_matrices(delta_y_1_T, y_lag1, delta_x):
"""Returns two ndarrays needed for parameter estimation as well as the
calculation of standard errors.
Parameters
----------
delta_y_1_T : ndarray (neqs x nobs)
The first differences of endog.
.. math:: (y_1, \\ldots, y_T) - (y_0, \\ldots, y_{T-1})
y_lag1 : ndarray (neqs x nobs)
(dimensions assuming no deterministic terms are given)
Endog of the previous period (lag 1).
.. math:: (y_0, \\ldots, y_{T-1})
delta_x : ndarray (k_ar_diff*neqs x nobs)
(dimensions assuming no deterministic terms are given)
Lagged differenced endog, used as regressor for the short term
equation.
Returns
-------
result : tuple
A tuple of two ndarrays. (See p. 292 in [1]_ for the definition of
R_0 and R_1.)
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer.
"""
# todo: rewrite m such that a big (TxT) matrix is avoided
nobs = y_lag1.shape[1]
m = np.identity(nobs) - (
delta_x.T.dot(inv(delta_x.dot(delta_x.T))).dot(delta_x)
) # p. 291
r0 = delta_y_1_T.dot(m) # p. 292
r1 = y_lag1.dot(m)
return r0, r1 | Returns two ndarrays needed for parameter estimation as well as the
calculation of standard errors.
Parameters
----------
delta_y_1_T : ndarray (neqs x nobs)
The first differences of endog.
.. math:: (y_1, \\ldots, y_T) - (y_0, \\ldots, y_{T-1})
y_lag1 : ndarray (neqs x nobs)
(dimensions assuming no deterministic terms are given)
Endog of the previous period (lag 1).
.. math:: (y_0, \\ldots, y_{T-1})
delta_x : ndarray (k_ar_diff*neqs x nobs)
(dimensions assuming no deterministic terms are given)
Lagged differenced endog, used as regressor for the short term
equation.
Returns
-------
result : tuple
A tuple of two ndarrays. (See p. 292 in [1]_ for the definition of
R_0 and R_1.)
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer. | _r_matrices | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
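Regarding the todo-comment about the T x T matrix: the same residuals can be obtained without forming M explicitly, by taking least-squares residuals with respect to the rows of delta_x. This is my sketch of the idea, not the library's code.

# Sketch (not the library's implementation): avoid the T x T matrix M by
# computing least-squares residuals with respect to the rows of delta_x.
import numpy as np

def _r_matrices_lstsq(delta_y_1_T, y_lag1, delta_x):
    coef_y, *_ = np.linalg.lstsq(delta_x.T, delta_y_1_T.T, rcond=None)
    coef_l, *_ = np.linalg.lstsq(delta_x.T, y_lag1.T, rcond=None)
    r0 = delta_y_1_T - coef_y.T @ delta_x  # same r0 as delta_y_1_T.dot(m)
    r1 = y_lag1 - coef_l.T @ delta_x       # same r1 as y_lag1.dot(m)
    return r0, r1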
def _sij(delta_x, delta_y_1_T, y_lag1):
"""Returns matrices and eigenvalues and -vectors used for parameter
estimation and the calculation of a models loglikelihood.
Parameters
----------
delta_x : ndarray (k_ar_diff*neqs x nobs)
(dimensions assuming no deterministic terms are given)
delta_y_1_T : ndarray (neqs x nobs)
:math:`(y_1, \\ldots, y_T) - (y_0, \\ldots, y_{T-1})`
y_lag1 : ndarray (neqs x nobs)
(dimensions assuming no deterministic terms are given)
:math:`(y_0, \\ldots, y_{T-1})`
Returns
-------
result : tuple
A tuple of five ndarrays as well as eigenvalues and -vectors of a
certain (matrix) product of some of the returned ndarrays.
(See pp. 294-295 in [1]_ for more information on
:math:`S_0, S_1, \\lambda_i, v_i` for
:math:`i \\in \\{1, \\dots, K\\}`.)
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer.
"""
nobs = y_lag1.shape[1]
r0, r1 = _r_matrices(delta_y_1_T, y_lag1, delta_x)
s00 = np.dot(r0, r0.T) / nobs
s01 = np.dot(r0, r1.T) / nobs
s10 = s01.T
s11 = np.dot(r1, r1.T) / nobs
s11_ = inv(_mat_sqrt(s11))
# p. 295:
s01_s11_ = np.dot(s01, s11_)
eig = np.linalg.eig(s01_s11_.T @ inv(s00) @ s01_s11_)
lambd = eig[0]
v = eig[1]
# reorder eig_vals to make them decreasing (and order eig_vecs accordingly)
lambd_order = np.argsort(lambd)[::-1]
lambd = lambd[lambd_order]
v = v[:, lambd_order]
return s00, s01, s10, s11, s11_, lambd, v | Returns matrices, eigenvalues, and eigenvectors used for parameter
estimation and the calculation of a model's loglikelihood.
Parameters
----------
delta_x : ndarray (k_ar_diff*neqs x nobs)
(dimensions assuming no deterministic terms are given)
delta_y_1_T : ndarray (neqs x nobs)
:math:`(y_1, \\ldots, y_T) - (y_0, \\ldots, y_{T-1})`
y_lag1 : ndarray (neqs x nobs)
(dimensions assuming no deterministic terms are given)
:math:`(y_0, \\ldots, y_{T-1})`
Returns
-------
result : tuple
A tuple of five ndarrays as well as eigenvalues and -vectors of a
certain (matrix) product of some of the returned ndarrays.
(See pp. 294-295 in [1]_ for more information on
:math:`S_0, S_1, \\lambda_i, v_i` for
:math:`i \\in \\{1, \\dots, K\\}`.)
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer. | _sij | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def select_coint_rank(
endog, det_order, k_ar_diff, method="trace", signif=0.05
):
"""Calculate the cointegration rank of a VECM.
Parameters
----------
endog : array_like (nobs_tot x neqs)
The data with presample.
det_order : int
* -1 - no deterministic terms
* 0 - constant term
* 1 - linear trend
k_ar_diff : int, nonnegative
Number of lagged differences in the model.
method : str, {``"trace"``, ``"maxeig"``}, default: ``"trace"``
If ``"trace"``, the trace test statistic is used. If ``"maxeig"``, the
maximum eigenvalue test statistic is used.
signif : float, {0.1, 0.05, 0.01}, default: 0.05
The test's significance level.
Returns
-------
rank : :class:`CointRankResults`
A :class:`CointRankResults` object containing the cointegration rank suggested
by the test and allowing a summary to be printed.
"""
if method not in ["trace", "maxeig"]:
raise ValueError(
"The method argument has to be either 'trace' or"
"'maximum eigenvalue'."
)
if det_order not in [-1, 0, 1]:
if type(det_order) is int and det_order > 1:
raise ValueError(
"A det_order greather than 1 is not supported."
"Use a value of -1, 0, or 1."
)
else:
raise ValueError("det_order must be -1, 0, or 1.")
possible_signif_values = [0.1, 0.05, 0.01]
if signif not in possible_signif_values:
raise ValueError(
"Please choose a significance level from {0.1, 0.05," "0.01}"
)
coint_result = coint_johansen(endog, det_order, k_ar_diff)
test_stat = coint_result.lr1 if method == "trace" else coint_result.lr2
crit_vals = coint_result.cvt if method == "trace" else coint_result.cvm
signif_index = possible_signif_values.index(signif)
neqs = endog.shape[1]
r_0 = 0 # rank in null hypothesis
while r_0 < neqs:
if test_stat[r_0] < crit_vals[r_0, signif_index]:
break # we accept current rank
else:
r_0 += 1 # we reject current rank and test next possible rank
return CointRankResults(
r_0,
neqs,
test_stat[: r_0 + 1],
crit_vals[: r_0 + 1, signif_index],
method,
signif,
) | Calculate the cointegration rank of a VECM.
Parameters
----------
endog : array_like (nobs_tot x neqs)
The data with presample.
det_order : int
* -1 - no deterministic terms
* 0 - constant term
* 1 - linear trend
k_ar_diff : int, nonnegative
Number of lagged differences in the model.
method : str, {``"trace"``, ``"maxeig"``}, default: ``"trace"``
If ``"trace"``, the trace test statistic is used. If ``"maxeig"``, the
maximum eigenvalue test statistic is used.
signif : float, {0.1, 0.05, 0.01}, default: 0.05
The test's significance level.
Returns
-------
rank : :class:`CointRankResults`
A :class:`CointRankResults` object containing the cointegration rank suggested
by the test and allowing a summary to be printed. | select_coint_rank | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
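A minimal end-to-end sketch on simulated data sharing one common trend (so the suggested rank is usually neqs - 1 = 2); the data and names are illustrative only.

# Illustrative usage of select_coint_rank on simulated data.
import numpy as np
from statsmodels.tsa.vector_ar.vecm import select_coint_rank

rng = np.random.default_rng(0)
common = rng.standard_normal(250).cumsum()  # one shared stochastic trend
data = np.column_stack([common + rng.standard_normal(250) for _ in range(3)])
rank_res = select_coint_rank(data, det_order=0, k_ar_diff=1)
print(rank_res.summary())  # trace statistics against critical values
print(rank_res.rank)       # suggested cointegration rank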
def coint_johansen(endog, det_order, k_ar_diff):
"""
Johansen cointegration test of the cointegration rank of a VECM
Parameters
----------
endog : array_like (nobs_tot x neqs)
Data to test
det_order : int
* -1 - no deterministic terms
* 0 - constant term
* 1 - linear trend
k_ar_diff : int, nonnegative
Number of lagged differences in the model.
Returns
-------
result : JohansenTestResult
An object containing the test's results. The most important attributes
of the result class are:
* trace_stat and trace_stat_crit_vals
* max_eig_stat and max_eig_stat_crit_vals
Notes
-----
The implementation might change to make more use of the existing VECM
framework.
See Also
--------
statsmodels.tsa.vector_ar.vecm.select_coint_rank
References
----------
.. [1] Lütkepohl, H. 2005. New Introduction to Multiple Time Series
Analysis. Springer.
"""
import warnings
if det_order not in [-1, 0, 1]:
warnings.warn(
"Critical values are only available for a det_order of "
"-1, 0, or 1.",
category=HypothesisTestWarning,
stacklevel=2,
)
if endog.shape[1] > 12: # todo: test with a time series of 13 variables
warnings.warn(
"Critical values are only available for time series "
"with 12 variables at most.",
category=HypothesisTestWarning,
stacklevel=2,
)
from statsmodels.regression.linear_model import OLS
def detrend(y, order):
if order == -1:
return y
return (
OLS(y, np.vander(np.linspace(-1, 1, len(y)), order + 1))
.fit()
.resid
)
def resid(y, x):
if x.size == 0:
return y
r = y - np.dot(x, np.dot(np.linalg.pinv(x), y))
return r
endog = np.asarray(endog)
nobs, neqs = endog.shape
# f is the detrend order applied to the differenced series below: once
# endog has been detrended with det_order (> -1), only demeaning (order 0)
# remains; with det_order == -1 no detrending is applied anywhere
if det_order > -1:
f = 0
else:
f = det_order
endog = detrend(endog, det_order)
dx = np.diff(endog, 1, axis=0)
z = lagmat(dx, k_ar_diff)
z = z[k_ar_diff:]
z = detrend(z, f)
dx = dx[k_ar_diff:]
dx = detrend(dx, f)
r0t = resid(dx, z)
# GH 5731, [:-0] does not work, need [:t-0]
lx = endog[: (endog.shape[0] - k_ar_diff)]
lx = lx[1:]
dx = detrend(lx, f)
rkt = resid(dx, z) # level on lagged diffs
# Level covariance after filtering k_ar_diff
skk = np.dot(rkt.T, rkt) / rkt.shape[0]
# Covariance between filtered and unfiltered
sk0 = np.dot(rkt.T, r0t) / rkt.shape[0]
s00 = np.dot(r0t.T, r0t) / r0t.shape[0]
sig = np.dot(sk0, np.dot(inv(s00), sk0.T))
tmp = inv(skk)
au, du = np.linalg.eig(np.dot(tmp, sig)) # au is eval, du is evec
temp = inv(np.linalg.cholesky(np.dot(du.T, np.dot(skk, du))))
dt = np.dot(du, temp)
# JP: the next part can be done much easier
auind = np.argsort(au)
aind = np.flipud(auind)
a = au[aind]
d = dt[:, aind]
# Normalize by first non-zero element of d, usually [0, 0]
# GH 5517
non_zero_d = d.flat != 0
if np.any(non_zero_d):
d *= np.sign(d.flat[non_zero_d][0])
# Compute the trace and max eigenvalue statistics
lr1 = np.zeros(neqs)
lr2 = np.zeros(neqs)
cvm = np.zeros((neqs, 3))
cvt = np.zeros((neqs, 3))
iota = np.ones(neqs)
t, _ = rkt.shape
for i in range(0, neqs):
tmp = np.log(iota - a)[i:]
lr1[i] = -t * np.sum(tmp, 0)
lr2[i] = -t * np.log(1 - a[i])
cvm[i, :] = c_sja(neqs - i, det_order)
cvt[i, :] = c_sjt(neqs - i, det_order)
aind[i] = i
return JohansenTestResult(rkt, r0t, a, d, lr1, lr2, cvt, cvm, aind) | Johansen cointegration test of the cointegration rank of a VECM
Parameters
----------
endog : array_like (nobs_tot x neqs)
Data to test
det_order : int
* -1 - no deterministic terms
* 0 - constant term
* 1 - linear trend
k_ar_diff : int, nonnegative
Number of lagged differences in the model.
Returns
-------
result : JohansenTestResult
An object containing the test's results. The most important attributes
of the result class are:
* trace_stat and trace_stat_crit_vals
* max_eig_stat and max_eig_stat_crit_vals
Notes
-----
The implementation might change to make more use of the existing VECM
framework.
See Also
--------
statsmodels.tsa.vector_ar.vecm.select_coint_rank
References
----------
.. [1] Lütkepohl, H. 2005. New Introduction to Multiple Time Series
Analysis. Springer. | coint_johansen | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
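The test result exposes the statistics named above; a short illustrative call on simulated data:

# Illustrative call; attribute names follow the result class above.
import numpy as np
from statsmodels.tsa.vector_ar.vecm import coint_johansen

rng = np.random.default_rng(0)
data = rng.standard_normal((250, 2)).cumsum(axis=0)
res = coint_johansen(data, det_order=0, k_ar_diff=1)
print(res.trace_stat)            # lr1: trace statistic for each rank r0
print(res.trace_stat_crit_vals)  # cvt: 90%/95%/99% critical values
print(res.max_eig_stat)          # lr2: maximum eigenvalue statistics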
def rkt(self):
"""Residuals for :math:`Y_{-1}`"""
return self._rkt | Residuals for :math:`Y_{-1}` | rkt | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def r0t(self):
"""Residuals for :math:`\\Delta Y`."""
return self._r0t | Residuals for :math:`\\Delta Y`. | r0t | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def eig(self):
"""Eigenvalues of VECM coefficient matrix"""
return self._eig | Eigenvalues of VECM coefficient matrix | eig | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def evec(self):
"""Eigenvectors of VECM coefficient matrix"""
return self._evec | Eigenvectors of VECM coefficient matrix | evec | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def trace_stat(self):
"""Trace statistic"""
return self._lr1 | Trace statistic | trace_stat | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def lr1(self):
"""Trace statistic"""
return self._lr1 | Trace statistic | lr1 | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def max_eig_stat(self):
"""Maximum eigenvalue statistic"""
return self._lr2 | Maximum eigenvalue statistic | max_eig_stat | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def lr2(self):
"""Maximum eigenvalue statistic"""
return self._lr2 | Maximum eigenvalue statistic | lr2 | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def trace_stat_crit_vals(self):
"""Critical values (90%, 95%, 99%) of trace statistic"""
return self._cvt | Critical values (90%, 95%, 99%) of trace statistic | trace_stat_crit_vals | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def cvt(self):
"""Critical values (90%, 95%, 99%) of trace statistic"""
return self._cvt | Critical values (90%, 95%, 99%) of trace statistic | cvt | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def cvm(self):
"""Critical values (90%, 95%, 99%) of maximum eigenvalue statistic."""
return self._cvm | Critical values (90%, 95%, 99%) of maximum eigenvalue statistic. | cvm | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def max_eig_stat_crit_vals(self):
"""Critical values (90%, 95%, 99%) of maximum eigenvalue statistic."""
return self._cvm | Critical values (90%, 95%, 99%) of maximum eigenvalue statistic. | max_eig_stat_crit_vals | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def ind(self):
"""Order of eigenvalues"""
return self._ind | Order of eigenvalues | ind | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def meth(self):
"""Test method"""
return self._meth | Test method | meth | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def fit(self, method="ml"):
"""
Estimates the parameters of a VECM.
The estimation procedure is described on pp. 269-304 in [1]_.
Parameters
----------
method : str {"ml"}, default: "ml"
Estimation method to use. "ml" stands for Maximum Likelihood.
Returns
-------
est : :class:`VECMResults`
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer.
"""
if method == "ml":
return self._estimate_vecm_ml()
else:
raise ValueError(
"{} not recognized, must be among {}".format(method, "ml")
) | Estimates the parameters of a VECM.
The estimation procedure is described on pp. 269-304 in [1]_.
Parameters
----------
method : str {"ml"}, default: "ml"
Estimation method to use. "ml" stands for Maximum Likelihood.
Returns
-------
est : :class:`VECMResults`
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer. | fit | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
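A minimal fit on simulated data, purely illustrative:

# Illustrative fit of a VECM on simulated data.
import numpy as np
from statsmodels.tsa.vector_ar.vecm import VECM

rng = np.random.default_rng(0)
y = rng.standard_normal((200, 3)).cumsum(axis=0)
res = VECM(y, k_ar_diff=2, coint_rank=1, deterministic="ci").fit()
print(res.alpha.shape, res.beta.shape)  # (3, 1) (3, 1)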
def _lagged_param_names(self):
"""
Returns parameter names (for Gamma and deterministics) for the summary.
Returns
-------
param_names : list of str
Returns a list of parameter names for the lagged endogenous
parameters which are called :math:`\\Gamma` in [1]_
(see chapter 6).
If present in the model, also names for deterministic terms outside
the cointegration relation are returned. They name the elements of
the matrix C in [1]_ (p. 299).
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer.
"""
param_names = []
# 1. Deterministic terms outside cointegration relation
if "co" in self.deterministic:
param_names += ["const.%s" % n for n in self.endog_names]
if self.seasons > 0:
param_names += [
"season%d.%s" % (s, n)
for s in range(1, self.seasons)
for n in self.endog_names
]
if "lo" in self.deterministic:
param_names += ["lin_trend.%s" % n for n in self.endog_names]
if self.exog is not None:
param_names += [
"exog%d.%s" % (exog_no, n)
for exog_no in range(1, self.exog.shape[1] + 1)
for n in self.endog_names
]
# 2. lagged endogenous terms
param_names += [
"L%d.%s.%s" % (i + 1, n1, n2)
for n2 in self.endog_names
for i in range(self.k_ar_diff)
for n1 in self.endog_names
]
return param_names | Returns parameter names (for Gamma and deterministics) for the summary.
Returns
-------
param_names : list of str
Returns a list of parameter names for the lagged endogenous
parameters which are called :math:`\\Gamma` in [1]_
(see chapter 6).
If present in the model, also names for deterministic terms outside
the cointegration relation are returned. They name the elements of
the matrix C in [1]_ (p. 299).
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer. | _lagged_param_names | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def _load_coef_param_names(self):
"""
Returns parameter names (for alpha) for the summary.
Returns
-------
param_names : list of str
Returns a list of parameter names for the loading coefficients
which are called :math:`\\alpha` in [1]_ (see chapter 6).
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer.
"""
param_names = []
if self.coint_rank == 0:
return None
# loading coefficients (alpha) # called "ec" in JMulTi, "ECT" in tsDyn,
param_names += [ # and "_ce" in Stata
self.load_coef_repr + "%d.%s" % (i + 1, self.endog_names[j])
for j in range(self.neqs)
for i in range(self.coint_rank)
]
return param_names | Returns parameter names (for alpha) for the summary.
Returns
-------
param_names : list of str
Returns a list of parameter names for the loading coefficients
which are called :math:`\\alpha` in [1]_ (see chapter 6).
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer. | _load_coef_param_names | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def _coint_param_names(self):
"""
Returns parameter names (for beta and deterministics) for the summary.
Returns
-------
param_names : list of str
Returns a list of parameter names for the cointegration matrix
as well as deterministic terms inside the cointegration relation
(if present in the model).
"""
# 1. cointegration matrix/vector
param_names = []
param_names += [
("beta.%d." + self.load_coef_repr + "%d") % (j + 1, i + 1)
for i in range(self.coint_rank)
for j in range(self.neqs)
]
# 2. deterministic terms inside cointegration relation
if "ci" in self.deterministic:
param_names += [
"const." + self.load_coef_repr + "%d" % (i + 1)
for i in range(self.coint_rank)
]
if "li" in self.deterministic:
param_names += [
"lin_trend." + self.load_coef_repr + "%d" % (i + 1)
for i in range(self.coint_rank)
]
if self.exog_coint is not None:
param_names += [
"exog_coint%d.%s" % (n + 1, exog_no)
for exog_no in range(1, self.exog_coint.shape[1] + 1)
for n in range(self.neqs)
]
return param_names | Returns parameter names (for beta and deterministics) for the summary.
Returns
-------
param_names : list of str
Returns a list of parameter names for the cointegration matrix
as well as deterministic terms inside the cointegration relation
(if present in the model). | _coint_param_names | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def llf(self): # Lutkepohl p. 295 (7.2.20)
"""
Compute the VECM's loglikelihood.
"""
K = self.neqs
T = self.nobs
r = self.coint_rank
s00, _, _, _, _, lambd, _ = _sij(
self._delta_x, self._delta_y_1_T, self._y_lag1
)
return (
-K * T * np.log(2 * np.pi) / 2
- T * (np.log(np.linalg.det(s00)) + sum(np.log(1 - lambd)[:r])) / 2
- K * T / 2
) | Compute the VECM's loglikelihood. | llf | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def stderr_coint(self):
"""
Standard errors of beta and deterministic terms inside the
cointegration relation.
Notes
-----
See p. 297 in [1]_. Using the rule
.. math::
vec(R B) = (B' \\otimes I) vec(R)
for two matrices B and R which are compatible for multiplication.
This is rule (3) on p. 662 in [1]_.
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer.
"""
r = self.coint_rank
_, r1 = _r_matrices(self._delta_y_1_T, self._y_lag1, self._delta_x)
r12 = r1[r:]
if r12.size == 0:
return np.zeros((r, r))
mat1 = inv(r12.dot(r12.T))
mat1 = np.kron(mat1.T, np.identity(r))
det = self.det_coef_coint.shape[0]
mat2 = np.kron(
np.identity(self.neqs - r + det),
inv(self.alpha.T @ inv(self.sigma_u) @ self.alpha),
)
first_rows = np.zeros((r, r))
last_rows_1d = np.sqrt(np.diag(mat1.dot(mat2)))
last_rows = last_rows_1d.reshape((self.neqs - r + det, r), order="F")
return vstack((first_rows, last_rows)) | Standard errors of beta and deterministic terms inside the
cointegration relation.
Notes
-----
See p. 297 in [1]_. Using the rule
.. math::
vec(R B) = (B' \\otimes I) vec(R)
for two matrices B and R which are compatible for multiplication.
This is rule (3) on p. 662 in [1]_.
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer. | stderr_coint | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def cov_var_repr(self):
"""
Gives the covariance matrix of the corresponding VAR-representation.
More precisely, the covariance matrix of the vector consisting of the
columns of the corresponding VAR coefficient matrices (i.e.
vec(self.var_rep)).
Returns
-------
cov : array (neqs**2 * k_ar x neqs**2 * k_ar)
"""
# This implementation is using the fact that for a random variable x
# with covariance matrix Sigma_x the following holds:
# B @ x with B being a suitably sized matrix has the covariance matrix
# B @ Sigma_x @ B.T. The arrays called vecm_var_transformation and
# self.cov_params_wo_det in the code play the roles of B and Sigma_x
# respectively. The elements of the random variable x are the elements
# of the estimated matrices Pi (alpha @ beta.T) and Gamma.
# Alternatively the following code (commented out) would yield the same
# result (following p. 289 in Lutkepohl):
# K, p = self.neqs, self.k_ar
# w = np.identity(K * p)
# w[np.arange(K, len(w)), np.arange(K, len(w))] *= (-1)
# w[np.arange(K, len(w)), np.arange(len(w)-K)] = 1
#
# w_eye = np.kron(w, np.identity(K))
#
# return w_eye.T @ self.cov_params_default @ w_eye
if self.k_ar - 1 == 0:
return self.cov_params_wo_det
vecm_var_transformation = np.zeros(
(self.neqs ** 2 * self.k_ar, self.neqs ** 2 * self.k_ar)
)
eye = np.identity(self.neqs ** 2)
# for A_1:
vecm_var_transformation[
: self.neqs ** 2, : 2 * self.neqs ** 2
] = hstack((eye, eye))
# for A_i, where i = 2, ..., k_ar-1
for i in range(2, self.k_ar):
start_row = self.neqs ** 2 + (i - 2) * self.neqs ** 2
start_col = self.neqs ** 2 + (i - 2) * self.neqs ** 2
vecm_var_transformation[
start_row : start_row + self.neqs ** 2,
start_col : start_col + 2 * self.neqs ** 2,
] = hstack((-eye, eye))
# for A_p:
vecm_var_transformation[-self.neqs ** 2 :, -self.neqs ** 2 :] = -eye
vvt = vecm_var_transformation
return vvt @ self.cov_params_wo_det @ vvt.T | Gives the covariance matrix of the corresponding VAR-representation.
More precisely, the covariance matrix of the vector consisting of the
columns of the corresponding VAR coefficient matrices (i.e.
vec(self.var_rep)).
Returns
-------
cov : array (neqs**2 * k_ar x neqs**2 * k_ar) | cov_var_repr | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def orth_ma_rep(self, maxn=10, P=None):
"""Compute orthogonalized MA coefficient matrices.
For this purpose a matrix P is used which fulfills
:math:`\\Sigma_u = PP^\\prime`. P defaults to the Cholesky
decomposition of :math:`\\Sigma_u`
Parameters
----------
maxn : int
Number of coefficient matrices to compute
P : ndarray (neqs x neqs), optional
Matrix such that :math:`\\Sigma_u = PP'`. Defaults to Cholesky
decomposition.
Returns
-------
coefs : ndarray (maxn x neqs x neqs)
"""
return orth_ma_rep(self, maxn, P) | Compute orthogonalized MA coefficient matrices.
For this purpose a matrix P is used which fulfills
:math:`\\Sigma_u = PP^\\prime`. P defaults to the Cholesky
decomposition of :math:`\\Sigma_u`
Parameters
----------
maxn : int
Number of coefficient matrices to compute
P : ndarray (neqs x neqs), optional
Matrix such that :math:`\\Sigma_u = PP'`. Defaults to Cholesky
decomposition.
Returns
-------
coefs : ndarray (maxn x neqs x neqs) | orth_ma_rep | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def predict(self, steps=5, alpha=None, exog_fc=None, exog_coint_fc=None):
"""
Calculate future values of the time series.
Parameters
----------
steps : int
Prediction horizon.
alpha : float, 0 < `alpha` < 1 or None
If None, compute point forecast only.
If float, compute confidence intervals too. In this case the
argument stands for the confidence level.
exog_fc : ndarray (steps x self.exog.shape[1])
If self.exog is not None, then information about the future values
of exog has to be passed via this parameter. The ndarray may be
larger in its first dimension. In this case only the first steps
rows will be considered.
exog_coint_fc : ndarray ((steps - 1) x self.exog_coint.shape[1])
Analogously, the future values of exog_coint, if the model contains
deterministic terms inside the cointegration relation.
Returns
-------
forecast : ndarray (steps x neqs) or three ndarrays
In case of a point forecast: each row of the returned ndarray
represents the forecast of the neqs variables for a specific
period. The first row (index [0]) is the forecast for the next
period, the last row (index [steps-1]) is the steps-periods-ahead-
forecast.
"""
if self.exog is not None and exog_fc is None:
raise ValueError(
"exog_fc is None: Please pass the future values "
"of the VECM's exog terms via the exog_fc "
"argument!"
)
if self.exog is None and exog_fc is not None:
raise ValueError(
"This VECMResult-instance's exog attribute is "
"None. Please do not pass a non-None value as the "
"method's exog_fc-argument."
)
if exog_fc is not None and exog_fc.shape[0] < steps:
raise ValueError(
"The argument exog_fc must have at least steps "
"elements in its first dimension"
)
if self.exog_coint is not None and exog_coint_fc is None:
raise ValueError(
"exog_coint_fc is None: Please pass the future "
"values of the VECM's exog_coint terms via the "
"exog_coint_fc argument!"
)
if self.exog_coint is None and exog_coint_fc is not None:
raise ValueError(
"This VECMResult-instance's exog_coint attribute "
"is None. Please do not pass a non-None value as "
"the method's exog_coint_fc-argument."
)
if exog_coint_fc is not None and exog_coint_fc.shape[0] < steps - 1:
raise ValueError(
"The argument exog_coint_fc must have at least "
"steps elements in its first dimension"
)
last_observations = self.y_all.T[-self.k_ar :]
exog = []
trend_coefs = []
# adding deterministic terms outside cointegration relation
exog_const = np.ones(steps)
nobs_tot = self.nobs + self.k_ar
if self.const.size > 0:
exog.append(exog_const)
trend_coefs.append(self.const.T)
if self.seasons > 0:
first_future_season = (self.first_season + nobs_tot) % self.seasons
exog_seasonal = seasonal_dummies(
self.seasons, steps, first_future_season, True
)
exog.append(exog_seasonal)
trend_coefs.append(self.seasonal.T)
exog_lin_trend = _linear_trend(self.nobs, self.k_ar)
exog_lin_trend = exog_lin_trend[-1] + 1 + np.arange(steps)
if self.lin_trend.size > 0:
exog.append(exog_lin_trend)
trend_coefs.append(self.lin_trend.T)
if exog_fc is not None:
exog.append(exog_fc[:steps])
trend_coefs.append(self.exog_coefs.T)
# adding deterministic terms inside cointegration relation
if "ci" in self.deterministic:
exog.append(exog_const)
trend_coefs.append(self.alpha.dot(self.const_coint.T).T)
exog_lin_trend_coint = _linear_trend(self.nobs, self.k_ar, coint=True)
exog_lin_trend_coint = exog_lin_trend_coint[-1] + 1 + np.arange(steps)
if "li" in self.deterministic:
exog.append(exog_lin_trend_coint)
trend_coefs.append(self.alpha.dot(self.lin_trend_coint.T).T)
if exog_coint_fc is not None:
if exog_coint_fc.ndim == 1:
exog_coint_fc = exog_coint_fc[:, None] # make 2-D
exog_coint_fc = np.vstack(
(self.exog_coint[-1:], exog_coint_fc[: steps - 1])
)
exog.append(exog_coint_fc)
trend_coefs.append(self.alpha.dot(self.exog_coint_coefs.T).T)
# gluing all deterministic terms together
exog = np.column_stack(exog) if exog != [] else None
if trend_coefs != []:
trend_coefs = np.vstack(trend_coefs)
else:
trend_coefs = None
# call the forecasting function of the VAR-module
if alpha is not None:
return forecast_interval(
last_observations,
self.var_rep,
trend_coefs,
self.sigma_u,
steps,
alpha=alpha,
exog=exog,
)
else:
return forecast(
last_observations, self.var_rep, trend_coefs, steps, exog
) | Calculate future values of the time series.
Parameters
----------
steps : int
Prediction horizon.
alpha : float, 0 < `alpha` < 1 or None
If None, compute point forecast only.
If float, compute confidence intervals too. In this case the
argument stands for the confidence level.
exog_fc : ndarray (steps x self.exog.shape[1])
If self.exog is not None, then information about the future values
of exog has to be passed via this parameter. The ndarray may be
larger in its first dimension. In this case only the first steps
rows will be considered.
exog_coint_fc : ndarray ((steps - 1) x self.exog_coint.shape[1])
Analogously, the future values of exog_coint, if the model contains
deterministic terms inside the cointegration relation.
Returns
-------
forecast : ndarray (steps x neqs) or three ndarrays
In case of a point forecast: each row of the returned ndarray
represents the forecast of the neqs variables for a specific
period. The first row (index [0]) is the forecast for the next
period, the last row (index [steps-1]) is the steps-periods-ahead-
forecast. | predict | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
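Illustrative forecast calls on a fitted model; data and settings are made up.

# Illustrative use of predict on simulated data.
import numpy as np
from statsmodels.tsa.vector_ar.vecm import VECM

rng = np.random.default_rng(0)
res = VECM(rng.standard_normal((200, 3)).cumsum(axis=0),
           k_ar_diff=2, coint_rank=1).fit()
point = res.predict(steps=8)                          # (8, 3) point forecasts
mid, lower, upper = res.predict(steps=8, alpha=0.05)  # plus 95% bounds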
def plot_forecast(
self, steps, alpha=0.05, plot_conf_int=True, n_last_obs=None
):
"""
Plot the forecast.
Parameters
----------
steps : int
Prediction horizon.
alpha : float, 0 < `alpha` < 1
The confidence level.
plot_conf_int : bool, default: True
If True, plot bounds of confidence intervals.
n_last_obs : int or None, default: None
If int, restrict plotted history to n_last_obs observations.
If None, include the whole history in the plot.
"""
mid, lower, upper = self.predict(steps, alpha=alpha)
y = self.y_all.T
y = y[self.k_ar :] if n_last_obs is None else y[-n_last_obs:]
plot.plot_var_forc(
y,
mid,
lower,
upper,
names=self.names,
plot_stderr=plot_conf_int,
legend_options={"loc": "lower left"},
) | Plot the forecast.
Parameters
----------
steps : int
Prediction horizon.
alpha : float, 0 < `alpha` < 1
The confidence level.
plot_conf_int : bool, default: True
If True, plot bounds of confidence intervals.
n_last_obs : int or None, default: None
If int, restrict plotted history to n_last_obs observations.
If None, include the whole history in the plot. | plot_forecast | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def fittedvalues(self):
"""
Return the in-sample values of endog calculated by the model.
Returns
-------
fitted : array (nobs x neqs)
The predicted in-sample values of the models' endogenous variables.
"""
beta = self.beta
if self.det_coef_coint.size > 0:
beta = vstack((beta, self.det_coef_coint))
pi = np.dot(self.alpha, beta.T)
gamma = self.gamma
if self.det_coef.size > 0:
gamma = hstack((gamma, self.det_coef))
delta_y = np.dot(pi, self._y_lag1) + np.dot(gamma, self._delta_x)
return (delta_y + self._y_lag1[: self.neqs]).T | Return the in-sample values of endog calculated by the model.
Returns
-------
fitted : array (nobs x neqs)
The predicted in-sample values of the models' endogenous variables. | fittedvalues | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def resid(self):
"""
Return the difference between observed and fitted values.
Returns
-------
resid : array (nobs x neqs)
The residuals.
"""
return self.y_all.T[self.k_ar :] - self.fittedvalues | Return the difference between observed and fitted values.
Returns
-------
resid : array (nobs x neqs)
The residuals. | resid | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def test_whiteness(self, nlags=10, signif=0.05, adjusted=False):
"""
Test the whiteness of the residuals using the Portmanteau test.
This test is described in [1]_, chapter 8.4.1.
Parameters
----------
nlags : int > 0
signif : float, 0 < `signif` < 1
adjusted : bool, default False
Returns
-------
result : :class:`statsmodels.tsa.vector_ar.hypothesis_test_results.WhitenessTestResults`
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer.
"""
statistic = 0
u = np.asarray(self.resid)
acov_list = _compute_acov(u, nlags)
# self.sigma_u instead of cov(0) is necessary to get the same
# result as JMulTi. The difference between the two is that sigma_u is
# calculated with the usual residuals while in cov(0) the
# residuals are demeaned. To me JMulTi's behaviour seems a bit strange
# because it uses the usual residuals here but demeaned residuals in
# the calculation of autocovariances with lag > 0. (used in the
# argument of trace() four rows below this comment.)
c0_inv = inv(self.sigma_u) # instead of inv(cov(0))
if c0_inv.dtype == np.complex128 and np.all(np.imag(c0_inv) == 0):
c0_inv = np.real(c0_inv)
for t in range(1, nlags + 1):
ct = acov_list[t]
to_add = np.trace(ct.T @ c0_inv @ ct @ c0_inv)
if adjusted:
to_add /= self.nobs - t
statistic += to_add
statistic *= self.nobs ** 2 if adjusted else self.nobs
df = (
self.neqs ** 2 * (nlags - self.k_ar + 1)
- self.neqs * self.coint_rank
)
dist = scipy.stats.chi2(df)
pvalue = dist.sf(statistic)
crit_value = dist.ppf(1 - signif)
return WhitenessTestResults(
statistic, crit_value, pvalue, df, signif, nlags, adjusted
) | Test the whiteness of the residuals using the Portmanteau test.
This test is described in [1]_, chapter 8.4.1.
Parameters
----------
nlags : int > 0
signif : float, 0 < `signif` < 1
adjusted : bool, default False
Returns
-------
result : :class:`statsmodels.tsa.vector_ar.hypothesis_test_results.WhitenessTestResults`
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series Analysis*. Springer. | test_whiteness | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
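Illustrative use of the Portmanteau test on a fitted model; note that nlags must exceed k_ar for positive degrees of freedom.

# Illustrative whiteness test; data and settings are made up.
import numpy as np
from statsmodels.tsa.vector_ar.vecm import VECM

rng = np.random.default_rng(0)
res = VECM(rng.standard_normal((200, 3)).cumsum(axis=0),
           k_ar_diff=2, coint_rank=1).fit()
white = res.test_whiteness(nlags=12, adjusted=True)
print(white.summary())  # statistic, p-value, df, critical value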
def plot_data(self, with_presample=False):
"""
Plot the input time series.
Parameters
----------
with_presample : bool, default: `False`
If `False`, the pre-sample data (the first `k_ar` values) will
not be plotted.
"""
y = self.y_all if with_presample else self.y_all[:, self.k_ar :]
names = self.names
dates = self.dates if with_presample else self.dates[self.k_ar :]
plot.plot_mts(y.T, names=names, index=dates) | Plot the input time series.
Parameters
----------
with_presample : bool, default: `False`
If `False`, the pre-sample data (the first `k_ar` values) will
not be plotted. | plot_data | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def summary(self, alpha=0.05):
"""
Return a summary of the estimation results.
Parameters
----------
alpha : float 0 < `alpha` < 1, default 0.05
Significance level of the shown confidence intervals.
Returns
-------
summary : :class:`statsmodels.iolib.summary.Summary`
A summary containing information about estimated parameters.
"""
from statsmodels.iolib.summary import summary_params
summary = Summary()
def make_table(
self,
params,
std_err,
t_values,
p_values,
conf_int,
mask,
names,
title,
strip_end=True,
):
res = (
self,
params[mask],
std_err[mask],
t_values[mask],
p_values[mask],
conf_int[mask],
)
param_names = [
".".join(name.split(".")[:-1]) if strip_end else name
for name in np.array(names)[mask].tolist()
]
return summary_params(
res,
yname=None,
xname=param_names,
alpha=alpha,
use_t=False,
title=title,
)
# ---------------------------------------------------------------------
# Add tables with gamma and det_coef for each endogenous variable:
lagged_params_components = []
stderr_lagged_params_components = []
tvalues_lagged_params_components = []
pvalues_lagged_params_components = []
conf_int_lagged_params_components = []
if self.det_coef.size > 0:
lagged_params_components.append(self.det_coef.flatten(order="F"))
stderr_lagged_params_components.append(
self.stderr_det_coef.flatten(order="F")
)
tvalues_lagged_params_components.append(
self.tvalues_det_coef.flatten(order="F")
)
pvalues_lagged_params_components.append(
self.pvalues_det_coef.flatten(order="F")
)
conf_int = self.conf_int_det_coef(alpha=alpha)
lower = conf_int["lower"].flatten(order="F")
upper = conf_int["upper"].flatten(order="F")
conf_int_lagged_params_components.append(
np.column_stack((lower, upper))
)
if self.k_ar - 1 > 0:
lagged_params_components.append(self.gamma.flatten())
stderr_lagged_params_components.append(self.stderr_gamma.flatten())
tvalues_lagged_params_components.append(
self.tvalues_gamma.flatten()
)
pvalues_lagged_params_components.append(
self.pvalues_gamma.flatten()
)
conf_int = self.conf_int_gamma(alpha=alpha)
lower = conf_int["lower"].flatten()
upper = conf_int["upper"].flatten()
conf_int_lagged_params_components.append(
np.column_stack((lower, upper))
)
# if gamma or det_coef exists, then make a summary-table for them:
if len(lagged_params_components) != 0:
lagged_params = hstack(lagged_params_components)
stderr_lagged_params = hstack(stderr_lagged_params_components)
tvalues_lagged_params = hstack(tvalues_lagged_params_components)
pvalues_lagged_params = hstack(pvalues_lagged_params_components)
conf_int_lagged_params = vstack(conf_int_lagged_params_components)
for i in range(self.neqs):
masks = []
offset = 0
# 1. Deterministic terms outside cointegration relation
if "co" in self.deterministic:
masks.append(offset + np.array(i, ndmin=1))
offset += self.neqs
if self.seasons > 0:
for _ in range(self.seasons - 1):
masks.append(offset + np.array(i, ndmin=1))
offset += self.neqs
if "lo" in self.deterministic:
masks.append(offset + np.array(i, ndmin=1))
offset += self.neqs
if self.exog is not None:
for _ in range(self.exog.shape[1]):
masks.append(offset + np.array(i, ndmin=1))
offset += self.neqs
# 2. Lagged endogenous terms
if self.k_ar - 1 > 0:
start = i * self.neqs * (self.k_ar - 1)
end = (i + 1) * self.neqs * (self.k_ar - 1)
masks.append(offset + np.arange(start, end))
# offset += self.neqs**2 * (self.k_ar-1)
# Create the table
mask = np.concatenate(masks)
eq_name = self.model.endog_names[i]
title = (
"Det. terms outside the coint. relation "
+ "& lagged endog. parameters for equation %s" % eq_name
)
table = make_table(
self,
lagged_params,
stderr_lagged_params,
tvalues_lagged_params,
pvalues_lagged_params,
conf_int_lagged_params,
mask,
self.model._lagged_param_names,
title,
)
summary.tables.append(table)
# ---------------------------------------------------------------------
# Loading coefficients (alpha):
a = self.alpha.flatten()
se_a = self.stderr_alpha.flatten()
t_a = self.tvalues_alpha.flatten()
p_a = self.pvalues_alpha.flatten()
ci_a = self.conf_int_alpha(alpha=alpha)
lower = ci_a["lower"].flatten()
upper = ci_a["upper"].flatten()
ci_a = np.column_stack((lower, upper))
a_names = self.model._load_coef_param_names
alpha_masks = []
for i in range(self.neqs):
if self.coint_rank > 0:
start = i * self.coint_rank
end = start + self.coint_rank
mask = np.arange(start, end)
# Create the table
alpha_masks.append(mask)
eq_name = self.model.endog_names[i]
title = "Loading coefficients (alpha) for equation %s" % eq_name
table = make_table(
self, a, se_a, t_a, p_a, ci_a, mask, a_names, title
)
summary.tables.append(table)
# ---------------------------------------------------------------------
# Cointegration matrix/vector (beta) and det. terms inside coint. rel.:
coint_components = []
stderr_coint_components = []
tvalues_coint_components = []
pvalues_coint_components = []
conf_int_coint_components = []
if self.coint_rank > 0:
coint_components.append(self.beta.T.flatten())
stderr_coint_components.append(self.stderr_beta.T.flatten())
tvalues_coint_components.append(self.tvalues_beta.T.flatten())
pvalues_coint_components.append(self.pvalues_beta.T.flatten())
conf_int = self.conf_int_beta(alpha=alpha)
lower = conf_int["lower"].T.flatten()
upper = conf_int["upper"].T.flatten()
conf_int_coint_components.append(np.column_stack((lower, upper)))
if self.det_coef_coint.size > 0:
coint_components.append(self.det_coef_coint.flatten())
stderr_coint_components.append(
self.stderr_det_coef_coint.flatten()
)
tvalues_coint_components.append(
self.tvalues_det_coef_coint.flatten()
)
pvalues_coint_components.append(
self.pvalues_det_coef_coint.flatten()
)
conf_int = self.conf_int_det_coef_coint(alpha=alpha)
lower = conf_int["lower"].flatten()
upper = conf_int["upper"].flatten()
conf_int_coint_components.append(np.column_stack((lower, upper)))
coint = hstack(coint_components)
stderr_coint = hstack(stderr_coint_components)
tvalues_coint = hstack(tvalues_coint_components)
pvalues_coint = hstack(pvalues_coint_components)
conf_int_coint = vstack(conf_int_coint_components)
coint_names = self.model._coint_param_names
for i in range(self.coint_rank):
masks = []
offset = 0
# 1. Cointegration matrix (beta)
if self.coint_rank > 0:
start = i * self.neqs
end = start + self.neqs
masks.append(offset + np.arange(start, end))
offset += self.neqs * self.coint_rank
# 2. Deterministic terms inside cointegration relation
if "ci" in self.deterministic:
masks.append(offset + np.array(i, ndmin=1))
offset += self.coint_rank
if "li" in self.deterministic:
masks.append(offset + np.array(i, ndmin=1))
offset += self.coint_rank
if self.exog_coint is not None:
for _ in range(self.exog_coint.shape[1]):
masks.append(offset + np.array(i, ndmin=1))
offset += self.coint_rank
# Create the table
mask = np.concatenate(masks)
title = (
"Cointegration relations for "
+ "loading-coefficients-column %d" % (i + 1)
)
table = make_table(
self,
coint,
stderr_coint,
tvalues_coint,
pvalues_coint,
conf_int_coint,
mask,
coint_names,
title,
)
summary.tables.append(table)
return summary | Return a summary of the estimation results.
Parameters
----------
alpha : float 0 < `alpha` < 1, default 0.05
Significance level of the shown confidence intervals.
Returns
-------
summary : :class:`statsmodels.iolib.summary.Summary`
A summary containing information about estimated parameters. | summary | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/vecm.py | BSD-3-Clause |
def load_results_statsmodels_exog(dataset):
"""
Load data with seasonal terms in `exog`.
Same as load_results_statsmodels() except that the seasonal term is
provided to :class:`VECM`'s `__init__()` method via the `exog` parameter.
This is to check whether the same results are produced no matter whether
`exog` or `seasons` is being used.
Parameters
----------
dataset : DataSet
"""
results_per_deterministic_terms = dict.fromkeys(dataset.dt_s_list)
endog = data[dataset]
for dt_s_tup in dataset.dt_s_list:
det_string = dt_s_tup[0]
seasons = dt_s_tup[1]
first_season = dt_s_tup[2]
if seasons == 0:
exog = None
else:
exog = seasonal_dummies(
seasons, len(data[dataset]), first_season, centered=True
)
if "lo" in dt_s_tup[0]:
exog = np.hstack(
(exog, 1 + np.arange(len(endog)).reshape(-1, 1))
)
# remove "lo" since it's now already in exog.
det_string = det_string[:-2]
model = VECM(
endog,
exog,
k_ar_diff=3,
coint_rank=coint_rank,
deterministic=det_string,
)
results_per_deterministic_terms[dt_s_tup] = model.fit(method="ml")
return results_per_deterministic_terms | Load data with seasonal terms in `exog`.
Same as load_results_statsmodels() except that the seasonal term is
provided to :class:`VECM`'s `__init__()` method via the `exog` parameter.
This is to check whether the same results are produced no matter whether
`exog` or `seasons` is being used.
Parameters
----------
dataset : DataSet | load_results_statsmodels_exog | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/tests/test_vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/tests/test_vecm.py | BSD-3-Clause |
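As a sketch of what such an exog matrix looks like, here is a hand-rolled construction of centered seasonal dummies; the helper name centered_seasonal_dummies is hypothetical, and the test module's own seasonal_dummies may differ in details:

import numpy as np

def centered_seasonal_dummies(seasons, n_obs, first_season=0):
    # seasons-1 dummy columns; observation t is assigned to season
    # (first_season + t) % seasons, and centering subtracts 1/seasons.
    dummies = np.zeros((n_obs, seasons - 1))
    for col in range(seasons - 1):
        dummies[(col - first_season) % seasons::seasons, col] = 1
    return dummies - 1 / seasons

exog = centered_seasonal_dummies(4, 12, first_season=1)
print(exog.shape)  # (12, 3)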
def load_results_statsmodels_exog_coint(dataset):
"""
Load data with deterministic terms in `exog_coint`.
Same as load_results_statsmodels() except that deterministic terms inside
the cointegration relation are provided to :class:`VECM`'s `__init__()`
    method via the `exog_coint` parameter. This is to check whether the same
results are produced no matter whether `exog_coint` or the `deterministic`
argument is being used.
Parameters
----------
dataset : DataSet
"""
results_per_deterministic_terms = dict.fromkeys(dataset.dt_s_list)
endog = data[dataset]
for dt_s_tup in dataset.dt_s_list:
det_string = dt_s_tup[0]
if "ci" not in det_string and "li" not in det_string:
exog_coint = None
else:
exog_coint = []
if "li" in det_string:
exog_coint.append(1 + np.arange(len(endog)).reshape(-1, 1))
det_string = det_string[:-2]
if "ci" in det_string:
exog_coint.append(np.ones(len(endog)).reshape(-1, 1))
det_string = det_string[:-2]
# reversing (such that constant is first and linear is second)
exog_coint = exog_coint[::-1]
exog_coint = np.hstack(exog_coint)
model = VECM(
endog,
exog=None,
exog_coint=exog_coint,
k_ar_diff=3,
coint_rank=coint_rank,
deterministic=det_string,
seasons=dt_s_tup[1],
first_season=dt_s_tup[2],
)
results_per_deterministic_terms[dt_s_tup] = model.fit(method="ml")
return results_per_deterministic_terms | Load data with deterministic terms in `exog_coint`.
Same as load_results_statsmodels() except that deterministic terms inside
the cointegration relation are provided to :class:`VECM`'s `__init__()`
method via the `exog_coint` parameter. This is to check whether the same
results are produced no matter whether `exog_coint` or the `deterministic`
argument is being used.
Parameters
----------
dataset : DataSet | load_results_statsmodels_exog_coint | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/tests/test_vecm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/tests/test_vecm.py | BSD-3-Clause |
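For the "cili" case, the exog_coint built above boils down to a constant column followed by a linear trend that starts at 1. A standalone sketch (n_obs is an arbitrary sample length chosen for illustration):

import numpy as np

n_obs = 92
const_col = np.ones((n_obs, 1))                    # "ci": constant in coint. relation
trend_col = (1 + np.arange(n_obs)).reshape(-1, 1)  # "li": trend in coint. relation
exog_coint = np.hstack((const_col, trend_col))     # constant first, trend second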
def reorder_jmultis_det_terms(jmulti_output, constant, seasons):
"""
In case of seasonal terms and a trend term we have to reorder them to make
the outputs from JMulTi and statsmodels comparable.
JMulTi's ordering is: [constant], [seasonal terms], [trend term] while
in statsmodels it is: [constant], [trend term], [seasonal terms]
Parameters
----------
jmulti_output : ndarray (neqs x number_of_deterministic_terms)
constant : bool
Indicates whether there is a constant term or not in jmulti_output.
seasons : int
Number of seasons in the model. That means there are seasons-1
columns for seasonal terms in jmulti_output
Returns
-------
reordered : ndarray (neqs x number_of_deterministic_terms)
jmulti_output reordered such that the order of deterministic terms
matches that of statsmodels.
"""
if seasons == 0:
return jmulti_output
constant = int(constant)
const_column = jmulti_output[:, :constant]
season_columns = jmulti_output[:, constant : constant + seasons - 1].copy()
trend_columns = jmulti_output[:, constant + seasons - 1 :].copy()
return np.hstack((const_column, trend_columns, season_columns)) | In case of seasonal terms and a trend term we have to reorder them to make
the outputs from JMulTi and statsmodels comparable.
JMulTi's ordering is: [constant], [seasonal terms], [trend term] while
in statsmodels it is: [constant], [trend term], [seasonal terms]
Parameters
----------
jmulti_output : ndarray (neqs x number_of_deterministic_terms)
constant : bool
Indicates whether there is a constant term or not in jmulti_output.
seasons : int
Number of seasons in the model. That means there are seasons-1
columns for seasonal terms in jmulti_output
Returns
-------
reordered : ndarray (neqs x number_of_deterministic_terms)
jmulti_output reordered such that the order of deterministic terms
matches that of statsmodels. | reorder_jmultis_det_terms | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/tests/test_var_jmulti.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/tests/test_var_jmulti.py | BSD-3-Clause |
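A toy call illustrating the reordering (arbitrary numbers; columns are constant, three seasonal dummies, and trend, i.e. JMulTi's order with seasons=4):

import numpy as np

jmulti_output = np.array([[10., 1., 2., 3., 99.],
                          [20., 4., 5., 6., 88.]])
print(reorder_jmultis_det_terms(jmulti_output, constant=True, seasons=4))
# [[10. 99.  1.  2.  3.]
#  [20. 88.  4.  5.  6.]]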
def generate_exog_from_season(seasons, endog_len):
"""
Translate seasons to exog matrix.
Parameters
----------
seasons : int
Number of seasons.
endog_len : int
Number of observations.
Returns
-------
exog : ndarray or None
If seasonal deterministic terms exist, the corresponding exog-matrix is
returned.
Otherwise, None is returned.
"""
exog_stack = []
if seasons > 0:
season_exog = np.zeros((seasons - 1, endog_len))
for i in range(seasons - 1):
season_exog[i, i::seasons] = 1
# season_exog = season_exog[:, ::-1]
# season_exog = np.hstack((season_exog[:, 3:4],
# season_exog[:, :-1]))
# season_exog = np.hstack((season_exog[:, 2:4],
# season_exog[:, :-2]))
# season_exog = np.hstack((season_exog[:, 1:4], season_exog[:, :-3]))
# season_exog[1] = -season_exog[1]
# the following line is commented out because seasonal terms are
# *not* centered in JMulTi's VAR-framework (in contrast to VECM)
# season_exog -= 1 / seasons
season_exog = season_exog.T
exog_stack.append(season_exog)
if exog_stack != []:
exog = np.column_stack(exog_stack)
else:
exog = None
return exog | Translate seasons to exog matrix.
Parameters
----------
seasons : int
Number of seasons.
endog_len : int
Number of observations.
Returns
-------
exog : ndarray or None
If seasonal deterministic terms exist, the corresponding exog-matrix is
returned.
Otherwise, None is returned. | generate_exog_from_season | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/tests/test_var_jmulti.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/tests/test_var_jmulti.py | BSD-3-Clause |
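A quick check of the dummy pattern this produces (one row per observation, seasons-1 columns):

exog = generate_exog_from_season(seasons=4, endog_len=8)
print(exog.shape)  # (8, 3)
print(exog[:4])
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]
#  [0. 0. 0.]]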
def bivariate_var_data(reset_randomstate):
"""A bivariate dataset for VAR estimation"""
e = np.random.standard_normal((252, 2))
y = np.zeros_like(e)
y[:2] = e[:2]
for i in range(2, 252):
y[i] = 0.2 * y[i - 1] + 0.1 * y[i - 2] + e[i]
return y | A bivariate dataset for VAR estimation | bivariate_var_data | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/tests/test_var.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/tests/test_var.py | BSD-3-Clause |
def bivariate_var_result(bivariate_var_data):
"""A bivariate VARResults for reuse"""
mod = VAR(bivariate_var_data)
return mod.fit() | A bivariate VARResults for reuse | bivariate_var_result | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/tests/test_var.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/tests/test_var.py | BSD-3-Clause |
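Outside the pytest fixtures, the same data-generating process and fit read as follows (a sketch; VAR and the coefs attribute are the actual statsmodels API):

import numpy as np
from statsmodels.tsa.api import VAR

e = np.random.standard_normal((252, 2))
y = np.zeros_like(e)
y[:2] = e[:2]
for i in range(2, 252):
    y[i] = 0.2 * y[i - 1] + 0.1 * y[i - 2] + e[i]
res = VAR(y).fit(2)     # fit a VAR(2), matching the generating process
print(res.coefs.shape)  # (2, 2, 2): one 2x2 coefficient matrix per lag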
def sublists(lst, min_elmts=0, max_elmts=None):
"""Build a list of all possible sublists of a given list. Restrictions
    on the length of the sublists can be imposed via the min_elmts and
    max_elmts parameters. All sublists will have at least min_elmts
    elements and not more than max_elmts elements.
Parameters
----------
lst : list
Original list from which sublists are generated.
min_elmts : int
Lower bound for the length of sublists.
max_elmts : int or None
        If int, then max_elmts is the upper bound for the length of sublists.
If None, sublists' length is not restricted. In this case the longest
sublist will be of the same length as the original list lst.
Returns
-------
result : list
A list of all sublists of lst fulfilling the length restrictions.
"""
if max_elmts is None:
max_elmts = len(lst)
# for the following see also the definition of powerset() in
# https://docs.python.org/dev/library/itertools.html#itertools-recipes
result = itertools.chain.from_iterable(
itertools.combinations(lst, sublist_len)
for sublist_len in range(min_elmts, max_elmts+1))
    result = list(result)
return result | Build a list of all possible sublists of a given list. Restrictions
on the length of the sublists can be imposed via the min_elmts and
max_elmts parameters. All sublists will have at least min_elmts
elements and not more than max_elmts elements.
Parameters
----------
lst : list
Original list from which sublists are generated.
min_elmts : int
Lower bound for the length of sublists.
max_elmts : int or None
If int, then max_elmts is the upper bound for the length of sublists.
If None, sublists' length is not restricted. In this case the longest
sublist will be of the same length as the original list lst.
Returns
-------
result : list
A list of all sublists of lst fulfilling the length restrictions. | sublists | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/tests/JMulTi_results/parse_jmulti_vecm_output.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/tests/JMulTi_results/parse_jmulti_vecm_output.py | BSD-3-Clause |
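For example:

print(sublists(['a', 'b', 'c'], min_elmts=1, max_elmts=2))
# [('a',), ('b',), ('c',), ('a', 'b'), ('a', 'c'), ('b', 'c')]

Note that itertools.combinations yields tuples, so the "sublists" are in fact tuples.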
def stringify_var_names(var_list, delimiter=""):
"""
Parameters
----------
var_list : list[str]
Each list element is the name of a variable.
Returns
-------
result : str
Concatenated variable names.
"""
result = var_list[0]
for var_name in var_list[1:]:
result += delimiter + var_name
return result.lower() | Parameters
----------
var_list : list[str]
Each list element is the name of a variable.
Returns
-------
result : str
Concatenated variable names. | stringify_var_names | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/tests/JMulTi_results/parse_jmulti_vecm_output.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/tests/JMulTi_results/parse_jmulti_vecm_output.py | BSD-3-Clause |
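For example:

print(stringify_var_names(["Dp", "R"]))                 # dpr
print(stringify_var_names(["Dp", "R"], delimiter="_"))  # dp_r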
def load_results_jmulti(dataset):
"""
Parameters
----------
dataset : module
A data module in the statsmodels/datasets directory that defines a
        __str__() method returning the dataset's name and a dt_s_list
        attribute listing the deterministic-term/seasons combinations
        for which results are to be loaded.
Returns
-------
result : dict
A dict (keys: tuples of deterministic terms and seasonal terms)
of dicts (keys: strings "est" (for estimators),
"se" (for standard errors),
"t" (for t-values),
"p" (for p-values))
of dicts (keys: strings "alpha", "beta", "Gamma" and other results)
"""
source = "jmulti"
results_dict_per_det_terms = dict.fromkeys(dataset.dt_s_list)
for dt_s in dataset.dt_s_list:
dt_string = dt_s_tup_to_string(dt_s)
params_file = "vecm_"+dataset.__str__()+"_"+source+"_"+dt_string+".txt"
params_file = os.path.join(here, params_file)
# sections in jmulti output:
section_header = ["Lagged endogenous term", # Gamma
"Deterministic term", # co, s, lo
"Loading coefficients", # alpha
"Estimated cointegration relation", # beta
"Legend",
"Lagged endogenous term", # VAR representation
"Deterministic term"] # VAR representation
# the following "sections" will serve as key for the corresponding
# result values
sections = ["Gamma",
"C", # Here all deterministic term coefficients are
# collected. (const and linear trend which belong
# to cointegration relation as well as seasonal
# components which are outside the cointegration
# relation. Later, we will strip the terms related
# to the cointegration relation from C.
"alpha",
"beta",
"Legend",
"VAR A", # VAR parameter matrices
"VAR deterministic"] # VAR deterministic terms
if "co" not in dt_string and "lo" not in dt_string \
and "s" not in dt_string:
# JMulTi: no deterministic terms section in VEC representation
del section_header[1]
del sections[1]
if "ci" not in dt_string and "li" not in dt_string:
# JMulTi: no deterministic section in VAR repr.
del section_header[-1]
del sections[-1]
results = dict()
results["est"] = dict.fromkeys(sections)
results["se"] = dict.fromkeys(sections)
results["t"] = dict.fromkeys(sections)
results["p"] = dict.fromkeys(sections)
section = -1
result = []
result_se = []
result_t = []
result_p = []
rows = 0
started_reading_section = False
start_end_mark = "-----"
# ---------------------------------------------------------------------
# parse information about \alpha, \beta, \Gamma, deterministic of VECM
# and A_i and deterministic of corresponding VAR:
params_file = open(params_file, encoding="latin_1")
for line in params_file:
if section == -1 and section_header[section+1] not in line:
continue
if section < len(section_header)-1 \
and section_header[section+1] in line: # new section
section += 1
continue
if not started_reading_section:
if line.startswith(start_end_mark):
started_reading_section = True
continue
if started_reading_section:
if line.startswith(start_end_mark):
if result == []: # no values collected in section "Legend"
started_reading_section = False
continue
results["est"][sections[section]] = np.column_stack(
result)
result = []
results["se"][sections[section]] = np.column_stack(
result_se)
result_se = []
results["t"][sections[section]] = np.column_stack(
result_t)
result_t = []
results["p"][sections[section]] = np.column_stack(
result_p)
result_p = []
started_reading_section = False
continue
str_number = r"-?\d+\.\d{3}"
regex_est = re.compile(str_number + r"[^\)\]\}]")
est_col = re.findall(regex_est, line)
            # standard errors in parentheses in JMulTi output:
regex_se = re.compile(r"\(" + str_number + r"\)")
se_col = re.findall(regex_se, line)
# t-values in brackets in JMulTi output:
regex_t_value = re.compile(r"\[" + str_number + r"\]")
t_col = re.findall(regex_t_value, line)
# p-values in braces in JMulTi output:
regex_p_value = re.compile(r"\{" + str_number + r"\}")
p_col = re.findall(regex_p_value, line)
if result == [] and est_col != []:
rows = len(est_col)
if est_col != []:
est_col = [float(el) for el in est_col]
result.append(est_col)
elif se_col != []:
for i in range(rows):
se_col[i] = se_col[i].replace("(", "").replace(")", "")
se_col = [float(el) for el in se_col]
result_se.append(se_col)
elif t_col != []:
for i in range(rows):
t_col[i] = t_col[i].replace("[", "").replace("]", "")
t_col = [float(el) for el in t_col]
result_t.append(t_col)
elif p_col != []:
for i in range(rows):
p_col[i] = p_col[i].replace("{", "").replace("}", "")
p_col = [float(el) for el in p_col]
result_p.append(p_col)
params_file.close()
# delete "Legend"-section of JMulTi:
del results["est"]["Legend"]
del results["se"]["Legend"]
del results["t"]["Legend"]
del results["p"]["Legend"]
# JMulTi outputs beta.T
results["est"]["beta"] = results["est"]["beta"].T
results["se"]["beta"] = results["se"]["beta"].T
results["t"]["beta"] = results["t"]["beta"].T
results["p"]["beta"] = results["p"]["beta"].T
# split information about beta and deterministic terms inside coint.
alpha = results["est"]["alpha"]
beta = results["est"]["beta"]
alpha_rows = alpha.shape[0]
if beta.shape[0] > alpha_rows:
results["est"]["beta"], results["est"]["det_coint"] = np.vsplit(
results["est"]["beta"], [alpha_rows])
results["se"]["beta"], results["se"]["det_coint"] = np.vsplit(
results["se"]["beta"], [alpha_rows])
results["t"]["beta"], results["t"]["det_coint"] = np.vsplit(
results["t"]["beta"], [alpha_rows])
results["p"]["beta"], results["p"]["det_coint"] = np.vsplit(
results["p"]["beta"], [alpha_rows])
# ---------------------------------------------------------------------
# parse information regarding \Sigma_u
sigmau_file = "vecm_" + dataset.__str__() + "_" + source + "_" + \
dt_string + "_Sigmau" + ".txt"
sigmau_file = os.path.join(here, sigmau_file)
rows_to_parse = 0
# all numbers of Sigma_u in notation with e (e.g. 2.283862e-05)
regex_est = re.compile(r"\s+\S+e\S+")
sigmau_section_reached = False
sigmau_file = open(sigmau_file, encoding="latin_1")
for line in sigmau_file:
if line.startswith("Log Likelihood:"):
line = line.split("Log Likelihood:")[1]
results["log_like"] = float(re.findall(regex_est, line)[0])
if not sigmau_section_reached and "Covariance:" not in line:
continue
if "Covariance:" in line:
sigmau_section_reached = True
row = re.findall(regex_est, line)
            rows_to_parse = len(row)  # Sigma_u is square ==> #rows == #cols
sigma_u = np.empty((rows_to_parse, rows_to_parse))
row = re.findall(regex_est, line)
rows_to_parse -= 1
sigma_u[rows_to_parse] = row # rows are added in reverse order
if rows_to_parse == 0:
break
sigmau_file.close()
results["est"]["Sigma_u"] = sigma_u[::-1]
# ---------------------------------------------------------------------
# parse forecast related output:
fc_file = "vecm_" + dataset.__str__() + "_" + source + "_" + \
dt_string + "_fc5" + ".txt"
fc_file = os.path.join(here, fc_file)
fc, lower, upper, plu_min = [], [], [], []
fc_file = open(fc_file, encoding='latin_1')
for line in fc_file:
str_number = r"(\s+-?\d+\.\d{4}\s*?)"
regex_number = re.compile(str_number)
numbers = re.findall(regex_number, line)
if numbers == []:
continue
fc.append(float(numbers[0]))
lower.append(float(numbers[1]))
upper.append(float(numbers[2]))
plu_min.append(float(numbers[3]))
fc_file.close()
variables = alpha.shape[0]
fc = np.hstack(np.vsplit(np.array(fc)[:, None], variables))
lower = np.hstack(np.vsplit(np.array(lower)[:, None], variables))
upper = np.hstack(np.vsplit(np.array(upper)[:, None], variables))
# plu_min = np.hstack(np.vsplit(np.array(plu_min)[:, None], variables))
results["fc"] = dict.fromkeys(["fc", "lower", "upper"])
results["fc"]["fc"] = fc
results["fc"]["lower"] = lower
results["fc"]["upper"] = upper
# ---------------------------------------------------------------------
# parse output related to Granger-causality:
results["granger_caus"] = dict.fromkeys(["p", "test_stat"])
results["granger_caus"]["p"] = dict()
results["granger_caus"]["test_stat"] = dict()
vn = dataset.variable_names
# all possible combinations of potentially causing variables
# (at least 1 variable and not all variables together):
var_combs = sublists(vn, 1, len(vn)-1)
for causing in var_combs:
caused = tuple(el for el in vn if el not in causing)
granger_file = "vecm_" + dataset.__str__() + "_" + source + "_" \
+ dt_string + "_granger_causality_" \
+ stringify_var_names(causing) + "_" \
+ stringify_var_names(caused) + ".txt"
granger_file = os.path.join(here, granger_file)
granger_file = open(granger_file, encoding="latin_1")
granger_results = []
for line in granger_file:
str_number = r"\d+\.\d{4}"
regex_number = re.compile(str_number)
number = re.search(regex_number, line)
if number is None:
continue
number = float(number.group(0))
granger_results.append(number)
granger_file.close()
results["granger_caus"]["test_stat"][(causing, caused)] = \
granger_results[0]
results["granger_caus"]["p"][(causing, caused)] =\
granger_results[1]
# ---------------------------------------------------------------------
# parse output related to instant causality:
results["inst_caus"] = dict.fromkeys(["p", "test_stat"])
results["inst_caus"]["p"] = dict()
results["inst_caus"]["test_stat"] = dict()
vn = dataset.variable_names
# all possible combinations of potentially causing variables
# (at least 1 variable and not all variables together):
var_combs = sublists(vn, 1, len(vn)-1)
for causing in var_combs:
caused = tuple(el for el in vn if el not in causing)
# Though Granger- and instantaneous causality results are in the
# same file we use two separate files, since JMulTi is basing both
# tests on a VAR(p+1) model (where p is the number of lags in
# levels). According to Lutkepohl, Granger-causality tests are
# based on VAR(p+1) *but* tests for instantaneous causality are
# based on VAR(p)! Thus we have this separate file with JMulTi
# results for a VECM with the lag order reduced by one.
inst_file = "vecm_" + dataset.__str__() + "_" + source + "_" \
+ dt_string + "_inst_causality_" \
+ stringify_var_names(causing) + "_" \
+ stringify_var_names(caused) + ".txt"
inst_file = os.path.join(here, inst_file)
inst_file = open(inst_file, encoding="latin_1")
inst_results = []
for line in inst_file:
str_number = r"\d+\.\d{4}"
regex_number = re.compile(str_number)
number = re.search(regex_number, line)
if number is None:
continue
number = float(number.group(0))
inst_results.append(number)
inst_file.close()
results["inst_caus"]["test_stat"][(causing, caused)] = \
inst_results[2]
results["inst_caus"]["p"][(causing, caused)] = \
inst_results[3]
# ---------------------------------------------------------------------
# parse output related to impulse-response analysis:
ir_file = "vecm_" + dataset.__str__() + "_" + source + "_" + \
dt_string + "_ir" + ".txt"
ir_file = os.path.join(here, ir_file)
ir_file = open(ir_file, encoding='latin_1')
causing = None
caused = None
data = None
regex_vars = re.compile(r"\w+")
regex_vals = re.compile(r"-?\d+\.\d{4}")
line_start_causing = "time"
data_line_indicator = "point estimate"
data_rows_read = 0
for line in ir_file:
if causing is None and not line.startswith(line_start_causing):
continue # no relevant info in the header
if line.startswith(line_start_causing):
line = line[4:]
causing = re.findall(regex_vars, line)
# 21 periods shown in JMulTi output
data = np.empty((21, len(causing)))
continue
if caused is None:
caused = re.findall(regex_vars, line)
continue
# now start collecting the values:
if data_line_indicator not in line:
continue
start = line.find(data_line_indicator) + len(data_line_indicator)
line = line[start:]
data[data_rows_read] = re.findall(regex_vals, line)
data_rows_read += 1
ir_file.close()
results["ir"] = data
# ---------------------------------------------------------------------
# parse output related to lag order selection:
lagorder_file = "vecm_" + dataset.__str__() + "_" + source + "_" + \
dt_string + "_lagorder" + ".txt"
lagorder_file = os.path.join(here, lagorder_file)
lagorder_file = open(lagorder_file, encoding='latin_1')
results["lagorder"] = dict()
aic_start = "Akaike Info Criterion:"
fpe_start = "Final Prediction Error:"
hqic_start = "Hannan-Quinn Criterion:"
bic_start = "Schwarz Criterion:"
for line in lagorder_file:
if line.startswith(aic_start):
results["lagorder"]["aic"] = int(line[len(aic_start):])
elif line.startswith(fpe_start):
results["lagorder"]["fpe"] = int(line[len(fpe_start):])
elif line.startswith(hqic_start):
results["lagorder"]["hqic"] = int(line[len(hqic_start):])
elif line.startswith(bic_start):
results["lagorder"]["bic"] = int(line[len(bic_start):])
lagorder_file.close()
# ---------------------------------------------------------------------
# parse output related to non-normality-test:
test_norm_file = "vecm_" + dataset.__str__() + "_" + source + "_" + \
dt_string + "_diag" + ".txt"
test_norm_file = os.path.join(here, test_norm_file)
test_norm_file = open(test_norm_file, encoding='latin_1')
results["test_norm"] = dict()
reading_values = False
line_start_statistic = "joint test statistic:"
line_start_pvalue = " p-value:"
for line in test_norm_file:
if not reading_values:
if "Introduction to Multiple Time Series Analysis" in line:
reading_values = True # section w/ relevant results found
continue
if "joint_pvalue" in results["test_norm"].keys():
break
if line.startswith(line_start_statistic):
line_end = line[len(line_start_statistic):]
results["test_norm"]["joint_test_statistic"] = float(line_end)
if line.startswith(line_start_pvalue):
line_end = line[len(line_start_pvalue):]
results["test_norm"]["joint_pvalue"] = float(line_end)
test_norm_file.close()
# ---------------------------------------------------------------------
# parse output related to testing the whiteness of the residuals:
whiteness_file = "vecm_" + dataset.__str__() + "_" + source + "_" + \
dt_string + "_diag" + ".txt"
whiteness_file = os.path.join(here, whiteness_file)
whiteness_file = open(whiteness_file, encoding='latin_1')
results["whiteness"] = dict()
section_start_marker = "PORTMANTEAU TEST"
order_start = "tested order:"
statistic_start = "test statistic:"
p_start = " p-value:"
adj_statistic_start = "adjusted test statistic:"
unadjusted_finished = False
in_section = False
for line in whiteness_file:
if not in_section and section_start_marker not in line:
continue
if not in_section and section_start_marker in line:
in_section = True
continue
if line.startswith(order_start):
results["whiteness"]["tested order"] = int(
line[len(order_start):])
continue
if line.startswith(statistic_start):
results["whiteness"]["test statistic"] = float(
line[len(statistic_start):])
continue
if line.startswith(adj_statistic_start):
results["whiteness"]["test statistic adj."] = float(
line[len(adj_statistic_start):])
continue
if line.startswith(p_start): # same for unadjusted and adjusted
if not unadjusted_finished:
results["whiteness"]["p-value"] = \
float(line[len(p_start):])
unadjusted_finished = True
else:
results["whiteness"]["p-value adjusted"] = \
float(line[len(p_start):])
break
whiteness_file.close()
# ---------------------------------------------------------------------
if debug_mode:
print_debug_output(results, dt_string)
results_dict_per_det_terms[dt_s] = results
return results_dict_per_det_terms | Parameters
----------
dataset : module
A data module in the statsmodels/datasets directory that defines a
__str__() method returning the dataset's name and a dt_s_list
attribute listing the deterministic-term/seasons combinations
for which results are to be loaded.
Returns
-------
result : dict
A dict (keys: tuples of deterministic terms and seasonal terms)
of dicts (keys: strings "est" (for estimators),
"se" (for standard errors),
"t" (for t-values),
"p" (for p-values))
of dicts (keys: strings "alpha", "beta", "Gamma" and other results) | load_results_jmulti | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/tests/JMulTi_results/parse_jmulti_vecm_output.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/tests/JMulTi_results/parse_jmulti_vecm_output.py | BSD-3-Clause |
def dt_s_tup_to_string(dt_s_tup):
"""
Parameters
----------
dt_s_tup : tuple
A tuple of length 2.
The first entry is a string specifying the deterministic term without
any information about seasonal terms (for example "nc" or "c").
The second entry is an int specifying the number of seasons.
Returns
-------
dt_string : str
Returns dt_s_tup[0], if dt_s_tup[1] is 0 (i.e. no seasons).
If dt_s_tup[1] is > 0 (i.e. there are seasons) add an "s" to the string
in dt_s_tup[0] like in the following examples:
"nc" --> "ncs"
"c" --> "cs"
"ct" --> "cst"
"""
dt_string = dt_s_tup[0] # string for identifying the file to parse.
if dt_s_tup[1] > 0: # if there are seasons in the model
if dt_string == "nc":
dt_string = dt_string[:2] + "s"
if dt_string == "c" or dt_string == "ct":
dt_string = dt_string[:1] + "s" + dt_string[1:]
return dt_string | Parameters
----------
dt_s_tup : tuple
A tuple of length 2.
The first entry is a string specifying the deterministic term without
any information about seasonal terms (for example "nc" or "c").
The second entry is an int specifying the number of seasons.
Returns
-------
dt_string : str
Returns dt_s_tup[0], if dt_s_tup[1] is 0 (i.e. no seasons).
If dt_s_tup[1] is > 0 (i.e. there are seasons) add an "s" to the string
in dt_s_tup[0] like in the following examples:
"nc" --> "ncs"
"c" --> "cs"
"ct" --> "cst" | dt_s_tup_to_string | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/tests/JMulTi_results/parse_jmulti_var_output.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/tests/JMulTi_results/parse_jmulti_var_output.py | BSD-3-Clause |