code | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|
def summary(self, alpha=.05, start=None, title=None, model_name=None,
display_params=True):
"""
Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals. Default is 0.05.
start : int, optional
Integer of the start observation. Default is 0.
title : str, optional
The title of the summary table.
model_name : str or list of str, optional
The name of the model used. Default is to use model class name.
display_params : bool, optional
Whether or not to display tables of estimated parameters. Default
is True. Usually only used internally.
Returns
-------
summary : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
from statsmodels.iolib.summary import Summary
# Model specification results
model = self.model
if title is None:
title = 'Markov Switching Model Results'
if start is None:
start = 0
if self.data.dates is not None:
dates = self.data.dates
d = dates[start]
sample = ['%02d-%02d-%02d' % (d.month, d.day, d.year)]
d = dates[-1]
sample += ['- ' + '%02d-%02d-%02d' % (d.month, d.day, d.year)]
else:
sample = [str(start), ' - ' + str(self.model.nobs)]
# Standardize the model name as a list of str
if model_name is None:
model_name = model.__class__.__name__
# Create the tables
if not isinstance(model_name, list):
model_name = [model_name]
top_left = [('Dep. Variable:', None)]
top_left.append(('Model:', [model_name[0]]))
for i in range(1, len(model_name)):
top_left.append(('', ['+ ' + model_name[i]]))
top_left += [
('Date:', None),
('Time:', None),
('Sample:', [sample[0]]),
('', [sample[1]])
]
top_right = [
('No. Observations:', [self.model.nobs]),
('Log Likelihood', ["%#5.3f" % self.llf]),
('AIC', ["%#5.3f" % self.aic]),
('BIC', ["%#5.3f" % self.bic]),
('HQIC', ["%#5.3f" % self.hqic])
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
summary = Summary()
summary.add_table_2cols(self, gleft=top_left, gright=top_right,
title=title)
# Make parameters tables for each regime
import re
from statsmodels.iolib.summary import summary_params
def make_table(self, mask, title, strip_end=True):
res = (self, self.params[mask], self.bse[mask],
self.tvalues[mask], self.pvalues[mask],
self.conf_int(alpha)[mask])
param_names = [
re.sub(r'\[\d+\]$', '', name) for name in
np.array(self.data.param_names)[mask].tolist()
]
return summary_params(res, yname=None, xname=param_names,
alpha=alpha, use_t=False, title=title)
params = model.parameters
regime_masks = [[] for i in range(model.k_regimes)]
other_masks = {}
for key, switching in params.switching.items():
k_params = len(switching)
if key == 'regime_transition':
continue
other_masks[key] = []
for i in range(k_params):
if switching[i]:
for j in range(self.k_regimes):
regime_masks[j].append(params[j, key][i])
else:
other_masks[key].append(params[0, key][i])
for i in range(self.k_regimes):
mask = regime_masks[i]
if len(mask) > 0:
table = make_table(self, mask, 'Regime %d parameters' % i)
summary.tables.append(table)
mask = []
for key, _mask in other_masks.items():
mask.extend(_mask)
if len(mask) > 0:
table = make_table(self, mask, 'Non-switching parameters')
summary.tables.append(table)
# Transition parameters
mask = params['regime_transition']
table = make_table(self, mask, 'Regime transition parameters')
summary.tables.append(table)
# Add warnings/notes, added to text format only
etext = []
if hasattr(self, 'cov_type') and 'description' in self.cov_kwds:
etext.append(self.cov_kwds['description'])
if self._rank < len(self.params):
etext.append("Covariance matrix is singular or near-singular,"
" with condition number %6.3g. Standard errors may be"
" unstable." % _safe_cond(self.cov_params()))
if etext:
etext = [f"[{i + 1}] {text}"
for i, text in enumerate(etext)]
etext.insert(0, "Warnings:")
summary.add_extra_txt(etext)
return summary | summary | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
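A minimal usage sketch for the method above; the synthetic series, seed, and settings are illustrative assumptions, not part of the source row. It fits a two-regime `MarkovRegression` (whose results class exposes this `summary`) and prints the tables described in the docstring.

```python
import numpy as np
import statsmodels.api as sm

np.random.seed(0)
# Synthetic series whose mean shifts halfway through the sample
y = np.concatenate([np.random.randn(100), 3.0 + np.random.randn(100)])

mod = sm.tsa.MarkovRegression(y, k_regimes=2)
res = mod.fit()
# Header table plus per-regime and regime-transition parameter tables
print(res.summary(alpha=0.05, model_name="MarkovRegression"))
```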
def arma_order_select_ic(
y, max_ar=4, max_ma=2, ic="bic", trend="c", model_kw=None, fit_kw=None
):
"""
Compute information criteria for many ARMA models.
Parameters
----------
y : array_like
Array of time-series data.
max_ar : int
Maximum number of AR lags to use. Default 4.
max_ma : int
Maximum number of MA lags to use. Default 2.
ic : str, list
Information criteria to report. Either a single string or a list
of different criteria is possible.
trend : str
The trend to use when fitting the ARMA models. Either "n" (no trend)
or "c" (constant only).
model_kw : dict
Keyword arguments to be passed to the ``ARMA`` model.
fit_kw : dict
Keyword arguments to be passed to ``ARMA.fit``.
Returns
-------
Bunch
Dict-like object with attribute access. Each ic is an attribute with a
DataFrame for the results. The AR order used is the row index. The ma
order used is the column index. The minimum orders are available as
``ic_min_order``.
Notes
-----
This method can be used to tentatively identify the order of an ARMA
process, provided that the time series is stationary and invertible. This
function computes the full exact MLE estimate of each model and can
therefore be a little slow. An implementation using approximate estimates
will be provided in the future. In the meantime, consider passing
{method : "css"} to fit_kw.
Examples
--------
>>> from statsmodels.tsa.arima_process import arma_generate_sample
>>> import statsmodels.api as sm
>>> import numpy as np
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> arparams = np.r_[1, -arparams]
>>> maparams = np.r_[1, maparams]
>>> nobs = 250
>>> np.random.seed(2014)
>>> y = arma_generate_sample(arparams, maparams, nobs)
>>> res = sm.tsa.arma_order_select_ic(y, ic=["aic", "bic"], trend="n")
>>> res.aic_min_order
>>> res.bic_min_order
"""
max_ar = int_like(max_ar, "max_ar")
max_ma = int_like(max_ma, "max_ma")
trend = string_like(trend, "trend", options=("n", "c"))
model_kw = dict_like(model_kw, "model_kw", optional=True)
fit_kw = dict_like(fit_kw, "fit_kw", optional=True)
ar_range = list(range(max_ar + 1))
ma_range = list(range(max_ma + 1))
if isinstance(ic, str):
ic = [ic]
elif not isinstance(ic, (list, tuple)):
raise ValueError("Need a list or a tuple for ic if not a string.")
results = np.zeros((len(ic), max_ar + 1, max_ma + 1))
model_kw = {} if model_kw is None else model_kw
fit_kw = {} if fit_kw is None else fit_kw
y_arr = array_like(y, "y", contiguous=True)
for ar in ar_range:
for ma in ma_range:
mod = _safe_arma_fit(y_arr, (ar, 0, ma), model_kw, trend, fit_kw)
if mod is None:
results[:, ar, ma] = np.nan
continue
for i, criteria in enumerate(ic):
results[i, ar, ma] = getattr(mod, criteria)
dfs = [pd.DataFrame(res, columns=ma_range, index=ar_range) for res in results]
res = dict(zip(ic, dfs))
# add the minimums to the results dict
min_res = {}
for i, result in res.items():
delta = np.ascontiguousarray(np.abs(result.min().min() - result))
ncols = delta.shape[1]
loc = np.argmin(delta)
min_res.update({i + "_min_order": (loc // ncols, loc % ncols)})
res.update(min_res)
return Bunch(**res) | arma_order_select_ic | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_arma_order_selection.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_arma_order_selection.py | BSD-3-Clause |
def _autolag(
mod,
endog,
exog,
startlag,
maxlag,
method,
modargs=(),
fitargs=(),
regresults=False,
):
"""
Returns the results for the lag length that minimizes the information criterion.
Parameters
----------
mod : Model class
Model estimator class
endog : array_like
nobs array containing endogenous variable
exog : array_like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {"aic", "bic", "t-stat"}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
icbest : float
Best information criteria.
bestlag : int
The lag length that minimizes the information criterion.
results : dict, optional
Dictionary containing all estimation results
Notes
-----
Does estimation like mod(endog, exog[:, :i], *modargs).fit(*fitargs)
where i goes from startlag to startlag + maxlag. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
# TODO: can tcol be replaced by maxlag + 2?
# TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit(*fitargs)
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in results.items())
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in results.items())
elif method == "t-stat":
# stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
# Default values to ensure that always set
bestlag = startlag + maxlag
icbest = 0.0
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
bestlag = lag
if icbest >= stop:
# Break at the first (highest) lag with a significant t-stat
break
else:
raise ValueError(f"Information Criterion {method} not understood.")
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results | _autolag | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
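Because ``_autolag`` is a private helper, here is a hedged sketch that reproduces its AIC branch with public statsmodels APIs only; the series and ``maxlag`` value are assumptions. It mirrors how ``adfuller`` (below) builds the lag matrix, then keeps the lag count that minimizes AIC.

```python
import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tsa.tsatools import add_trend, lagmat

np.random.seed(0)
x = np.random.randn(500).cumsum()  # example I(1) series
xdiff = np.diff(x)
maxlag = 8
xdall = lagmat(xdiff[:, None], maxlag, trim="both", original="in")
nobs = xdall.shape[0]
xdall[:, 0] = x[-nobs - 1:-1]  # column 0 holds the lagged level, as in adfuller
xdshort = xdiff[-nobs:]
rhs = add_trend(xdall, "c", prepend=True)
startlag = rhs.shape[1] - xdall.shape[1] + 1  # constant + level come first

# Same selection rule as the "aic" branch above: fit each lag count, keep min AIC
results = {
    lag: OLS(xdshort, rhs[:, :lag]).fit()
    for lag in range(startlag, startlag + maxlag + 1)
}
icbest, bestlag = min((v.aic, k) for k, v in results.items())
print("lagged differences selected:", bestlag - startlag)
```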
def adfuller(
x,
maxlag: int | None = None,
regression="c",
autolag="AIC",
store=False,
regresults=False,
):
"""
Augmented Dickey-Fuller unit root test.
The Augmented Dickey-Fuller test can be used to test for a unit root in a
univariate process in the presence of serial correlation.
Parameters
----------
x : array_like, 1d
The data series to test.
maxlag : {None, int}
Maximum lag which is included in test, default value of
12*(nobs/100)^{1/4} is used when ``None``.
regression : {"c","ct","ctt","n"}
Constant and trend order to include in regression.
* "c" : constant only (default).
* "ct" : constant and trend.
* "ctt" : constant, and linear and quadratic trend.
* "n" : no constant, no trend.
autolag : {"AIC", "BIC", "t-stat", None}
Method to use when automatically determining the lag length among the
values 0, 1, ..., maxlag.
* If "AIC" (default) or "BIC", then the number of lags is chosen
to minimize the corresponding information criterion.
* "t-stat" based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test.
* If None, then the number of included lags is set to maxlag.
store : bool
If True, then a result instance is returned additionally to
the adf statistic. Default is False.
regresults : bool, optional
If True, the full regression results are returned. Default is False.
Returns
-------
adf : float
The test statistic.
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994, 2010).
usedlag : int
The number of lags used.
nobs : int
The number of observations used for the ADF regression and calculation
of the critical values.
critical values : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels. Based on MacKinnon (2010).
icbest : float
The maximized information criterion if autolag is not None.
resstore : ResultStore, optional
A dummy class with results attached as attributes.
Notes
-----
The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
root, with the alternative that there is no unit root. If the pvalue is
above a critical size, then we cannot reject that there is a unit root.
The p-values are obtained through regression surface approximation from
MacKinnon 1994, but using the updated 2010 tables. If the p-value is close
to significant, then the critical values should be used to judge whether
to reject the null.
The autolag option and maxlag for it are described in Greene.
See the notebook `Stationarity and detrending (ADF/KPSS)
<../examples/notebooks/generated/stationarity_detrending_adf_kpss.html>`__
for an overview.
References
----------
.. [1] W. Greene. "Econometric Analysis," 5th ed., Pearson, 2003.
.. [2] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.
.. [3] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests." `Journal of Business and Economic
Statistics` 12, 167-76.
.. [4] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
x = array_like(x, "x")
maxlag = int_like(maxlag, "maxlag", optional=True)
regression = string_like(regression, "regression", options=("c", "ct", "ctt", "n"))
autolag = string_like(
autolag, "autolag", optional=True, options=("aic", "bic", "t-stat")
)
store = bool_like(store, "store")
regresults = bool_like(regresults, "regresults")
if x.max() == x.min():
raise ValueError("Invalid input, x is constant")
if regresults:
store = True
trenddict = {None: "n", 0: "c", 1: "ct", 2: "ctt"}
if regression is None or isinstance(regression, int):
regression = trenddict[regression]
regression = regression.lower()
nobs = x.shape[0]
ntrend = len(regression) if regression != "n" else 0
if maxlag is None:
# from Greene referencing Schwert 1989
maxlag = int(np.ceil(12.0 * np.power(nobs / 100.0, 1 / 4.0)))
# -1 for the diff
maxlag = min(nobs // 2 - ntrend - 1, maxlag)
if maxlag < 0:
raise ValueError(
"sample size is too short to use selected regression component"
)
elif maxlag > nobs // 2 - ntrend - 1:
raise ValueError(
"maxlag must be less than (nobs/2 - 1 - ntrend) "
"where ntrend is the number of included "
"deterministic regressors"
)
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim="both", original="in")
nobs = xdall.shape[0]
xdall[:, 0] = x[-nobs - 1 : -1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
if store:
resstore = ResultsStore()
if autolag:
if regression != "n":
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = fullRHS.shape[1] - xdall.shape[1] + 1
# 1 for level
# search for lag length with smallest information criteria
# Note: use the same number of observations to have comparable IC
# aic and bic: smaller is better
if not regresults:
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag, maxlag, autolag)
else:
icbest, bestlag, alres = _autolag(
OLS,
xdshort,
fullRHS,
startlag,
maxlag,
autolag,
regresults=regresults,
)
resstore.autolag_results = alres
bestlag -= startlag # convert to lag not column index
# rerun ols with best autolag
xdall = lagmat(xdiff[:, None], bestlag, trim="both", original="in")
nobs = xdall.shape[0]
xdall[:, 0] = x[-nobs - 1 : -1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if regression != "n":
resols = OLS(xdshort, add_trend(xdall[:, : usedlag + 1], regression)).fit()
else:
resols = OLS(xdshort, xdall[:, : usedlag + 1]).fit()
adfstat = resols.tvalues[0]
# adfstat = (resols.params[0]-1.0)/resols.bse[0]
# the "asymptotically correct" z statistic is obtained as
# nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
# I think this is the statistic that is used for series that are integrated
# for orders higher than I(1), ie., not ADF but cointegration tests.
# Get approx p-value and critical values
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {
"1%": critvalues[0],
"5%": critvalues[1],
"10%": critvalues[2],
}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = "The coefficient on the lagged level equals 1 - unit root"
resstore.HA = "The coefficient on the lagged level < 1 - stationary"
resstore.icbest = icbest
resstore._str = "Augmented Dickey-Fuller Test Results"
return adfstat, pvalue, critvalues, resstore
else:
if not autolag:
return adfstat, pvalue, usedlag, nobs, critvalues
else:
return adfstat, pvalue, usedlag, nobs, critvalues, icbest | adfuller | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
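An assumed illustration of the return tuple: a random walk should fail to reject the unit-root null, while white noise should reject it. The data and seed are illustrative.

```python
import numpy as np
from statsmodels.tsa.stattools import adfuller

np.random.seed(0)
series = {
    "random walk": np.random.randn(500).cumsum(),  # I(1): expect a high p-value
    "white noise": np.random.randn(500),           # I(0): expect a low p-value
}
for name, s in series.items():
    stat, pvalue, usedlag, nobs, crit, icbest = adfuller(s, autolag="AIC")
    print(f"{name}: stat={stat:.2f} p={pvalue:.3f} lags={usedlag} 5%={crit['5%']:.2f}")
```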
def acovf(x, adjusted=False, demean=True, fft=True, missing="none", nlag=None):
"""
Estimate autocovariances.
Parameters
----------
x : array_like
Time series data. Must be 1d.
adjusted : bool, default False
If True, then denominators are n - k, otherwise n.
demean : bool, default True
If True, then subtract the mean x from each element of x.
fft : bool, default True
If True, use FFT convolution. This method should be preferred
for long time series.
missing : str, default "none"
A string in ["none", "raise", "conservative", "drop"] specifying how
the NaNs are to be treated. "none" performs no checks. "raise" raises
an exception if NaN values are found. "drop" removes the missing
observations and then estimates the autocovariances treating the
non-missing as contiguous. "conservative" computes the autocovariance
using nan-ops so that nans are removed when computing the mean
and cross-products that are used to estimate the autocovariance.
When using "conservative", n is set to the number of non-missing
observations.
nlag : {int, None}, default None
Limit the number of autocovariances returned. Size of returned
array is nlag + 1. Setting nlag when fft is False uses a simple,
direct estimator of the autocovariances that only computes the first
nlag + 1 values. This can be much faster when the time series is long
and only a small number of autocovariances are needed.
Returns
-------
ndarray
The estimated autocovariances.
References
----------
.. [1] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
adjusted = bool_like(adjusted, "adjusted")
demean = bool_like(demean, "demean")
fft = bool_like(fft, "fft", optional=False)
missing = string_like(
missing, "missing", options=("none", "raise", "conservative", "drop")
)
nlag = int_like(nlag, "nlag", optional=True)
x = array_like(x, "x", ndim=1)
missing = missing.lower()
if missing == "none":
deal_with_masked = False
else:
deal_with_masked = has_missing(x)
if deal_with_masked:
if missing == "raise":
raise MissingDataError("NaNs were encountered in the data")
notmask_bool = ~np.isnan(x) # bool
if missing == "conservative":
# Must copy for thread safety
x = x.copy()
x[~notmask_bool] = 0
else: # "drop"
x = x[notmask_bool] # copies non-missing
notmask_int = notmask_bool.astype(int) # int
if demean and deal_with_masked:
# whether "drop" or "conservative":
xo = x - x.sum() / notmask_int.sum()
if missing == "conservative":
xo[~notmask_bool] = 0
elif demean:
xo = x - x.mean()
else:
xo = x
n = len(x)
lag_len = nlag
if nlag is None:
lag_len = n - 1
elif nlag > n - 1:
raise ValueError("nlag must be smaller than nobs - 1")
if not fft and nlag is not None:
acov = np.empty(lag_len + 1)
acov[0] = xo.dot(xo)
for i in range(lag_len):
acov[i + 1] = xo[i + 1 :].dot(xo[: -(i + 1)])
if not deal_with_masked or missing == "drop":
if adjusted:
acov /= n - np.arange(lag_len + 1)
else:
acov /= n
else:
if adjusted:
divisor = np.empty(lag_len + 1, dtype=np.int64)
divisor[0] = notmask_int.sum()
for i in range(lag_len):
divisor[i + 1] = notmask_int[i + 1 :].dot(notmask_int[: -(i + 1)])
divisor[divisor == 0] = 1
acov /= divisor
else: # biased, missing data but not "drop"
acov /= notmask_int.sum()
return acov
if adjusted and deal_with_masked and missing == "conservative":
d = np.correlate(notmask_int, notmask_int, "full")
d[d == 0] = 1
elif adjusted:
xi = np.arange(1, n + 1)
d = np.hstack((xi, xi[:-1][::-1]))
elif deal_with_masked:
# biased and NaNs given and ("drop" or "conservative")
d = notmask_int.sum() * np.ones(2 * n - 1)
else: # biased and no NaNs or missing=="none"
d = n * np.ones(2 * n - 1)
if fft:
nobs = len(xo)
n = _next_regular(2 * nobs + 1)
Frf = np.fft.fft(xo, n=n)
acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1 :]
acov = acov.real
else:
acov = np.correlate(xo, xo, "full")[n - 1 :] / d[n - 1 :]
if nlag is not None:
# Copy to allow gc of full array rather than view
return acov[: lag_len + 1].copy()
return acov | acovf | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
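A short sketch of the conventions above (synthetic data assumed): with ``adjusted=False`` the lag-0 value is the biased sample variance, and ``nlag`` truncates the output to ``nlag + 1`` values.

```python
import numpy as np
from statsmodels.tsa.stattools import acovf

np.random.seed(0)
x = np.random.randn(200)
acov = acovf(x, adjusted=False, fft=True, nlag=5)
print(acov.shape)                    # (6,): lags 0..5
print(np.isclose(acov[0], x.var()))  # True: lag 0 equals the biased variance
```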
def q_stat(x, nobs):
"""
Compute Ljung-Box Q Statistic.
Parameters
----------
x : array_like
Array of autocorrelation coefficients. Can be obtained from acf.
nobs : int
Number of observations in the entire sample (i.e., not just the length
of the autocorrelation function results).
Returns
-------
q-stat : ndarray
Ljung-Box Q-statistic for autocorrelation parameters.
p-value : ndarray
P-value of the Q statistic.
See Also
--------
statsmodels.stats.diagnostic.acorr_ljungbox
Ljung-Box Q-test for autocorrelation in time series based
on a time series rather than the estimated autocorrelation
function.
Notes
-----
Designed to be used with acf.
"""
x = array_like(x, "x")
nobs = int_like(nobs, "nobs")
ret = (
nobs * (nobs + 2) * np.cumsum((1.0 / (nobs - np.arange(1, len(x) + 1))) * x**2)
)
chi2 = stats.chi2.sf(ret, np.arange(1, len(x) + 1))
return ret, chi2 | q_stat | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
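An assumed usage sketch: compute sample autocorrelations with ``acf``, drop lag 0, and pass them to ``q_stat`` along with the original sample size.

```python
import numpy as np
from statsmodels.tsa.stattools import acf, q_stat

np.random.seed(0)
x = np.random.randn(300)  # white noise: Q-test p-values should be large
r = acf(x, nlags=10, fft=True)
qstats, pvalues = q_stat(r[1:], nobs=len(x))  # exclude lag 0
print(qstats[-1].round(2), pvalues[-1].round(3))
```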
def acf(
x,
adjusted=False,
nlags=None,
qstat=False,
fft=True,
alpha=None,
bartlett_confint=True,
missing="none",
):
"""
Calculate the autocorrelation function.
Parameters
----------
x : array_like
The time series data.
adjusted : bool, default False
If True, then denominators for autocovariance are n-k, otherwise n.
nlags : int, optional
Number of lags to return autocorrelation for. If not provided,
uses min(10 * np.log10(nobs), nobs - 1). The returned value
includes lag 0 (i.e., 1), so the size of the acf vector is (nlags + 1,).
qstat : bool, default False
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, default True
If True, computes the ACF via FFT.
alpha : scalar, default None
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett"s formula.
bartlett_confint : bool, default True
Confidence intervals for ACF values are generally placed at 2
standard errors around r_k. The formula used for standard error
depends upon the situation. If the autocorrelations are being used
to test for randomness of residuals as part of the ARIMA routine,
the standard errors are determined assuming the residuals are white
noise. The approximate formula for any lag is that standard error
of each r_k = 1/sqrt(N). See section 9.4 of [2] for more details on
the 1/sqrt(N) result. For more elementary discussion, see section 5.3.2
in [3].
For the ACF of raw data, the standard error at a lag k is
found as if the right model was an MA(k-1). This allows the possible
interpretation that if all autocorrelations past a certain lag are
within the limits, the model might be an MA of order defined by the
last significant autocorrelation. In this case, a moving average
model is assumed for the data and the standard errors for the
confidence intervals should be generated using Bartlett's formula.
For more details on Bartlett formula result, see section 7.2 in [2].
missing : str, default "none"
A string in ["none", "raise", "conservative", "drop"] specifying how
the NaNs are to be treated. "none" performs no checks. "raise" raises
an exception if NaN values are found. "drop" removes the missing
observations and then estimates the autocovariances treating the
non-missing as contiguous. "conservative" computes the autocovariance
using nan-ops so that nans are removed when computing the mean
and cross-products that are used to estimate the autocovariance.
When using "conservative", n is set to the number of non-missing
observations.
Returns
-------
acf : ndarray
The autocorrelation function for lags 0, 1, ..., nlags. Shape
(nlags+1,).
confint : ndarray, optional
Confidence intervals for the ACF at lags 0, 1, ..., nlags. Shape
(nlags + 1, 2). Returned if alpha is not None. The confidence
intervals are centered on the estimated ACF values. This behavior
differs from plot_acf which centers the confidence intervals on 0.
qstat : ndarray, optional
The Ljung-Box Q-Statistic for lags 1, 2, ..., nlags (excludes lag
zero). Returned if q_stat is True.
pvalues : ndarray, optional
The p-values associated with the Q-statistics for lags 1, 2, ...,
nlags (excludes lag zero). Returned if q_stat is True.
Notes
-----
The acf at lag 0 (i.e., 1) is returned.
For very long time series it is recommended to use fft convolution instead.
When fft is False uses a simple, direct estimator of the autocovariances
that only computes the first nlag + 1 values. This can be much faster when
the time series is long and only a small number of autocovariances are
needed.
If adjusted is true, the denominator for the autocovariance is adjusted
for the loss of data.
References
----------
.. [1] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
.. [2] Brockwell and Davis, 1987. Time Series Theory and Methods
.. [3] Brockwell and Davis, 2010. Introduction to Time Series and
Forecasting, 2nd edition.
See Also
--------
statsmodels.tsa.stattools.acf
Estimate the autocorrelation function.
statsmodels.graphics.tsaplots.plot_acf
Plot autocorrelations and confidence intervals.
"""
adjusted = bool_like(adjusted, "adjusted")
nlags = int_like(nlags, "nlags", optional=True)
qstat = bool_like(qstat, "qstat")
fft = bool_like(fft, "fft", optional=False)
alpha = float_like(alpha, "alpha", optional=True)
missing = string_like(
missing, "missing", options=("none", "raise", "conservative", "drop")
)
x = array_like(x, "x")
# TODO: should this shrink for missing="drop" and NaNs in x?
nobs = x.shape[0]
if nlags is None:
nlags = min(int(10 * np.log10(nobs)), nobs - 1)
avf = acovf(x, adjusted=adjusted, demean=True, fft=fft, missing=missing)
acf = avf[: nlags + 1] / avf[0]
if not (qstat or alpha):
return acf
_alpha = alpha if alpha is not None else 0.05
if bartlett_confint:
varacf = np.ones_like(acf) / nobs
varacf[0] = 0
varacf[1] = 1.0 / nobs
varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1] ** 2)
else:
varacf = 1.0 / len(x)
interval = stats.norm.ppf(1 - _alpha / 2.0) * np.sqrt(varacf)
confint = np.array(lzip(acf - interval, acf + interval))
if not qstat:
return acf, confint
qstat, pvalue = q_stat(acf[1:], nobs=nobs) # drop lag 0
if alpha is not None:
return acf, confint, qstat, pvalue
else:
return acf, qstat, pvalue | acf | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
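A minimal sketch of the return shapes when both ``alpha`` and ``qstat`` are set; the data are assumed.

```python
import numpy as np
from statsmodels.tsa.stattools import acf

np.random.seed(0)
x = np.random.randn(250)
r, confint, qstats, pvalues = acf(x, nlags=12, alpha=0.05, qstat=True)
print(r.shape, confint.shape)       # (13,) (13, 2): lags 0..12
print(qstats.shape, pvalues.shape)  # (12,) (12,):   lags 1..12
```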
def pacf_yw(
x: ArrayLike1D,
nlags: int | None = None,
method: Literal["adjusted", "mle"] = "adjusted",
) -> np.ndarray:
"""
Partial autocorrelation estimated with non-recursive yule_walker.
Parameters
----------
x : array_like
The observations of time series for which pacf is calculated.
nlags : int, optional
Number of lags to return autocorrelation for. If not provided,
uses min(10 * np.log10(nobs), nobs - 1).
method : {"adjusted", "mle"}, default "adjusted"
The method for the autocovariance calculations in yule walker.
Returns
-------
ndarray
The partial autocorrelations, maxlag+1 elements.
See Also
--------
statsmodels.tsa.stattools.pacf
Partial autocorrelation estimation.
statsmodels.tsa.stattools.pacf_ols
Partial autocorrelation estimation using OLS.
statsmodels.tsa.stattools.pacf_burg
Partial autocorrelation estimation using Burg's method.
Notes
-----
This solves yule_walker for each desired lag and contains
currently duplicate calculations.
"""
x = array_like(x, "x")
nlags = int_like(nlags, "nlags", optional=True)
nobs = x.shape[0]
if nlags is None:
nlags = max(min(int(10 * np.log10(nobs)), nobs - 1), 1)
method = string_like(method, "method", options=("adjusted", "mle"))
pacf = [1.0]
with warnings.catch_warnings():
warnings.simplefilter("once", ValueWarning)
for k in range(1, nlags + 1):
pacf.append(yule_walker(x, k, method=method)[0][-1])
return np.array(pacf) | pacf_yw | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
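An assumed illustration: for a simulated AR(1) process with coefficient 0.7, the Yule-Walker PACF should be close to 0.7 at lag 1 and near zero beyond.

```python
import numpy as np
from statsmodels.tsa.stattools import pacf_yw

np.random.seed(0)
e = np.random.randn(1000)
x = np.zeros_like(e)
for t in range(1, len(x)):
    x[t] = 0.7 * x[t - 1] + e[t]  # AR(1) with phi = 0.7

print(pacf_yw(x, nlags=4, method="mle").round(2))  # roughly [1.0, 0.7, 0, 0, 0]
```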
def pacf_burg(
x: ArrayLike1D, nlags: int | None = None, demean: bool = True
) -> tuple[np.ndarray, np.ndarray]:
"""
Calculate Burg"s partial autocorrelation estimator.
Parameters
----------
x : array_like
Observations of time series for which pacf is calculated.
nlags : int, optional
Number of lags to return autocorrelation for. If not provided,
uses min(10 * np.log10(nobs), nobs - 1).
demean : bool, optional
Flag indicating whether to demean the data. Set to False if x has been
previously demeaned.
Returns
-------
pacf : ndarray
Partial autocorrelations for lags 0, 1, ..., nlag.
sigma2 : ndarray
Residual variance estimates where the value in position m is the
residual variance in an AR model that includes m lags.
See Also
--------
statsmodels.tsa.stattools.pacf
Partial autocorrelation estimation.
statsmodels.tsa.stattools.pacf_yw
Partial autocorrelation estimation using Yule-Walker.
statsmodels.tsa.stattools.pacf_ols
Partial autocorrelation estimation using OLS.
References
----------
.. [1] Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series
and forecasting. Springer.
"""
x = array_like(x, "x")
if demean:
x = x - x.mean()
nobs = x.shape[0]
p = nlags if nlags is not None else min(int(10 * np.log10(nobs)), nobs - 1)
p = max(p, 1)
if p > nobs - 1:
raise ValueError("nlags must be smaller than nobs - 1")
d = np.zeros(p + 1)
d[0] = 2 * x.dot(x)
pacf = np.zeros(p + 1)
u = x[::-1].copy()
v = x[::-1].copy()
d[1] = u[:-1].dot(u[:-1]) + v[1:].dot(v[1:])
pacf[1] = 2 / d[1] * v[1:].dot(u[:-1])
last_u = np.empty_like(u)
last_v = np.empty_like(v)
for i in range(1, p):
last_u[:] = u
last_v[:] = v
u[1:] = last_u[:-1] - pacf[i] * last_v[1:]
v[1:] = last_v[1:] - pacf[i] * last_u[:-1]
d[i + 1] = (1 - pacf[i] ** 2) * d[i] - v[i] ** 2 - u[-1] ** 2
pacf[i + 1] = 2 / d[i + 1] * v[i + 1 :].dot(u[i:-1])
sigma2 = (1 - pacf**2) * d / (2.0 * (nobs - np.arange(0, p + 1)))
pacf[0] = 1 # Insert the lag 0 partial autocorrelation
return pacf, sigma2 | pacf_burg | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
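A short sketch of the two return values on assumed data: the PACF vector and the per-order residual variance estimates.

```python
import numpy as np
from statsmodels.tsa.stattools import pacf_burg

np.random.seed(0)
x = np.random.randn(500)
pacf_vals, sigma2 = pacf_burg(x, nlags=5, demean=True)
print(pacf_vals.shape, sigma2.shape)   # (6,) (6,): orders 0..5
print(np.isclose(sigma2[0], x.var()))  # order-0 variance is the sample variance
```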
def pacf_ols(
x: ArrayLike1D,
nlags: int | None = None,
efficient: bool = True,
adjusted: bool = False,
) -> np.ndarray:
"""
Calculate partial autocorrelations via OLS.
Parameters
----------
x : array_like
Observations of time series for which pacf is calculated.
nlags : int, optional
Number of lags to return autocorrelation for. If not provided,
uses min(10 * np.log10(nobs), nobs - 1).
efficient : bool, optional
If true, uses the maximum number of available observations to compute
each partial autocorrelation. If not, uses the same number of
observations to compute all pacf values.
adjusted : bool, optional
Adjust each partial autocorrelation by n / (n - lag).
Returns
-------
ndarray
The partial autocorrelations, a (nlags + 1,) array corresponding to lags
0, 1, ..., nlags.
See Also
--------
statsmodels.tsa.stattools.pacf
Partial autocorrelation estimation.
statsmodels.tsa.stattools.pacf_yw
Partial autocorrelation estimation using Yule-Walker.
statsmodels.tsa.stattools.pacf_burg
Partial autocorrelation estimation using Burg's method.
Notes
-----
This solves a separate OLS estimation for each desired lag using method in
[1]_. Setting efficient to True has two effects. First, it uses
`nobs - lag` observations to estimate each pacf. Second, it re-estimates
the mean in each regression. If efficient is False, then the data are first
demeaned, and then `nobs - maxlag` observations are used to estimate each
partial autocorrelation.
The inefficient estimator appears to have better finite sample properties.
This option should only be used in time series that are covariance
stationary.
OLS estimation of the pacf does not guarantee that all pacf values are
between -1 and 1.
References
----------
.. [1] Box, G. E., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015).
Time series analysis: forecasting and control. John Wiley & Sons, p. 66
"""
x = array_like(x, "x")
nlags = int_like(nlags, "nlags", optional=True)
efficient = bool_like(efficient, "efficient")
adjusted = bool_like(adjusted, "adjusted")
nobs = x.shape[0]
if nlags is None:
nlags = max(min(int(10 * np.log10(nobs)), nobs // 2), 1)
if nlags > nobs // 2:
raise ValueError(f"nlags must be smaller than nobs // 2 ({nobs//2})")
pacf = np.empty(nlags + 1)
pacf[0] = 1.0
if efficient:
xlags, x0 = lagmat(x, nlags, original="sep")
xlags = add_constant(xlags)
for k in range(1, nlags + 1):
params = lstsq(xlags[k:, : k + 1], x0[k:], rcond=None)[0]
pacf[k] = np.squeeze(params[-1])
else:
x = x - np.mean(x)
# Create a single set of lags for multivariate OLS
xlags, x0 = lagmat(x, nlags, original="sep", trim="both")
for k in range(1, nlags + 1):
params = lstsq(xlags[:, :k], x0, rcond=None)[0]
# Last coefficient corresponds to PACF value (see [1])
pacf[k] = np.squeeze(params[-1])
if adjusted:
pacf *= nobs / (nobs - np.arange(nlags + 1))
return pacf | pacf_ols | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
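An assumed comparison of the two estimation modes described in the Notes; on a well-behaved series the efficient and inefficient variants agree closely.

```python
import numpy as np
from statsmodels.tsa.stattools import pacf_ols

np.random.seed(0)
x = np.random.randn(400)
print(pacf_ols(x, nlags=5, efficient=True).round(3))   # per-lag samples, re-estimated mean
print(pacf_ols(x, nlags=5, efficient=False).round(3))  # common sample, pre-demeaned
```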
def pacf(
x: ArrayLike1D,
nlags: int | None = None,
method: Literal[
"yw",
"ywadjusted",
"ols",
"ols-inefficient",
"ols-adjusted",
"ywm",
"ywmle",
"ld",
"ldadjusted",
"ldb",
"ldbiased",
"burg",
] = "ywadjusted",
alpha: float | None = None,
) -> np.ndarray | tuple[np.ndarray, np.ndarray]:
"""
Partial autocorrelation estimate.
Parameters
----------
x : array_like
Observations of time series for which pacf is calculated.
nlags : int, optional
Number of lags to return autocorrelation for. If not provided,
uses min(10 * np.log10(nobs), nobs // 2 - 1). The returned value
includes lag 0 (i.e., 1), so the size of the pacf vector is (nlags + 1,).
method : str, default "ywadjusted"
Specifies which method for the calculations to use.
- "yw" or "ywadjusted" : Yule-Walker with sample-size adjustment in
denominator for acovf. Default.
- "ywm" or "ywmle" : Yule-Walker without adjustment.
- "ols" : regression of time series on lags of it and on constant.
- "ols-inefficient" : regression of time series on lags using a single
common sample to estimate all pacf coefficients.
- "ols-adjusted" : regression of time series on lags with a bias
adjustment.
- "ld" or "ldadjusted" : Levinson-Durbin recursion with bias
correction.
- "ldb" or "ldbiased" : Levinson-Durbin recursion without bias
correction.
- "burg" : Burg"s partial autocorrelation estimator.
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x)).
Returns
-------
pacf : ndarray
The partial autocorrelations for lags 0, 1, ..., nlags. Shape
(nlags+1,).
confint : ndarray, optional
Confidence intervals for the PACF at lags 0, 1, ..., nlags. Shape
(nlags + 1, 2). Returned if alpha is not None.
See Also
--------
statsmodels.tsa.stattools.acf
Estimate the autocorrelation function.
statsmodels.tsa.stattools.pacf
Partial autocorrelation estimation.
statsmodels.tsa.stattools.pacf_yw
Partial autocorrelation estimation using Yule-Walker.
statsmodels.tsa.stattools.pacf_ols
Partial autocorrelation estimation using OLS.
statsmodels.tsa.stattools.pacf_burg
Partial autocorrelation estimation using Burg's method.
statsmodels.graphics.tsaplots.plot_pacf
Plot partial autocorrelations and confidence intervals.
Notes
-----
Based on simulation evidence across a range of low-order ARMA models,
the best methods based on root MSE are Yule-Walker (MLE), Levinson-Durbin
(MLE) and Burg, respectively. The estimators with the lowest bias
included these three in addition to OLS and OLS-adjusted.
Yule-Walker (adjusted) and Levinson-Durbin (adjusted) performed
consistently worse than the other options.
"""
nlags = int_like(nlags, "nlags", optional=True)
methods = (
"ols",
"ols-inefficient",
"ols-adjusted",
"yw",
"ywa",
"ld",
"ywadjusted",
"yw_adjusted",
"ywm",
"ywmle",
"yw_mle",
"lda",
"ldadjusted",
"ld_adjusted",
"ldb",
"ldbiased",
"ld_biased",
"burg",
)
x = array_like(x, "x", maxdim=2)
method = string_like(method, "method", options=methods)
alpha = float_like(alpha, "alpha", optional=True)
nobs = x.shape[0]
if nlags is None:
nlags = min(int(10 * np.log10(nobs)), nobs // 2 - 1)
nlags = max(nlags, 1)
if nlags > x.shape[0] // 2:
raise ValueError(
"Can only compute partial correlations for lags up to 50% of the "
f"sample size. The requested nlags {nlags} must be < "
f"{x.shape[0] // 2}."
)
if method in ("ols", "ols-inefficient", "ols-adjusted"):
efficient = "inefficient" not in method
adjusted = "adjusted" in method
ret = pacf_ols(x, nlags=nlags, efficient=efficient, adjusted=adjusted)
elif method in ("yw", "ywa", "ywadjusted", "yw_adjusted"):
ret = pacf_yw(x, nlags=nlags, method="adjusted")
elif method in ("ywm", "ywmle", "yw_mle"):
ret = pacf_yw(x, nlags=nlags, method="mle")
elif method in ("ld", "lda", "ldadjusted", "ld_adjusted"):
acv = acovf(x, adjusted=True, fft=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
elif method == "burg":
ret, _ = pacf_burg(x, nlags=nlags, demean=True)
# inconsistent naming with ywmle
else: # method in ("ldb", "ldbiased", "ld_biased")
acv = acovf(x, adjusted=False, fft=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
if alpha is not None:
varacf = 1.0 / len(x) # for all lags >=1
interval = stats.norm.ppf(1.0 - alpha / 2.0) * np.sqrt(varacf)
confint = np.array(lzip(ret - interval, ret + interval))
confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0
return ret, confint
else:
return ret | Partial autocorrelation estimate.
Parameters
----------
x : array_like
Observations of time series for which pacf is calculated.
nlags : int, optional
Number of lags to return autocorrelation for. If not provided,
uses min(10 * np.log10(nobs), nobs // 2 - 1). The returned value
includes lag 0 (ie., 1) so size of the pacf vector is (nlags + 1,).
method : str, default "ywunbiased"
Specifies which method for the calculations to use.
- "yw" or "ywadjusted" : Yule-Walker with sample-size adjustment in
denominator for acovf. Default.
- "ywm" or "ywmle" : Yule-Walker without adjustment.
- "ols" : regression of time series on lags of it and on constant.
- "ols-inefficient" : regression of time series on lags using a single
common sample to estimate all pacf coefficients.
- "ols-adjusted" : regression of time series on lags with a bias
adjustment.
- "ld" or "ldadjusted" : Levinson-Durbin recursion with bias
correction.
- "ldb" or "ldbiased" : Levinson-Durbin recursion without bias
correction.
- "burg" : Burg"s partial autocorrelation estimator.
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x)).
Returns
-------
pacf : ndarray
The partial autocorrelations for lags 0, 1, ..., nlags. Shape
(nlags+1,).
confint : ndarray, optional
Confidence intervals for the PACF at lags 0, 1, ..., nlags. Shape
(nlags + 1, 2). Returned if alpha is not None.
See Also
--------
statsmodels.tsa.stattools.acf
Estimate the autocorrelation function.
statsmodels.tsa.stattools.pacf
Partial autocorrelation estimation.
statsmodels.tsa.stattools.pacf_yw
Partial autocorrelation estimation using Yule-Walker.
statsmodels.tsa.stattools.pacf_ols
Partial autocorrelation estimation using OLS.
statsmodels.tsa.stattools.pacf_burg
Partial autocorrelation estimation using Burg's method.
statsmodels.graphics.tsaplots.plot_pacf
Plot partial autocorrelations and confidence intervals.
Notes
-----
Based on simulation evidence across a range of low-order ARMA models,
the best methods based on root MSE are Yule-Walker (MLE), Levinson-Durbin
(MLE) and Burg, respectively. The estimators with the lowest bias
included these three in addition to OLS and OLS-adjusted.
Yule-Walker (adjusted) and Levinson-Durbin (adjusted) performed
consistently worse than the other options. | pacf | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause
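A usage sketch (editorial addition, not part of the statsmodels source; the simulated AR(1) series, seed, and expected values in the comments are illustrative assumptions):

import numpy as np
from statsmodels.tsa.stattools import pacf

# simulate an AR(1) series with coefficient 0.6
rng = np.random.default_rng(0)
x = np.zeros(500)
for t in range(1, 500):
    x[t] = 0.6 * x[t - 1] + rng.standard_normal()

# Yule-Walker estimate with 95% confidence intervals
vals, confint = pacf(x, nlags=10, method="yw", alpha=0.05)
print(vals[1])  # close to 0.6; entries at higher lags are near zero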
def ccovf(x, y, adjusted=True, demean=True, fft=True):
"""
Calculate the cross-covariance between two series.
Parameters
----------
x, y : array_like
The time series data to use in the calculation.
adjusted : bool, optional
If True, then denominators for cross-covariance are n-k, otherwise n.
demean : bool, optional
Flag indicating whether to demean x and y.
fft : bool, default True
If True, use FFT convolution. This method should be preferred
for long time series.
Returns
-------
ndarray
The estimated cross-covariance function: the element at index k
is the covariance between {x[k], x[k+1], ..., x[n]} and
{y[0], y[1], ..., y[m-k]}, where n and m are the lengths of x and y,
respectively.
"""
x = array_like(x, "x")
y = array_like(y, "y")
adjusted = bool_like(adjusted, "adjusted")
demean = bool_like(demean, "demean")
fft = bool_like(fft, "fft", optional=False)
n = len(x)
if demean:
xo = x - x.mean()
yo = y - y.mean()
else:
xo = x
yo = y
if adjusted:
d = np.arange(n, 0, -1)
else:
d = n
method = "fft" if fft else "direct"
return correlate(xo, yo, "full", method=method)[n - 1 :] / d | Calculate the cross-covariance between two series.
Parameters
----------
x, y : array_like
The time series data to use in the calculation.
adjusted : bool, optional
If True, then denominators for cross-covariance are n-k, otherwise n.
demean : bool, optional
Flag indicating whether to demean x and y.
fft : bool, default True
If True, use FFT convolution. This method should be preferred
for long time series.
Returns
-------
ndarray
The estimated cross-covariance function: the element at index k
is the covariance between {x[k], x[k+1], ..., x[n]} and
{y[0], y[1], ..., y[m-k]}, where n and m are the lengths of x and y,
respectively. | ccovf | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
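A minimal sketch of calling ccovf (editorial addition; the correlated noise series below are illustrative assumptions):

import numpy as np
from statsmodels.tsa.stattools import ccovf

rng = np.random.default_rng(1)
x = rng.standard_normal(200)
y = 0.5 * x + rng.standard_normal(200)  # contemporaneous dependence only
cv = ccovf(x, y, adjusted=True, demean=True, fft=True)
print(cv[0])  # lag-0 cross-covariance, roughly 0.5
print(cv[1])  # lag-1 value, roughly 0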
def ccf(x, y, adjusted=True, fft=True, *, nlags=None, alpha=None):
"""
The cross-correlation function.
Parameters
----------
x, y : array_like
The time series data to use in the calculation.
adjusted : bool
If True, then denominators for cross-correlation are n-k, otherwise n.
fft : bool, default True
If True, use FFT convolution. This method should be preferred
for long time series.
nlags : int, optional
Number of lags to return cross-correlations for. If not provided,
the number of lags equals len(x).
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x)).
Returns
-------
ndarray
The cross-correlation function of x and y: the element at index k
is the correlation between {x[k], x[k+1], ..., x[n]} and
{y[0], y[1], ..., y[m-k]}, where n and m are the lengths of x and y,
respectively.
confint : ndarray, optional
Confidence intervals for the CCF at lags 0, 1, ..., nlags-1 using the
level given by alpha and the standard deviation calculated as
1/sqrt(len(x)) [1]. Shape (nlags, 2). Returned if alpha is not None.
Notes
-----
If adjusted is True, the denominator for the cross-correlation at lag k is
n - k rather than n.
References
----------
.. [1] Brockwell and Davis, 2016. Introduction to Time Series and
Forecasting, 3rd edition, p. 242.
"""
x = array_like(x, "x")
y = array_like(y, "y")
adjusted = bool_like(adjusted, "adjusted")
fft = bool_like(fft, "fft", optional=False)
cvf = ccovf(x, y, adjusted=adjusted, demean=True, fft=fft)
ret = cvf / (np.std(x) * np.std(y))
ret = ret[:nlags]
if alpha is not None:
interval = stats.norm.ppf(1.0 - alpha / 2.0) / np.sqrt(len(x))
confint = ret.reshape(-1, 1) + interval * np.array([-1, 1])
return ret, confint
else:
return ret | The cross-correlation function.
Parameters
----------
x, y : array_like
The time series data to use in the calculation.
adjusted : bool
If True, then denominators for cross-correlation are n-k, otherwise n.
fft : bool, default True
If True, use FFT convolution. This method should be preferred
for long time series.
nlags : int, optional
Number of lags to return cross-correlations for. If not provided,
the number of lags equals len(x).
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x)).
Returns
-------
ndarray
The cross-correlation function of x and y: the element at index k
is the correlation between {x[k], x[k+1], ..., x[n]} and
{y[0], y[1], ..., y[m-k]}, where n and m are the lengths of x and y,
respectively.
confint : ndarray, optional
Confidence intervals for the CCF at lags 0, 1, ..., nlags-1 using the
level given by alpha and the standard deviation calculated as
1/sqrt(len(x)) [1]. Shape (nlags, 2). Returned if alpha is not None.
Notes
-----
If adjusted is True, the denominator for the cross-correlation at lag k is
n - k rather than n.
References
----------
.. [1] Brockwell and Davis, 2016. Introduction to Time Series and
Forecasting, 3rd edition, p. 242. | ccf | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
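A sketch of how the returned index maps to the lead of x over y (editorial addition; the shifted series is an assumption, and the wrap-around introduced by np.roll is ignored for illustration):

import numpy as np
from statsmodels.tsa.stattools import ccf

rng = np.random.default_rng(2)
x = rng.standard_normal(300)
y = np.roll(x, -2)  # y[t] = x[t + 2], so corr(x[k + t], y[t]) peaks at k = 2
r, confint = ccf(x, y, adjusted=False, nlags=10, alpha=0.05)
print(np.argmax(r))  # expect 2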
def levinson_durbin(s, nlags=10, isacov=False):
"""
Levinson-Durbin recursion for autoregressive processes.
Parameters
----------
s : array_like
If isacov is False, then this is the time series. If isacov is True,
then this is interpreted as the autocovariance starting with lag 0.
nlags : int, optional
The largest lag to include in recursion or order of the autoregressive
process.
isacov : bool, optional
Flag indicating whether the first argument, s, contains the
autocovariances or the data series.
Returns
-------
sigma_v : float
The estimate of the error variance.
arcoefs : ndarray
The estimate of the autoregressive coefficients for a model including
nlags.
pacf : ndarray
The partial autocorrelation function.
sigma : ndarray
The entire sigma array from intermediate result, last value is sigma_v.
phi : ndarray
The entire phi array from intermediate result, last column contains
autoregressive coefficients for AR(nlags).
Notes
-----
This function returns currently all results, but maybe we drop sigma and
phi from the returns.
If this function is called with the time series (isacov=False), then the
sample autocovariance function is calculated with the default options
(biased, no fft).
"""
s = array_like(s, "s")
nlags = int_like(nlags, "nlags")
isacov = bool_like(isacov, "isacov")
order = nlags
if isacov:
sxx_m = s
else:
sxx_m = acovf(s, fft=False)[: order + 1] # not tested
phi = np.zeros((order + 1, order + 1), "d")
sig = np.zeros(order + 1)
# initial points for the recursion
phi[1, 1] = sxx_m[1] / sxx_m[0]
sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
for k in range(2, order + 1):
phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k - 1], sxx_m[1:k][::-1])) / sig[k - 1]
for j in range(1, k):
phi[j, k] = phi[j, k - 1] - phi[k, k] * phi[k - j, k - 1]
sig[k] = sig[k - 1] * (1 - phi[k, k] ** 2)
sigma_v = sig[-1]
arcoefs = phi[1:, -1]
pacf_ = np.diag(phi).copy()
pacf_[0] = 1.0
return sigma_v, arcoefs, pacf_, sig, phi # return everything | Levinson-Durbin recursion for autoregressive processes.
Parameters
----------
s : array_like
If isacov is False, then this is the time series. If isacov is True,
then this is interpreted as the autocovariance starting with lag 0.
nlags : int, optional
The largest lag to include in recursion or order of the autoregressive
process.
isacov : bool, optional
Flag indicating whether the first argument, s, contains the
autocovariances or the data series.
Returns
-------
sigma_v : float
The estimate of the error variance.
arcoefs : ndarray
The estimate of the autoregressive coefficients for a model including
nlags.
pacf : ndarray
The partial autocorrelation function.
sigma : ndarray
The entire sigma array from intermediate result, last value is sigma_v.
phi : ndarray
The entire phi array from intermediate result, last column contains
autoregressive coefficients for AR(nlags).
Notes
-----
This function returns currently all results, but maybe we drop sigma and
phi from the returns.
If this function is called with the time series (isacov=False), then the
sample autocovariance function is calculated with the default options
(biased, no fft). | levinson_durbin | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
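A sketch driving the recursion with theoretical AR(1) autocovariances (editorial addition; the process parameters are assumptions chosen so the output is known in closed form):

import numpy as np
from statsmodels.tsa.stattools import levinson_durbin

rho = 0.7  # AR(1) coefficient with unit innovation variance
acov = rho ** np.arange(6) / (1 - rho ** 2)  # gamma_k = rho**k / (1 - rho**2)
sigma_v, arcoefs, pacf_, sig, phi = levinson_durbin(acov, nlags=5, isacov=True)
print(arcoefs)  # approximately [0.7, 0, 0, 0, 0]
print(sigma_v)  # approximately 1.0, the innovation variance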
def levinson_durbin_pacf(pacf, nlags=None):
"""
Levinson-Durbin algorithm that returns the acf and ar coefficients.
Parameters
----------
pacf : array_like
Partial autocorrelation array for lags 0, 1, ... p.
nlags : int, optional
Number of lags in the AR model. If omitted, returns coefficients from
an AR(p) and the first p autocorrelations.
Returns
-------
arcoefs : ndarray
AR coefficients computed from the partial autocorrelations.
acf : ndarray
The acf computed from the partial autocorrelations. Array returned
contains the autocorrelations corresponding to lags 0, 1, ..., p.
References
----------
.. [1] Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series
and forecasting. Springer.
"""
pacf = array_like(pacf, "pacf")
nlags = int_like(nlags, "nlags", optional=True)
pacf = np.squeeze(np.asarray(pacf))
if pacf[0] != 1:
raise ValueError(
    "The first entry of the pacf corresponds to lag 0 and so must be 1."
)
pacf = pacf[1:]
n = pacf.shape[0]
if nlags is not None:
if nlags > n:
raise ValueError(
"Must provide at least as many values from the "
"pacf as the number of lags."
)
pacf = pacf[:nlags]
n = pacf.shape[0]
acf = np.zeros(n + 1)
acf[1] = pacf[0]
nu = np.cumprod(1 - pacf**2)
arcoefs = pacf.copy()
for i in range(1, n):
prev = arcoefs[: -(n - i)].copy()
arcoefs[: -(n - i)] = prev - arcoefs[i] * prev[::-1]
acf[i + 1] = arcoefs[i] * nu[i - 1] + prev.dot(acf[1 : -(n - i)][::-1])
acf[0] = 1
return arcoefs, acf | Levinson-Durbin algorithm that returns the acf and ar coefficients.
Parameters
----------
pacf : array_like
Partial autocorrelation array for lags 0, 1, ... p.
nlags : int, optional
Number of lags in the AR model. If omitted, returns coefficients from
an AR(p) and the first p autocorrelations.
Returns
-------
arcoefs : ndarray
AR coefficients computed from the partial autocorrelations.
acf : ndarray
The acf computed from the partial autocorrelations. Array returned
contains the autocorrelations corresponding to lags 0, 1, ..., p.
References
----------
.. [1] Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series
and forecasting. Springer. | levinson_durbin_pacf | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
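A sketch inverting the PACF of an AR(1) process (editorial addition; the inputs are assumptions with a closed-form answer):

import numpy as np
from statsmodels.tsa.stattools import levinson_durbin_pacf

# PACF of an AR(1) with coefficient 0.7: 1 at lag 0, 0.7 at lag 1, zeros after
arcoefs, acf_vals = levinson_durbin_pacf(np.array([1.0, 0.7, 0.0, 0.0]))
print(arcoefs)   # [0.7, 0.0, 0.0]
print(acf_vals)  # [1.0, 0.7, 0.49, 0.343], i.e. 0.7 ** k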
def grangercausalitytests(x, maxlag, addconst=True, verbose=None):
"""
Four tests for Granger non-causality of 2 time series.
All four tests give similar results. `params_ftest` and `ssr_ftest` are
equivalent, based on the F test, which is identical to lmtest:grangertest in R.
Parameters
----------
x : array_like
The data for testing whether the time series in the second column Granger
causes the time series in the first column. Missing values are not
supported.
maxlag : {int, Iterable[int]}
If an integer, computes the test for all lags up to maxlag. If an
iterable, computes the tests only for the lags in maxlag.
addconst : bool
Include a constant in the model.
verbose : bool
Print results. Deprecated.
.. deprecated:: 0.14
   verbose is deprecated and will be removed after 0.15 is released.
Returns
-------
dict
All test results, dictionary keys are the number of lags. For each
lag the values are a tuple, with the first element a dictionary with
test statistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted
model and the restriction (contrast) matrix for the parameter f_test.
Notes
-----
TODO: convert to class and attach results properly
The Null hypothesis for grangercausalitytests is that the time series in
the second column, x2, does NOT Granger cause the time series in the first
column, x1. Granger causality means that past values of x2 have a
statistically significant effect on the current value of x1, taking past
values of x1 into account as regressors. We reject the null hypothesis
that x2 does not Granger cause x1 if the pvalues are below a desired size
of the test.
The null hypothesis for all four tests is that the coefficients
corresponding to past values of the second time series are zero.
`params_ftest`, `ssr_ftest` are based on F distribution
`ssr_chi2test`, `lrtest` are based on chi-square distribution
References
----------
.. [1] https://en.wikipedia.org/wiki/Granger_causality
.. [2] Greene: Econometric Analysis
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.tsa.stattools import grangercausalitytests
>>> import numpy as np
>>> data = sm.datasets.macrodata.load_pandas()
>>> data = data.data[["realgdp", "realcons"]].pct_change().dropna()
All lags up to 4
>>> gc_res = grangercausalitytests(data, 4)
Only lag 4
>>> gc_res = grangercausalitytests(data, [4])
"""
x = array_like(x, "x", ndim=2)
if not np.isfinite(x).all():
raise ValueError("x contains NaN or inf values.")
addconst = bool_like(addconst, "addconst")
if verbose is not None:
verbose = bool_like(verbose, "verbose")
warnings.warn(
"verbose is deprecated since functions should not print results",
FutureWarning,
)
else:
verbose = True # old default
try:
maxlag = int_like(maxlag, "maxlag")
if maxlag <= 0:
raise ValueError("maxlag must be a positive integer")
lags = np.arange(1, maxlag + 1)
except TypeError:
lags = np.array([int(lag) for lag in maxlag])
maxlag = lags.max()
if lags.min() <= 0 or lags.size == 0:
raise ValueError(
"maxlag must be a non-empty list containing only " "positive integers"
)
if x.shape[0] <= 3 * maxlag + int(addconst):
raise ValueError(
"Insufficient observations. Maximum allowable "
"lag is {}".format(int((x.shape[0] - int(addconst)) / 3) - 1)
)
resli = {}
for mlg in lags:
result = {}
if verbose:
print("\nGranger Causality")
print("number of lags (no zero)", mlg)
mxlg = mlg
# create lagmat of both time series
dta = lagmat2ds(x, mxlg, trim="both", dropex=1)
# add constant
if addconst:
dtaown = add_constant(dta[:, 1 : (mxlg + 1)], prepend=False)
dtajoint = add_constant(dta[:, 1:], prepend=False)
if (
dtajoint.shape[1] == (dta.shape[1] - 1)
or (dtajoint.max(0) == dtajoint.min(0)).sum() != 1
):
raise InfeasibleTestError(
"The x values include a column with constant values and so"
" the test statistic cannot be computed."
)
else:
raise NotImplementedError("Not Implemented")
# dtaown = dta[:, 1:mxlg]
# dtajoint = dta[:, 1:]
# Run ols on both models without and with lags of second variable
res2down = OLS(dta[:, 0], dtaown).fit()
res2djoint = OLS(dta[:, 0], dtajoint).fit()
# print results
# for ssr based tests see:
# http://support.sas.com/rnd/app/examples/ets/granger/index.htm
# the other tests are made-up
# Granger Causality test using ssr (F statistic)
if res2djoint.model.k_constant:
tss = res2djoint.centered_tss
else:
tss = res2djoint.uncentered_tss
if (
tss == 0
or res2djoint.ssr == 0
or np.isnan(res2djoint.rsquared)
or (res2djoint.ssr / tss) < np.finfo(float).eps
or res2djoint.params.shape[0] != dtajoint.shape[1]
):
raise InfeasibleTestError(
"The Granger causality test statistic cannot be computed "
"because the VAR has a perfect fit of the data."
)
fgc1 = (
(res2down.ssr - res2djoint.ssr)
/ res2djoint.ssr
/ mxlg
* res2djoint.df_resid
)
if verbose:
print(
"ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,"
" df_num=%d"
% (
fgc1,
stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
res2djoint.df_resid,
mxlg,
)
)
result["ssr_ftest"] = (
fgc1,
stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
res2djoint.df_resid,
mxlg,
)
# Granger Causality test using ssr (ch2 statistic)
fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
if verbose:
print(
"ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, "
"df=%d" % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
)
result["ssr_chi2test"] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
# likelihood ratio test pvalue:
lr = -2 * (res2down.llf - res2djoint.llf)
if verbose:
print(
"likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d"
% (lr, stats.chi2.sf(lr, mxlg), mxlg)
)
result["lrtest"] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
# F test that all lag coefficients of exog are zero
rconstr = np.column_stack(
(np.zeros((mxlg, mxlg)), np.eye(mxlg, mxlg), np.zeros((mxlg, 1)))
)
ftres = res2djoint.f_test(rconstr)
if verbose:
print(
"parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d,"
" df_num=%d"
% (ftres.fvalue, ftres.pvalue, ftres.df_denom, ftres.df_num)
)
result["params_ftest"] = (
np.squeeze(ftres.fvalue)[()],
np.squeeze(ftres.pvalue)[()],
ftres.df_denom,
ftres.df_num,
)
resli[mxlg] = (result, [res2down, res2djoint, rconstr])
return resli | Four tests for Granger non-causality of 2 time series.
All four tests give similar results. `params_ftest` and `ssr_ftest` are
equivalent, based on the F test, which is identical to lmtest:grangertest in R.
Parameters
----------
x : array_like
The data for testing whether the time series in the second column Granger
causes the time series in the first column. Missing values are not
supported.
maxlag : {int, Iterable[int]}
If an integer, computes the test for all lags up to maxlag. If an
iterable, computes the tests only for the lags in maxlag.
addconst : bool
Include a constant in the model.
verbose : bool
Print results. Deprecated.
.. deprecated:: 0.14
   verbose is deprecated and will be removed after 0.15 is released.
Returns
-------
dict
All test results, dictionary keys are the number of lags. For each
lag the values are a tuple, with the first element a dictionary with
test statistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted
model and the restriction (contrast) matrix for the parameter f_test.
Notes
-----
TODO: convert to class and attach results properly
The Null hypothesis for grangercausalitytests is that the time series in
the second column, x2, does NOT Granger cause the time series in the first
column, x1. Granger causality means that past values of x2 have a
statistically significant effect on the current value of x1, taking past
values of x1 into account as regressors. We reject the null hypothesis
that x2 does not Granger cause x1 if the pvalues are below a desired size
of the test.
The null hypothesis for all four tests is that the coefficients
corresponding to past values of the second time series are zero.
`params_ftest`, `ssr_ftest` are based on F distribution
`ssr_chi2test`, `lrtest` are based on chi-square distribution
References
----------
.. [1] https://en.wikipedia.org/wiki/Granger_causality
.. [2] Greene: Econometric Analysis
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.tsa.stattools import grangercausalitytests
>>> import numpy as np
>>> data = sm.datasets.macrodata.load_pandas()
>>> data = data.data[["realgdp", "realcons"]].pct_change().dropna()
All lags up to 4
>>> gc_res = grangercausalitytests(data, 4)
Only lag 4
>>> gc_res = grangercausalitytests(data, [4]) | grangercausalitytests | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
def coint(
y0,
y1,
trend="c",
method="aeg",
maxlag=None,
autolag: str | None = "aic",
return_results=None,
):
"""
Test for no-cointegration of a univariate equation.
The null hypothesis is no cointegration. Variables in y0 and y1 are
assumed to be integrated of order 1, I(1).
This uses the augmented Engle-Granger two-step cointegration test.
Constant or trend is included in 1st stage regression, i.e. in
cointegrating equation.
**Warning:** The autolag default has changed compared to statsmodels 0.8.
In 0.8 autolag was always None; now the keyword is used and defaults to
"aic". Use `autolag=None` to avoid the lag search.
Parameters
----------
y0 : array_like
The first element in cointegrated system. Must be 1-d.
y1 : array_like
The remaining elements in cointegrated system.
trend : str {"c", "ct"}
The trend term included in regression for cointegrating equation.
* "c" : constant.
* "ct" : constant and linear trend.
* also available quadratic trend "ctt", and no constant "n".
method : {"aeg"}
Only "aeg" (augmented Engle-Granger) is available.
maxlag : None or int
Argument for `adfuller`, largest or given number of lags.
autolag : str
Argument for `adfuller`, lag selection criterion.
* If None, then maxlag lags are used without lag search.
* If "AIC" (default) or "BIC", then the number of lags is chosen
to minimize the corresponding information criterion.
* "t-stat" based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test.
return_results : bool
For future compatibility, currently only tuple available.
If True, then a results instance is returned. Otherwise, a tuple
with the test outcome is returned. Set `return_results=False` to
avoid future changes in return.
Returns
-------
coint_t : float
The t-statistic of unit-root test on residuals.
pvalue : float
MacKinnon"s approximate, asymptotic p-value based on MacKinnon (1994).
crit_value : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels based on regression curve. This depends on the number of
observations.
Notes
-----
The Null hypothesis is that there is no cointegration, the alternative
hypothesis is that there is a cointegrating relationship. If the pvalue is
small, below a critical size, then we can reject the hypothesis that there
is no cointegrating relationship.
P-values and critical values are obtained through regression surface
approximation from MacKinnon 1994 and 2010.
If the two series are almost perfectly collinear, then computing the
test is numerically unstable. However, the two series will be cointegrated
under the maintained assumption that they are integrated. In this case
the t-statistic will be set to -inf and the pvalue to zero.
TODO: We could handle gaps in data by dropping rows with nans in the
auxiliary regressions. Not implemented yet, currently assumes no nans
and no gaps in time series.
References
----------
.. [1] MacKinnon, J.G. 1994 "Approximate Asymptotic Distribution Functions
for Unit-Root and Cointegration Tests." Journal of Business & Economic
Statistics, 12.2, 167-76.
.. [2] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."
Queen"s University, Dept of Economics Working Papers 1227.
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
y0 = array_like(y0, "y0")
y1 = array_like(y1, "y1", ndim=2)
trend = string_like(trend, "trend", options=("c", "n", "ct", "ctt"))
string_like(method, "method", options=("aeg",))
maxlag = int_like(maxlag, "maxlag", optional=True)
autolag = string_like(
autolag, "autolag", optional=True, options=("aic", "bic", "t-stat")
)
return_results = bool_like(return_results, "return_results", optional=True)
nobs, k_vars = y1.shape
k_vars += 1 # add 1 for y0
if trend == "n":
xx = y1
else:
xx = add_trend(y1, trend=trend, prepend=False)
res_co = OLS(y0, xx).fit()
if res_co.rsquared < 1 - 100 * SQRTEPS:
res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=autolag, regression="n")
else:
warnings.warn(
"y0 and y1 are (almost) perfectly colinear."
"Cointegration test is not reliable in this case.",
CollinearityWarning,
stacklevel=2,
)
# Edge case where series are too similar
res_adf = (-np.inf,)
# no constant or trend, see egranger in Stata and MacKinnon
if trend == "n":
crit = [np.nan] * 3 # 2010 critical values not available
else:
crit = mackinnoncrit(N=k_vars, regression=trend, nobs=nobs - 1)
# nobs - 1, the -1 is to match egranger in Stata, I do not know why.
# TODO: check nobs or df = nobs - k
pval_asy = mackinnonp(res_adf[0], regression=trend, N=k_vars)
return res_adf[0], pval_asy, crit | Test for no-cointegration of a univariate equation.
The null hypothesis is no cointegration. Variables in y0 and y1 are
assumed to be integrated of order 1, I(1).
This uses the augmented Engle-Granger two-step cointegration test.
Constant or trend is included in 1st stage regression, i.e. in
cointegrating equation.
**Warning:** The autolag default has changed compared to statsmodels 0.8.
In 0.8 autolag was always None; now the keyword is used and defaults to
"aic". Use `autolag=None` to avoid the lag search.
Parameters
----------
y0 : array_like
The first element in cointegrated system. Must be 1-d.
y1 : array_like
The remaining elements in cointegrated system.
trend : str {"c", "ct"}
The trend term included in regression for cointegrating equation.
* "c" : constant.
* "ct" : constant and linear trend.
* also available quadratic trend "ctt", and no constant "n".
method : {"aeg"}
Only "aeg" (augmented Engle-Granger) is available.
maxlag : None or int
Argument for `adfuller`, largest or given number of lags.
autolag : str
Argument for `adfuller`, lag selection criterion.
* If None, then maxlag lags are used without lag search.
* If "AIC" (default) or "BIC", then the number of lags is chosen
to minimize the corresponding information criterion.
* "t-stat" based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test.
return_results : bool
For future compatibility, currently only tuple available.
If True, then a results instance is returned. Otherwise, a tuple
with the test outcome is returned. Set `return_results=False` to
avoid future changes in return.
Returns
-------
coint_t : float
The t-statistic of unit-root test on residuals.
pvalue : float
MacKinnon"s approximate, asymptotic p-value based on MacKinnon (1994).
crit_value : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels based on regression curve. This depends on the number of
observations.
Notes
-----
The Null hypothesis is that there is no cointegration, the alternative
hypothesis is that there is a cointegrating relationship. If the pvalue is
small, below a critical size, then we can reject the hypothesis that there
is no cointegrating relationship.
P-values and critical values are obtained through regression surface
approximation from MacKinnon 1994 and 2010.
If the two series are almost perfectly collinear, then computing the
test is numerically unstable. However, the two series will be cointegrated
under the maintained assumption that they are integrated. In this case
the t-statistic will be set to -inf and the pvalue to zero.
TODO: We could handle gaps in data by dropping rows with nans in the
auxiliary regressions. Not implemented yet, currently assumes no nans
and no gaps in time series.
References
----------
.. [1] MacKinnon, J.G. 1994 "Approximate Asymptotic Distribution Functions
for Unit-Root and Cointegration Tests." Journal of Business & Economic
Statistics, 12.2, 167-76.
.. [2] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."
Queen"s University, Dept of Economics Working Papers 1227.
http://ideas.repec.org/p/qed/wpaper/1227.html | coint | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
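A sketch on two simulated cointegrated random walks (editorial addition; the data-generating process is an assumption):

import numpy as np
from statsmodels.tsa.stattools import coint

rng = np.random.default_rng(4)
y1 = np.cumsum(rng.standard_normal(500))        # I(1) driver
y0 = 2.0 + 0.8 * y1 + rng.standard_normal(500)  # stationary deviation from y1
t_stat, pvalue, crit = coint(y0, y1, trend="c")
print(t_stat, pvalue)  # strongly negative statistic, small p-value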
def has_missing(data):
"""
Returns True if "data" contains missing entries, otherwise False
"""
return np.isnan(np.sum(data)) | Returns True if "data" contains missing entries, otherwise False | has_missing | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
def kpss(
x,
regression: Literal["c", "ct"] = "c",
nlags: Literal["auto", "legacy"] | int = "auto",
store: bool = False,
) -> tuple[float, float, int, dict[str, float]]:
"""
Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.
Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null
hypothesis that x is level or trend stationary.
Parameters
----------
x : array_like, 1d
The data series to test.
regression : str{"c", "ct"}
The null hypothesis for the KPSS test.
* "c" : The data is stationary around a constant (default).
* "ct" : The data is stationary around a trend.
nlags : {str, int}, optional
Indicates the number of lags to be used. If "auto" (default), lags
is calculated using the data-dependent method of Hobijn et al. (1998).
See also Andrews (1991), Newey & West (1994), and Schwert (1989). If
set to "legacy", uses int(12 * (n / 100)**(1 / 4)) , as outlined in
Schwert (1989).
store : bool
If True, then a result instance is returned additionally to
the KPSS statistic (default is False).
Returns
-------
kpss_stat : float
The KPSS test statistic.
p_value : float
The p-value of the test. The p-value is interpolated from
Table 1 in Kwiatkowski et al. (1992), and a boundary point
is returned if the test statistic is outside the table of
critical values, that is, if the p-value is outside the
interval (0.01, 0.1).
lags : int
The truncation lag parameter.
crit : dict
The critical values at 10%, 5%, 2.5% and 1%. Based on
Kwiatkowski et al. (1992).
resstore : (optional) instance of ResultStore
An instance of a dummy class with results attached as attributes.
Notes
-----
To estimate sigma^2 the Newey-West estimator is used. If lags is "legacy",
the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),
as outlined in Schwert (1989). The p-values are interpolated from
Table 1 of Kwiatkowski et al. (1992). If the computed statistic is
outside the table of critical values, then a warning message is
generated.
Missing values are not handled.
See the notebook `Stationarity and detrending (ADF/KPSS)
<../examples/notebooks/generated/stationarity_detrending_adf_kpss.html>`__
for an overview.
References
----------
.. [1] Andrews, D.W.K. (1991). Heteroskedasticity and autocorrelation
consistent covariance matrix estimation. Econometrica, 59: 817-858.
.. [2] Hobijn, B., Franses, P.H., & Ooms, M. (2004). Generalizations of the
KPSS-test for stationarity. Statistica Neerlandica, 52: 483-502.
.. [3] Kwiatkowski, D., Phillips, P.C.B., Schmidt, P., & Shin, Y. (1992).
Testing the null hypothesis of stationarity against the alternative of a
unit root. Journal of Econometrics, 54: 159-178.
.. [4] Newey, W.K., & West, K.D. (1994). Automatic lag selection in
covariance matrix estimation. Review of Economic Studies, 61: 631-653.
.. [5] Schwert, G. W. (1989). Tests for unit roots: A Monte Carlo
investigation. Journal of Business and Economic Statistics, 7 (2):
147-159.
"""
x = array_like(x, "x")
regression = string_like(regression, "regression", options=("c", "ct"))
store = bool_like(store, "store")
nobs = x.shape[0]
hypo = regression
# if m is not one, n != m * n
if nobs != x.size:
raise ValueError(f"x of shape {x.shape} not understood")
if hypo == "ct":
# p. 162 Kwiatkowski et al. (1992): y_t = beta * t + r_t + e_t,
# where beta is the trend, r_t a random walk and e_t a stationary
# error term.
resids = OLS(x, add_constant(np.arange(1, nobs + 1))).fit().resid
crit = [0.119, 0.146, 0.176, 0.216]
else: # hypo == "c"
# special case of the model above, where beta = 0 (so the null
# hypothesis is that the data is stationary around r_0).
resids = x - x.mean()
crit = [0.347, 0.463, 0.574, 0.739]
if nlags == "legacy":
nlags = int(np.ceil(12.0 * np.power(nobs / 100.0, 1 / 4.0)))
nlags = min(nlags, nobs - 1)
elif nlags == "auto" or nlags is None:
if nlags is None:
# TODO: Remove before 0.14 is released
warnings.warn(
"None is not a valid value for nlags. It must be an integer, "
"'auto' or 'legacy'. None will raise starting in 0.14",
FutureWarning,
stacklevel=2,
)
# autolag method of Hobijn et al. (1998)
nlags = _kpss_autolag(resids, nobs)
nlags = min(nlags, nobs - 1)
elif isinstance(nlags, str):
raise ValueError("nvals must be 'auto' or 'legacy' when not an int")
else:
nlags = int_like(nlags, "nlags", optional=False)
if nlags >= nobs:
raise ValueError(
f"lags ({nlags}) must be < number of observations ({nobs})"
)
pvals = [0.10, 0.05, 0.025, 0.01]
eta = np.sum(resids.cumsum() ** 2) / (nobs**2) # eq. 11, p. 165
s_hat = _sigma_est_kpss(resids, nobs, nlags)
kpss_stat = eta / s_hat
p_value = np.interp(kpss_stat, crit, pvals)
warn_msg = """\
The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is {direction} than the p-value returned.
"""
if p_value == pvals[-1]:
warnings.warn(
warn_msg.format(direction="smaller"),
InterpolationWarning,
stacklevel=2,
)
elif p_value == pvals[0]:
warnings.warn(
warn_msg.format(direction="greater"),
InterpolationWarning,
stacklevel=2,
)
crit_dict = {"10%": crit[0], "5%": crit[1], "2.5%": crit[2], "1%": crit[3]}
if store:
rstore = ResultsStore()
rstore.lags = nlags
rstore.nobs = nobs
stationary_type = "level" if hypo == "c" else "trend"
rstore.H0 = f"The series is {stationary_type} stationary"
rstore.HA = f"The series is not {stationary_type} stationary"
return kpss_stat, p_value, crit_dict, rstore
else:
return kpss_stat, p_value, nlags, crit_dict | Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.
Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null
hypothesis that x is level or trend stationary.
Parameters
----------
x : array_like, 1d
The data series to test.
regression : str{"c", "ct"}
The null hypothesis for the KPSS test.
* "c" : The data is stationary around a constant (default).
* "ct" : The data is stationary around a trend.
nlags : {str, int}, optional
Indicates the number of lags to be used. If "auto" (default), lags
is calculated using the data-dependent method of Hobijn et al. (1998).
See also Andrews (1991), Newey & West (1994), and Schwert (1989). If
set to "legacy", uses int(12 * (n / 100)**(1 / 4)) , as outlined in
Schwert (1989).
store : bool
If True, then a result instance is returned additionally to
the KPSS statistic (default is False).
Returns
-------
kpss_stat : float
The KPSS test statistic.
p_value : float
The p-value of the test. The p-value is interpolated from
Table 1 in Kwiatkowski et al. (1992), and a boundary point
is returned if the test statistic is outside the table of
critical values, that is, if the p-value is outside the
interval (0.01, 0.1).
lags : int
The truncation lag parameter.
crit : dict
The critical values at 10%, 5%, 2.5% and 1%. Based on
Kwiatkowski et al. (1992).
resstore : (optional) instance of ResultStore
An instance of a dummy class with results attached as attributes.
Notes
-----
To estimate sigma^2 the Newey-West estimator is used. If lags is "legacy",
the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),
as outlined in Schwert (1989). The p-values are interpolated from
Table 1 of Kwiatkowski et al. (1992). If the computed statistic is
outside the table of critical values, then a warning message is
generated.
Missing values are not handled.
See the notebook `Stationarity and detrending (ADF/KPSS)
<../examples/notebooks/generated/stationarity_detrending_adf_kpss.html>`__
for an overview.
References
----------
.. [1] Andrews, D.W.K. (1991). Heteroskedasticity and autocorrelation
consistent covariance matrix estimation. Econometrica, 59: 817-858.
.. [2] Hobijn, B., Franses, P.H., & Ooms, M. (2004). Generalizations of the
KPSS-test for stationarity. Statistica Neerlandica, 52: 483-502.
.. [3] Kwiatkowski, D., Phillips, P.C.B., Schmidt, P., & Shin, Y. (1992).
Testing the null hypothesis of stationarity against the alternative of a
unit root. Journal of Econometrics, 54: 159-178.
.. [4] Newey, W.K., & West, K.D. (1994). Automatic lag selection in
covariance matrix estimation. Review of Economic Studies, 61: 631-653.
.. [5] Schwert, G. W. (1989). Tests for unit roots: A Monte Carlo
investigation. Journal of Business and Economic Statistics, 7 (2):
147-159. | kpss | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
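A sketch contrasting a stationary series with a random walk (editorial addition; the simulated inputs are assumptions, and an InterpolationWarning may fire when the statistic leaves the look-up table):

import numpy as np
from statsmodels.tsa.stattools import kpss

rng = np.random.default_rng(5)
stat, pval, lags_used, crit = kpss(rng.standard_normal(500), regression="c")
print(pval)  # large: the null of level stationarity is not rejected

stat_rw, pval_rw, _, _ = kpss(np.cumsum(rng.standard_normal(500)), regression="c")
print(pval_rw)  # typically at the 0.01 boundary: stationarity rejected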
def _sigma_est_kpss(resids, nobs, lags):
"""
Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the
consistent estimator for the variance.
"""
s_hat = np.sum(resids**2)
for i in range(1, lags + 1):
resids_prod = np.dot(resids[i:], resids[: nobs - i])
s_hat += 2 * resids_prod * (1.0 - (i / (lags + 1.0)))
return s_hat / nobs | Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the
consistent estimator for the variance. | _sigma_est_kpss | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
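For reference, a transcription of what the loop above computes, with Bartlett weights (written to match the code rather than quoted from the paper):

s^2(l) = \frac{1}{n}\left[\sum_{t=1}^{n} e_t^2 + 2\sum_{j=1}^{l}\left(1 - \frac{j}{l+1}\right)\sum_{t=j+1}^{n} e_t e_{t-j}\right]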
def _kpss_autolag(resids, nobs):
"""
Computes the number of lags for covariance matrix estimation in KPSS test
using the method of Hobijn et al. (1998). See also Andrews (1991), Newey & West
(1994), and Schwert (1989). Assumes Bartlett / Newey-West kernel.
"""
covlags = int(np.power(nobs, 2.0 / 9.0))
s0 = np.sum(resids**2) / nobs
s1 = 0
for i in range(1, covlags + 1):
resids_prod = np.dot(resids[i:], resids[: nobs - i])
resids_prod /= nobs / 2.0
s0 += resids_prod
s1 += i * resids_prod
s_hat = s1 / s0
pwr = 1.0 / 3.0
gamma_hat = 1.1447 * np.power(s_hat * s_hat, pwr)
autolags = int(gamma_hat * np.power(nobs, pwr))
return autolags | Computes the number of lags for covariance matrix estimation in KPSS test
using the method of Hobijn et al. (1998). See also Andrews (1991), Newey & West
(1994), and Schwert (1989). Assumes Bartlett / Newey-West kernel. | _kpss_autolag | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
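A transcription of the selection rule implemented above (written to match the code; m is the pilot truncation used for both sums):

\hat{s}^{(0)} = \frac{1}{n}\sum_t e_t^2 + \frac{2}{n}\sum_{j=1}^{m}\sum_t e_t e_{t-j}, \qquad \hat{s}^{(1)} = \frac{2}{n}\sum_{j=1}^{m} j \sum_t e_t e_{t-j}, \qquad m = \lfloor n^{2/9} \rfloor

\text{lags} = \left\lfloor 1.1447\,\bigl(\hat{s}^{(1)}/\hat{s}^{(0)}\bigr)^{2/3}\, n^{1/3} \right\rfloor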
def range_unit_root_test(x, store=False):
"""
Range unit-root test for stationarity.
Computes the Range Unit-Root (RUR) test for the null
hypothesis that x is stationary.
Parameters
----------
x : array_like, 1d
The data series to test.
store : bool
If True, then a result instance is returned additionally to
the RUR statistic (default is False).
Returns
-------
rur_stat : float
The RUR test statistic.
p_value : float
The p-value of the test. The p-value is interpolated from
Table 1 in Aparicio et al. (2006), and a boundary point
is returned if the test statistic is outside the table of
critical values, that is, if the p-value is outside the
interval (0.01, 0.1).
crit : dict
The critical values at 10%, 5%, 2.5% and 1%. Based on
Aparicio et al. (2006).
resstore : (optional) instance of ResultStore
An instance of a dummy class with results attached as attributes.
Notes
-----
The p-values are interpolated from
Table 1 of Aparicio et al. (2006). If the computed statistic is
outside the table of critical values, then a warning message is
generated.
Missing values are not handled.
References
----------
.. [1] Aparicio, F., Escribano A., Sipols, A.E. (2006). Range Unit-Root (RUR)
tests: robust against nonlinearities, error distributions, structural breaks
and outliers. Journal of Time Series Analysis, 27 (4): 545-576.
"""
x = array_like(x, "x")
store = bool_like(store, "store")
nobs = x.shape[0]
# if m is not one, n != m * n
if nobs != x.size:
raise ValueError(f"x of shape {x.shape} not understood")
# Table from [1] has been replicated using 200,000 samples
# Critical values for new n_obs values have been identified
pvals = [0.01, 0.025, 0.05, 0.10, 0.90, 0.95]
n = np.array([25, 50, 100, 150, 200, 250, 500, 1000, 2000, 3000, 4000, 5000])
crit = np.array(
[
[0.6626, 0.8126, 0.9192, 1.0712, 2.4863, 2.7312],
[0.7977, 0.9274, 1.0478, 1.1964, 2.6821, 2.9613],
[0.9070, 1.0243, 1.1412, 1.2888, 2.8317, 3.1393],
[0.9543, 1.0768, 1.1869, 1.3294, 2.8915, 3.2049],
[0.9833, 1.0984, 1.2101, 1.3494, 2.9308, 3.2482],
[0.9982, 1.1137, 1.2242, 1.3632, 2.9571, 3.2842],
[1.0494, 1.1643, 1.2712, 1.4076, 3.0207, 3.3584],
[1.0846, 1.1959, 1.2988, 1.4344, 3.0653, 3.4073],
[1.1121, 1.2200, 1.3230, 1.4556, 3.0948, 3.4439],
[1.1204, 1.2295, 1.3303, 1.4656, 3.1054, 3.4632],
[1.1309, 1.2347, 1.3378, 1.4693, 3.1165, 3.4717],
[1.1377, 1.2402, 1.3408, 1.4729, 3.1252, 3.4807],
]
)
# Interpolation for nobs
inter_crit = np.zeros((1, crit.shape[1]))
for i in range(crit.shape[1]):
f = interp1d(n, crit[:, i])
inter_crit[0, i] = f(nobs)
# Calculate RUR stat
xs = pd.Series(x)
exp_max = xs.expanding(1).max().shift(1)
exp_min = xs.expanding(1).min().shift(1)
count = (xs > exp_max).sum() + (xs < exp_min).sum()
rur_stat = count / np.sqrt(len(x))
k = len(pvals) - 1
for i in range(len(pvals) - 1, -1, -1):
if rur_stat < inter_crit[0, i]:
k = i
else:
break
p_value = pvals[k]
warn_msg = """\
The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is {direction} than the p-value returned.
"""
direction = ""
if p_value == pvals[-1]:
direction = "smaller"
elif p_value == pvals[0]:
direction = "larger"
if direction:
warnings.warn(
warn_msg.format(direction=direction),
InterpolationWarning,
stacklevel=2,
)
crit_dict = {
"10%": inter_crit[0, 3],
"5%": inter_crit[0, 2],
"2.5%": inter_crit[0, 1],
"1%": inter_crit[0, 0],
}
if store:
rstore = ResultsStore()
rstore.nobs = nobs
rstore.H0 = "The series is not stationary"
rstore.HA = "The series is stationary"
return rur_stat, p_value, crit_dict, rstore
else:
return rur_stat, p_value, crit_dict | Range unit-root test for stationarity.
Computes the Range Unit-Root (RUR) test for the null
hypothesis that x is stationary.
Parameters
----------
x : array_like, 1d
The data series to test.
store : bool
If True, then a result instance is returned additionally to
the RUR statistic (default is False).
Returns
-------
rur_stat : float
The RUR test statistic.
p_value : float
The p-value of the test. The p-value is interpolated from
Table 1 in Aparicio et al. (2006), and a boundary point
is returned if the test statistic is outside the table of
critical values, that is, if the p-value is outside the
interval (0.01, 0.1).
crit : dict
The critical values at 10%, 5%, 2.5% and 1%. Based on
Aparicio et al. (2006).
resstore : (optional) instance of ResultStore
An instance of a dummy class with results attached as attributes.
Notes
-----
The p-values are interpolated from
Table 1 of Aparicio et al. (2006). If the computed statistic is
outside the table of critical values, then a warning message is
generated.
Missing values are not handled.
References
----------
.. [1] Aparicio, F., Escribano A., Sipols, A.E. (2006). Range Unit-Root (RUR)
tests: robust against nonlinearities, error distributions, structural breaks
and outliers. Journal of Time Series Analysis, 27 (4): 545-576. | range_unit_root_test | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
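A sketch of the call signature on a simulated random walk (editorial addition; the input series is an assumption, and no interpretation is implied beyond what the docstring states):

import numpy as np
from statsmodels.tsa.stattools import range_unit_root_test

rng = np.random.default_rng(6)
stat, pval, crit = range_unit_root_test(np.cumsum(rng.standard_normal(500)))
print(stat, pval, crit["5%"])  # a random walk keeps setting new extremes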
def __init__(self):
"""
Critical values for the three different models specified for the
Zivot-Andrews unit-root test.
Notes
-----
The p-values are generated through Monte Carlo simulation using
100,000 replications and 2000 data points.
"""
self._za_critical_values = {}
# constant-only model
self._c = (
(0.001, -6.78442),
(0.100, -5.83192),
(0.200, -5.68139),
(0.300, -5.58461),
(0.400, -5.51308),
(0.500, -5.45043),
(0.600, -5.39924),
(0.700, -5.36023),
(0.800, -5.33219),
(0.900, -5.30294),
(1.000, -5.27644),
(2.500, -5.03340),
(5.000, -4.81067),
(7.500, -4.67636),
(10.000, -4.56618),
(12.500, -4.48130),
(15.000, -4.40507),
(17.500, -4.33947),
(20.000, -4.28155),
(22.500, -4.22683),
(25.000, -4.17830),
(27.500, -4.13101),
(30.000, -4.08586),
(32.500, -4.04455),
(35.000, -4.00380),
(37.500, -3.96144),
(40.000, -3.92078),
(42.500, -3.88178),
(45.000, -3.84503),
(47.500, -3.80549),
(50.000, -3.77031),
(52.500, -3.73209),
(55.000, -3.69600),
(57.500, -3.65985),
(60.000, -3.62126),
(65.000, -3.54580),
(70.000, -3.46848),
(75.000, -3.38533),
(80.000, -3.29112),
(85.000, -3.17832),
(90.000, -3.04165),
(92.500, -2.95146),
(95.000, -2.83179),
(96.000, -2.76465),
(97.000, -2.68624),
(98.000, -2.57884),
(99.000, -2.40044),
(99.900, -1.88932),
)
self._za_critical_values["c"] = np.asarray(self._c)
# trend-only model
self._t = (
(0.001, -83.9094),
(0.100, -13.8837),
(0.200, -9.13205),
(0.300, -6.32564),
(0.400, -5.60803),
(0.500, -5.38794),
(0.600, -5.26585),
(0.700, -5.18734),
(0.800, -5.12756),
(0.900, -5.07984),
(1.000, -5.03421),
(2.500, -4.65634),
(5.000, -4.40580),
(7.500, -4.25214),
(10.000, -4.13678),
(12.500, -4.03765),
(15.000, -3.95185),
(17.500, -3.87945),
(20.000, -3.81295),
(22.500, -3.75273),
(25.000, -3.69836),
(27.500, -3.64785),
(30.000, -3.59819),
(32.500, -3.55146),
(35.000, -3.50522),
(37.500, -3.45987),
(40.000, -3.41672),
(42.500, -3.37465),
(45.000, -3.33394),
(47.500, -3.29393),
(50.000, -3.25316),
(52.500, -3.21244),
(55.000, -3.17124),
(57.500, -3.13211),
(60.000, -3.09204),
(65.000, -3.01135),
(70.000, -2.92897),
(75.000, -2.83614),
(80.000, -2.73893),
(85.000, -2.62840),
(90.000, -2.49611),
(92.500, -2.41337),
(95.000, -2.30820),
(96.000, -2.25797),
(97.000, -2.19648),
(98.000, -2.11320),
(99.000, -1.99138),
(99.900, -1.67466),
)
self._za_critical_values["t"] = np.asarray(self._t)
# constant + trend model
self._ct = (
(0.001, -38.17800),
(0.100, -6.43107),
(0.200, -6.07279),
(0.300, -5.95496),
(0.400, -5.86254),
(0.500, -5.77081),
(0.600, -5.72541),
(0.700, -5.68406),
(0.800, -5.65163),
(0.900, -5.60419),
(1.000, -5.57556),
(2.500, -5.29704),
(5.000, -5.07332),
(7.500, -4.93003),
(10.000, -4.82668),
(12.500, -4.73711),
(15.000, -4.66020),
(17.500, -4.58970),
(20.000, -4.52855),
(22.500, -4.47100),
(25.000, -4.42011),
(27.500, -4.37387),
(30.000, -4.32705),
(32.500, -4.28126),
(35.000, -4.23793),
(37.500, -4.19822),
(40.000, -4.15800),
(42.500, -4.11946),
(45.000, -4.08064),
(47.500, -4.04286),
(50.000, -4.00489),
(52.500, -3.96837),
(55.000, -3.93200),
(57.500, -3.89496),
(60.000, -3.85577),
(65.000, -3.77795),
(70.000, -3.69794),
(75.000, -3.61852),
(80.000, -3.52485),
(85.000, -3.41665),
(90.000, -3.28527),
(92.500, -3.19724),
(95.000, -3.08769),
(96.000, -3.03088),
(97.000, -2.96091),
(98.000, -2.85581),
(99.000, -2.71015),
(99.900, -2.28767),
)
self._za_critical_values["ct"] = np.asarray(self._ct) | Critical values for the three different models specified for the
Zivot-Andrews unit-root test.
Notes
-----
The p-values are generated through Monte Carlo simulation using
100,000 replications and 2000 data points. | __init__ | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
def _za_crit(self, stat, model="c"):
"""
Linear interpolation for Zivot-Andrews p-values and critical values
Parameters
----------
stat : float
The ZA test statistic
model : {"c","t","ct"}
The model used when computing the ZA statistic. "c" is default.
Returns
-------
pvalue : float
The interpolated p-value
cvdict : dict
Critical values for the test statistic at the 1%, 5%, and 10%
levels
Notes
-----
The p-values are linearly interpolated from the quantiles of the
simulated ZA test statistic distribution.
"""
table = self._za_critical_values[model]
pcnts = table[:, 0]
stats = table[:, 1]
# ZA cv table contains quantiles multiplied by 100
pvalue = np.interp(stat, stats, pcnts) / 100.0
cv = [1.0, 5.0, 10.0]
crit_value = np.interp(cv, pcnts, stats)
cvdict = {
"1%": crit_value[0],
"5%": crit_value[1],
"10%": crit_value[2],
}
return pvalue, cvdict | Linear interpolation for Zivot-Andrews p-values and critical values
Parameters
----------
stat : float
The ZA test statistic
model : {"c","t","ct"}
The model used when computing the ZA statistic. "c" is default.
Returns
-------
pvalue : float
The interpolated p-value
cvdict : dict
Critical values for the test statistic at the 1%, 5%, and 10%
levels
Notes
-----
The p-values are linear interpolated from the quantiles of the
simulated ZA test statistic distribution | _za_crit | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
def _quick_ols(self, endog, exog):
"""
Minimal implementation of an OLS estimator, returning coefficient
t-statistics, for internal use.
"""
xpxi = np.linalg.inv(exog.T.dot(exog))
xpy = exog.T.dot(endog)
nobs, k_exog = exog.shape
b = xpxi.dot(xpy)
e = endog - exog.dot(b)
sigma2 = e.T.dot(e) / (nobs - k_exog)
return b / np.sqrt(np.diag(sigma2 * xpxi)) | Minimal implementation of an OLS estimator, returning coefficient t-statistics, for internal use. | _quick_ols | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause
def _format_regression_data(self, series, nobs, const, trend, cols, lags):
"""
Create the endog/exog data for the auxiliary regressions
from the original (standardized) series under test.
"""
# first-diff y and standardize for numerical stability
endog = np.diff(series, axis=0)
endog /= np.sqrt(endog.T.dot(endog))
series = series / np.sqrt(series.T.dot(series))
# reserve exog space
exog = np.zeros((endog[lags:].shape[0], cols + lags))
exog[:, 0] = const
# lagged y and dy
exog[:, cols - 1] = series[lags : (nobs - 1)]
exog[:, cols:] = lagmat(endog, lags, trim="none")[lags : exog.shape[0] + lags]
return endog, exog | Create the endog/exog data for the auxiliary regressions
from the original (standardized) series under test. | _format_regression_data | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
def _update_regression_exog(
self, exog, regression, period, nobs, const, trend, cols, lags
):
"""
Update the exog array for the next regression.
"""
cutoff = period - (lags + 1)
if regression != "t":
exog[:cutoff, 1] = 0
exog[cutoff:, 1] = const
exog[:, 2] = trend[(lags + 2) : (nobs + 1)]
if regression == "ct":
exog[:cutoff, 3] = 0
exog[cutoff:, 3] = trend[1 : (nobs - period + 1)]
else:
exog[:, 1] = trend[(lags + 2) : (nobs + 1)]
exog[: (cutoff - 1), 2] = 0
exog[(cutoff - 1) :, 2] = trend[0 : (nobs - period + 1)]
return exog | Update the exog array for the next regression. | _update_regression_exog | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause |
def run(self, x, trim=0.15, maxlag=None, regression="c", autolag="AIC"):
"""
Zivot-Andrews structural-break unit-root test.
The Zivot-Andrews test tests for a unit root in a univariate process
in the presence of serial correlation and a single structural break.
Parameters
----------
x : array_like
The data series to test.
trim : float
The percentage of series at begin/end to exclude from break-period
calculation in range [0, 0.333] (default=0.15).
maxlag : int
The maximum lag which is included in test, default is
12*(nobs/100)^{1/4} (Schwert, 1989).
regression : {"c","t","ct"}
Constant and trend order to include in regression.
* "c" : constant only (default).
* "t" : trend only.
* "ct" : constant and trend.
autolag : {"AIC", "BIC", "t-stat", None}
The method to select the lag length when using automatic selection.
* if None, then maxlag lags are used,
* if "AIC" (default) or "BIC", then the number of lags is chosen
to minimize the corresponding information criterion,
* "t-stat" based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test.
Returns
-------
zastat : float
The test statistic.
pvalue : float
The pvalue based on MC-derived critical values.
cvdict : dict
The critical values for the test statistic at the 1%, 5%, and 10%
levels.
baselag : int
The number of lags used for period regressions.
bpidx : int
The index of x corresponding to endogenously calculated break period
with values in the range [0..nobs-1].
Notes
-----
H0 = unit root with a single structural break
Algorithm follows Baum (2004/2015) approximation to original
Zivot-Andrews method. Rather than performing an autolag regression at
each candidate break period (as per the original paper), a single
autolag regression is run up-front on the base model (constant + trend
with no dummies) to determine the best lag length. This lag length is
then used for all subsequent break-period regressions. This results in
significant run time reduction but also slightly more pessimistic test
statistics than the original Zivot-Andrews method, although no attempt
has been made to characterize the size/power trade-off.
References
----------
.. [1] Baum, C.F. (2004). ZANDREWS: Stata module to calculate
Zivot-Andrews unit root test in presence of structural break.
Statistical Software Components S437301, Boston College Department
of Economics, revised 2015.
.. [2] Schwert, G.W. (1989). Tests for unit roots: A Monte Carlo
investigation. Journal of Business & Economic Statistics, 7:
147-159.
.. [3] Zivot, E., and Andrews, D.W.K. (1992). Further evidence on the
great crash, the oil-price shock, and the unit-root hypothesis.
Journal of Business & Economic Studies, 10: 251-270.
"""
x = array_like(x, "x", dtype=np.double, ndim=1)
trim = float_like(trim, "trim")
maxlag = int_like(maxlag, "maxlag", optional=True)
regression = string_like(regression, "regression", options=("c", "t", "ct"))
autolag = string_like(
autolag, "autolag", options=("aic", "bic", "t-stat"), optional=True
)
if trim < 0 or trim > (1.0 / 3.0):
raise ValueError("trim value must be a float in range [0, 1/3)")
nobs = x.shape[0]
if autolag:
adf_res = adfuller(x, maxlag=maxlag, regression="ct", autolag=autolag)
baselags = adf_res[2]
elif maxlag:
baselags = maxlag
else:
baselags = int(12.0 * np.power(nobs / 100.0, 1 / 4.0))
trimcnt = int(nobs * trim)
start_period = trimcnt
end_period = nobs - trimcnt
if regression == "ct":
basecols = 5
else:
basecols = 4
# normalize constant and trend terms for stability
c_const = 1 / np.sqrt(nobs)
t_const = np.arange(1.0, nobs + 2)
t_const *= np.sqrt(3) / nobs ** (3 / 2)
# format the auxiliary regression data
endog, exog = self._format_regression_data(
x, nobs, c_const, t_const, basecols, baselags
)
# iterate through the time periods
stats = np.full(end_period + 1, np.inf)
for bp in range(start_period + 1, end_period + 1):
# update intercept dummy / trend / trend dummy
exog = self._update_regression_exog(
exog,
regression,
bp,
nobs,
c_const,
t_const,
basecols,
baselags,
)
# check exog rank on first iteration
if bp == start_period + 1:
o = OLS(endog[baselags:], exog, hasconst=1).fit()
if o.df_model < exog.shape[1] - 1:
raise ValueError(
"ZA: auxiliary exog matrix is not full rank.\n"
" cols (exc intercept) = {} rank = {}".format(
exog.shape[1] - 1, o.df_model
)
)
stats[bp] = o.tvalues[basecols - 1]
else:
stats[bp] = self._quick_ols(endog[baselags:], exog)[basecols - 1]
# return best seen
zastat = np.min(stats)
bpidx = np.argmin(stats) - 1
crit = self._za_crit(zastat, regression)
pval = crit[0]
cvdict = crit[1]
return zastat, pval, cvdict, baselags, bpidx | Zivot-Andrews structural-break unit-root test.
The Zivot-Andrews test tests for a unit root in a univariate process
in the presence of serial correlation and a single structural break.
Parameters
----------
x : array_like
The data series to test.
trim : float
The percentage of series at begin/end to exclude from break-period
calculation in range [0, 0.333] (default=0.15).
maxlag : int
The maximum lag which is included in test, default is
12*(nobs/100)^{1/4} (Schwert, 1989).
regression : {"c","t","ct"}
Constant and trend order to include in regression.
* "c" : constant only (default).
* "t" : trend only.
* "ct" : constant and trend.
autolag : {"AIC", "BIC", "t-stat", None}
The method to select the lag length when using automatic selection.
* if None, then maxlag lags are used,
* if "AIC" (default) or "BIC", then the number of lags is chosen
to minimize the corresponding information criterion,
* "t-stat" based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test.
Returns
-------
zastat : float
The test statistic.
pvalue : float
The pvalue based on MC-derived critical values.
cvdict : dict
The critical values for the test statistic at the 1%, 5%, and 10%
levels.
baselag : int
The number of lags used for period regressions.
bpidx : int
The index of x corresponding to endogenously calculated break period
with values in the range [0..nobs-1].
Notes
-----
H0 = unit root with a single structural break
Algorithm follows Baum (2004/2015) approximation to original
Zivot-Andrews method. Rather than performing an autolag regression at
each candidate break period (as per the original paper), a single
autolag regression is run up-front on the base model (constant + trend
with no dummies) to determine the best lag length. This lag length is
then used for all subsequent break-period regressions. This results in
significant run time reduction but also slightly more pessimistic test
statistics than the original Zivot-Andrews method, although no attempt
has been made to characterize the size/power trade-off.
References
----------
.. [1] Baum, C.F. (2004). ZANDREWS: Stata module to calculate
Zivot-Andrews unit root test in presence of structural break,"
Statistical Software Components S437301, Boston College Department
of Economics, revised 2015.
.. [2] Schwert, G.W. (1989). Tests for unit roots: A Monte Carlo
investigation. Journal of Business & Economic Statistics, 7:
147-159.
.. [3] Zivot, E., and Andrews, D.W.K. (1992). Further evidence on the
great crash, the oil-price shock, and the unit-root hypothesis.
Journal of Business & Economic Statistics, 10: 251-270. | run | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_stattools.py | BSD-3-Clause
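A minimal usage sketch for the test above, assuming the public wrapper zivot_andrews exported by statsmodels.tsa.stattools (the callable front-end to this run method); the simulated series and settings are illustrative only.

import numpy as np
from statsmodels.tsa.stattools import zivot_andrews

rng = np.random.default_rng(0)
x = np.cumsum(rng.standard_normal(200))  # random walk
x[100:] += 5.0                           # level shift halfway through
zastat, pvalue, cvdict, baselag, bpidx = zivot_andrews(x, trim=0.15, regression="c")
print(f"stat={zastat:.3f} p={pvalue:.3f} lags={baselag} break={bpidx}")
print(cvdict)                            # {'1%': ..., '5%': ..., '10%': ...}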
def __init__(self):
"""
Asymptotic critical values for the two different models specified
for the Leybourne-McCabe stationarity test. Asymptotic CVs are the
same as the asymptotic CVs for the KPSS stationarity test.
Notes
-----
The p-values are generated through Monte Carlo simulation using
1,000,000 replications and 10,000 data points.
"""
self.__leybourne_critical_values = {
# constant-only model
"c": statsmodels.tsa._leybourne.c,
# constant-trend model
"ct": statsmodels.tsa._leybourne.ct,
} | Asymptotic critical values for the two different models specified
for the Leybourne-McCabe stationarity test. Asymptotic CVs are the
same as the asymptotic CVs for the KPSS stationarity test.
Notes
-----
The p-values are generated through Monte Carlo simulation using
1,000,000 replications and 10,000 data points. | __init__ | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_leybourne.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_leybourne.py | BSD-3-Clause |
def __leybourne_crit(self, stat, model="c"):
"""
Linear interpolation for Leybourne p-values and critical values
Parameters
----------
stat : float
The Leybourne-McCabe test statistic
model : {'c','ct'}
The model used when computing the test statistic. 'c' is default.
Returns
-------
pvalue : float
The interpolated p-value
cvdict : dict
Critical values for the test statistic at the 1%, 5%, and 10%
levels
Notes
-----
The p-values are linear interpolated from the quantiles of the
simulated Leybourne-McCabe (KPSS) test statistic distribution
"""
table = self.__leybourne_critical_values[model]
# reverse the order
y = table[:, 0]
x = table[:, 1]
# LM cv table contains quantiles multiplied by 100
pvalue = np.interp(stat, x, y) / 100.0
cv = [1.0, 5.0, 10.0]
crit_value = np.interp(cv, np.flip(y), np.flip(x))
cvdict = {"1%": crit_value[0], "5%": crit_value[1], "10%": crit_value[2]}
return pvalue, cvdict | Linear interpolation for Leybourne p-values and critical values
Parameters
----------
stat : float
The Leybourne-McCabe test statistic
model : {'c','ct'}
The model used when computing the test statistic. 'c' is default.
Returns
-------
pvalue : float
The interpolated p-value
cvdict : dict
Critical values for the test statistic at the 1%, 5%, and 10%
levels
Notes
-----
The p-values are linear interpolated from the quantiles of the
simulated Leybourne-McCabe (KPSS) test statistic distribution | __leybourne_crit | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_leybourne.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_leybourne.py | BSD-3-Clause |
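In isolation, the interpolation above looks like this on a made-up (hypothetical) critical-value table, where column 0 holds quantiles multiplied by 100 and column 1 the matching statistic values:

import numpy as np

table = np.array([[99.0, 0.02], [90.0, 0.12], [50.0, 0.30],
                  [10.0, 0.55], [5.0, 0.70], [1.0, 1.00]])  # toy values only
y, x = table[:, 0], table[:, 1]
pvalue = np.interp(0.60, x, y) / 100.0                # ~0.083 for this table
crit = np.interp([1.0, 5.0, 10.0], np.flip(y), np.flip(x))
cvdict = {"1%": crit[0], "5%": crit[1], "10%": crit[2]}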
def _tsls_arima(self, x, arlags, model):
"""
Two-stage least squares approach for estimating ARIMA(p, 1, 1)
parameters as an alternative to MLE estimation in the case of
solver non-convergence
Parameters
----------
x : array_like
data series
arlags : int
AR(p) order
model : {'c','ct'}
Constant and trend order to include in regression
* 'c' : constant only
* 'ct' : constant and trend
Returns
-------
arparams : int
AR(1) coefficient plus constant
theta : int
MA(1) coefficient
olsfit.resid : ndarray
residuals from second-stage regression
"""
endog = np.diff(x, axis=0)
exog = lagmat(endog, arlags, trim="both")
# add constant if requested
if model == "ct":
exog = add_constant(exog)
# remove extra terms from front of endog
endog = endog[arlags:]
if arlags > 0:
resids = lagmat(OLS(endog, exog).fit().resid, 1, trim="forward")
else:
resids = lagmat(-endog, 1, trim="forward")
# add negated residuals column to exog as MA(1) term
exog = np.append(exog, -resids, axis=1)
olsfit = OLS(endog, exog).fit()
if model == "ct":
arparams = olsfit.params[1 : (len(olsfit.params) - 1)]
else:
arparams = olsfit.params[0 : (len(olsfit.params) - 1)]
theta = olsfit.params[len(olsfit.params) - 1]
return arparams, theta, olsfit.resid | Two-stage least squares approach for estimating ARIMA(p, 1, 1)
parameters as an alternative to MLE estimation in the case of
solver non-convergence
Parameters
----------
x : array_like
data series
arlags : int
AR(p) order
model : {'c','ct'}
Constant and trend order to include in regression
* 'c' : constant only
* 'ct' : constant and trend
Returns
-------
arparams : int
AR(1) coefficient plus constant
theta : int
MA(1) coefficient
olsfit.resid : ndarray
residuals from second-stage regression | _tsls_arima | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_leybourne.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_leybourne.py | BSD-3-Clause |
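A rough stand-alone sketch of the two-stage idea on simulated data: stage one fits the AR part by OLS, stage two adds the lagged, negated stage-one residuals as an MA(1) proxy (Hannan-Rissanen style). The simulated coefficients (0.5 AR, 0.3 MA) are illustrative, and the proxy is only approximate.

import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant
from statsmodels.tsa.tsatools import lagmat

rng = np.random.default_rng(1)
e = rng.standard_normal(2000)
d = np.zeros(2000)
for t in range(1, 2000):                 # ARMA(1,1): (1-0.5L)d = (1-0.3L)e
    d[t] = 0.5 * d[t - 1] + e[t] - 0.3 * e[t - 1]
x = np.cumsum(d)                         # integrate once -> ARIMA(1,1,1)

endog = np.diff(x)
exog = add_constant(lagmat(endog, 1, trim="both"))
endog = endog[1:]
stage1 = OLS(endog, exog).fit()                      # stage 1: AR part only
ma_proxy = -lagmat(stage1.resid, 1, trim="forward")  # negated lagged residuals
fit2 = OLS(endog, np.column_stack([exog, ma_proxy])).fit()
print(fit2.params)                       # last entry is theta, roughly 0.3 here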
def _autolag(self, x):
"""
Empirical method for Leybourne-McCabe auto AR lag detection.
Set number of AR lags equal to the first PACF falling within the
95% confidence interval. Maximum number of AR lags is limited to
the smaller of 10 or 1/2 series length. Minimum is zero lags.
Parameters
----------
x : array_like
data series
Returns
-------
arlags : int
AR(p) order
"""
p = pacf(x, nlags=min(len(x) // 2, 10), method="ols")
ci = 1.960 / np.sqrt(len(x))
arlags = max(
0, ([n - 1 for n, i in enumerate(p) if abs(i) < ci] + [len(p) - 1])[0]
)
return arlags | Empirical method for Leybourne-McCabe auto AR lag detection.
Set number of AR lags equal to the first PACF falling within the
95% confidence interval. Maximum number of AR lags is limited to
the smaller of 10 or 1/2 series length. Minimum is zero lags.
Parameters
----------
x : array_like
data series
Returns
-------
arlags : int
AR(p) order | _autolag | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_leybourne.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_leybourne.py | BSD-3-Clause |
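The same selection rule applied outside the class with the public pacf function; the AR(2) simulation is illustrative.

import numpy as np
from statsmodels.tsa.stattools import pacf

rng = np.random.default_rng(2)
x = np.zeros(500)
for t in range(2, 500):                  # AR(2) process
    x[t] = 0.6 * x[t - 1] - 0.3 * x[t - 2] + rng.standard_normal()
p = pacf(x, nlags=min(len(x) // 2, 10), method="ols")
ci = 1.960 / np.sqrt(len(x))
arlags = max(0, ([n - 1 for n, v in enumerate(p) if abs(v) < ci] + [len(p) - 1])[0])
print(arlags)   # usually 2: the lag before the first insignificant PACF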
def run(self, x, arlags=1, regression="c", method="mle", varest="var94"):
"""
Leybourne-McCabe stationarity test
The Leybourne-McCabe test can be used to test for stationarity in a
univariate process.
Parameters
----------
x : array_like
data series
arlags : int
number of autoregressive terms to include (default=1); if None, the
lag order is selected automatically (see Notes)
regression : {'c','ct'}
Constant and trend order to include in regression
* 'c' : constant only (default)
* 'ct' : constant and trend
method : {'mle','ols'}
Method used to estimate ARIMA(p, 1, 1) filter model
* 'mle' : conditional sum of squares maximum likelihood (default)
* 'ols' : two-stage least squares
varest : {'var94','var99'}
Method used for residual variance estimation
* 'var94' : method used in original Leybourne-McCabe paper (1994)
(default)
* 'var99' : method used in follow-up paper (1999)
Returns
-------
lmstat : float
test statistic
pvalue : float
based on MC-derived critical values
arlags : int
AR(p) order used to create the filtered series
cvdict : dict
critical values for the test statistic at the 1%, 5%, and 10%
levels
Notes
-----
H0 = series is stationary
Basic process is to create a filtered series which removes the AR(p)
effects from the series under test followed by an auxiliary regression
similar to that of Kwiatkowski et al (1992). The AR(p) coefficients
are obtained by estimating an ARIMA(p, 1, 1) model. Two methods are
provided for ARIMA estimation: MLE and two-stage least squares.
Two methods are provided for residual variance estimation used in the
calculation of the test statistic. The first method ('var94') is the
mean of the squared residuals from the filtered regression. The second
method ('var99') is the MA(1) coefficient times the mean of the squared
residuals from the ARIMA(p, 1, 1) filtering model.
An empirical autolag procedure is provided. In this context, the number
of lags is equal to the number of AR(p) terms used in the filtering
step. The number of AR(p) terms is set equal to the first PACF
falling within the 95% confidence interval. Maximum number of AR lags is
limited to 1/2 series length.
References
----------
Kwiatkowski, D., Phillips, P.C.B., Schmidt, P. & Shin, Y. (1992).
Testing the null hypothesis of stationarity against the alternative of
a unit root. Journal of Econometrics, 54: 159–178.
Leybourne, S.J., & McCabe, B.P.M. (1994). A consistent test for a
unit root. Journal of Business and Economic Statistics, 12: 157–166.
Leybourne, S.J., & McCabe, B.P.M. (1999). Modified stationarity tests
with data-dependent model-selection rules. Journal of Business and
Economic Statistics, 17: 264-270.
Schwert, G W. (1987). Effects of model specification on tests for unit
roots in macroeconomic data. Journal of Monetary Economics, 20: 73–103.
"""
if regression not in ["c", "ct"]:
raise ValueError("LM: regression option '%s' not understood" % regression)
if method not in ["mle", "ols"]:
raise ValueError("LM: method option '%s' not understood" % method)
if varest not in ["var94", "var99"]:
raise ValueError("LM: varest option '%s' not understood" % varest)
x = np.asarray(x)
if x.ndim > 2 or (x.ndim == 2 and x.shape[1] != 1):
raise ValueError(
"LM: x must be a 1d array or a 2d array with a single column"
)
x = np.reshape(x, (-1, 1))
# determine AR order if not specified
if arlags is None:
arlags = self._autolag(x)
elif not isinstance(arlags, int) or arlags < 0 or arlags > int(len(x) / 2):
raise ValueError(
"LM: arlags must be an integer in range [0..%s]" % str(int(len(x) / 2))
)
# estimate the reduced ARIMA(p, 1, 1) model
if method == "mle":
if regression == "ct":
reg = "t"
else:
reg = None
from statsmodels.tsa.arima.model import ARIMA
arima = ARIMA(
x, order=(arlags, 1, 1), trend=reg, enforce_invertibility=False
)
arfit = arima.fit()
resids = arfit.resid
arcoeffs = []
if arlags > 0:
arcoeffs = arfit.arparams
theta = arfit.maparams[0]
else:
arcoeffs, theta, resids = self._tsls_arima(x, arlags, model=regression)
# variance estimator from (1999) LM paper
var99 = abs(theta * np.sum(resids**2) / len(resids))
# create the filtered series:
# z(t) = x(t) - arcoeffs[0]*x(t-1) - ... - arcoeffs[p-1]*x(t-p)
z = np.full(len(x) - arlags, np.inf)
for i in range(len(z)):
z[i] = x[i + arlags, 0]
for j in range(len(arcoeffs)):
z[i] -= arcoeffs[j] * x[i + arlags - j - 1, 0]
# regress the filtered series against a constant and
# trend term (if requested)
if regression == "c":
resids = z - z.mean()
else:
resids = OLS(z, add_constant(np.arange(1, len(z) + 1))).fit().resid
# variance estimator from (1994) LM paper
var94 = np.sum(resids**2) / len(resids)
# compute test statistic with specified variance estimator
eta = np.sum(resids.cumsum() ** 2) / (len(resids) ** 2)
if varest == "var99":
lmstat = eta / var99
else:
lmstat = eta / var94
# calculate pval
lmpval, cvdict = self.__leybourne_crit(lmstat, regression)
return lmstat, lmpval, arlags, cvdict | Leybourne-McCabe stationarity test
The Leybourne-McCabe test can be used to test for stationarity in a
univariate process.
Parameters
----------
x : array_like
data series
arlags : int
number of autoregressive terms to include (default=1); if None, the
lag order is selected automatically (see Notes)
regression : {'c','ct'}
Constant and trend order to include in regression
* 'c' : constant only (default)
* 'ct' : constant and trend
method : {'mle','ols'}
Method used to estimate ARIMA(p, 1, 1) filter model
* 'mle' : conditional sum of squares maximum likelihood (default)
* 'ols' : two-stage least squares
varest : {'var94','var99'}
Method used for residual variance estimation
* 'var94' : method used in original Leybourne-McCabe paper (1994)
(default)
* 'var99' : method used in follow-up paper (1999)
Returns
-------
lmstat : float
test statistic
pvalue : float
based on MC-derived critical values
arlags : int
AR(p) order used to create the filtered series
cvdict : dict
critical values for the test statistic at the 1%, 5%, and 10%
levels
Notes
-----
H0 = series is stationary
Basic process is to create a filtered series which removes the AR(p)
effects from the series under test followed by an auxiliary regression
similar to that of Kwiatkowski et al (1992). The AR(p) coefficients
are obtained by estimating an ARIMA(p, 1, 1) model. Two methods are
provided for ARIMA estimation: MLE and two-stage least squares.
Two methods are provided for residual variance estimation used in the
calculation of the test statistic. The first method ('var94') is the
mean of the squared residuals from the filtered regression. The second
method ('var99') is the MA(1) coefficient times the mean of the squared
residuals from the ARIMA(p, 1, 1) filtering model.
An empirical autolag procedure is provided. In this context, the number
of lags is equal to the number of AR(p) terms used in the filtering
step. The number of AR(p) terms is set equal to the first PACF
falling within the 95% confidence interval. Maximum number of AR lags is
limited to 1/2 series length.
References
----------
Kwiatkowski, D., Phillips, P.C.B., Schmidt, P. & Shin, Y. (1992).
Testing the null hypothesis of stationarity against the alternative of
a unit root. Journal of Econometrics, 54: 159–178.
Leybourne, S.J., & McCabe, B.P.M. (1994). A consistent test for a
unit root. Journal of Business and Economic Statistics, 12: 157–166.
Leybourne, S.J., & McCabe, B.P.M. (1999). Modified stationarity tests
with data-dependent model-selection rules. Journal of Business and
Economic Statistics, 17: 264-270.
Schwert, G W. (1987). Effects of model specification on tests for unit
roots in macroeconomic data. Journal of Monetary Economics, 20: 73–103. | run | python | statsmodels/statsmodels | statsmodels/tsa/stattools/_leybourne.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stattools/_leybourne.py | BSD-3-Clause |
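The core of the 'var94' statistic retraced by hand on a stationary AR(1) series, pretending the ARIMA stage already returned the true AR coefficient; all values are illustrative.

import numpy as np

rng = np.random.default_rng(3)
x = np.zeros(400)
for t in range(1, 400):
    x[t] = 0.5 * x[t - 1] + rng.standard_normal()
phi = 0.5                        # stand-in for the estimated AR(1) coefficient
z = x[1:] - phi * x[:-1]         # filtered series, arlags = 1
resids = z - z.mean()            # 'c' case: demean only
var94 = np.sum(resids**2) / len(resids)
eta = np.sum(resids.cumsum() ** 2) / (len(resids) ** 2)
print(eta / var94)               # small values favor the stationary null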
def fit(self):
"""
Estimate a trend component, multiple seasonal components, and a
residual component.
Returns
-------
DecomposeResult
Estimation results.
"""
num_seasons = len(self.periods)
iterate = 1 if num_seasons == 1 else self.iterate
# Box Cox
if self.lmbda == "auto":
y, lmbda = boxcox(self._y, lmbda=None)
self.est_lmbda = lmbda
elif self.lmbda:
y = boxcox(self._y, lmbda=self.lmbda)
else:
y = self._y
# Get STL fit params
stl_inner_iter = self._stl_kwargs.pop("inner_iter", None)
stl_outer_iter = self._stl_kwargs.pop("outer_iter", None)
# Iterate over each seasonal component to extract seasonalities
seasonal = np.zeros(shape=(num_seasons, self.nobs))
deseas = y
for _ in range(iterate):
for i in range(num_seasons):
deseas = deseas + seasonal[i]
res = STL(
endog=deseas,
period=self.periods[i],
seasonal=self.windows[i],
**self._stl_kwargs,
).fit(inner_iter=stl_inner_iter, outer_iter=stl_outer_iter)
seasonal[i] = res.seasonal
deseas = deseas - seasonal[i]
seasonal = np.squeeze(seasonal.T)
trend = res.trend
rw = res.weights
resid = deseas - trend
# Return pandas if endog is pandas
if isinstance(self.endog, (pd.Series, pd.DataFrame)):
index = self.endog.index
y = pd.Series(y, index=index, name="observed")
trend = pd.Series(trend, index=index, name="trend")
resid = pd.Series(resid, index=index, name="resid")
rw = pd.Series(rw, index=index, name="robust_weight")
cols = [f"seasonal_{period}" for period in self.periods]
if seasonal.ndim == 1:
seasonal = pd.Series(seasonal, index=index, name="seasonal")
else:
seasonal = pd.DataFrame(seasonal, index=index, columns=cols)
return DecomposeResult(y, seasonal, trend, resid, rw) | Estimate a trend component, multiple seasonal components, and a
residual component.
Returns
-------
DecomposeResult
Estimation results. | fit | python | statsmodels/statsmodels | statsmodels/tsa/stl/mstl.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/stl/mstl.py | BSD-3-Clause |
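A small end-to-end MSTL sketch with two seasonalities on a synthetic hourly-style series; the periods (24 and 168) and the noise level are illustrative.

import numpy as np
from statsmodels.tsa.seasonal import MSTL

t = np.arange(24 * 7 * 8)
y = (0.01 * t + 2 * np.sin(2 * np.pi * t / 24)
     + np.sin(2 * np.pi * t / 168)
     + 0.3 * np.random.default_rng(4).standard_normal(t.size))
res = MSTL(y, periods=(24, 168)).fit()
print(res.seasonal.shape)   # (1344, 2): one column per seasonal period
print(res.trend[:3])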
def bkfilter(x, low=6, high=32, K=12):
"""
Filter a time series using the Baxter-King bandpass filter.
Parameters
----------
x : array_like
A 1 or 2d ndarray. If 2d, variables are assumed to be in columns.
low : float
Minimum period for oscillations, ie., Baxter and King suggest that
the Burns-Mitchell U.S. business cycle has 6 for quarterly data and
1.5 for annual data.
high : float
Maximum period for oscillations BK suggest that the U.S.
business cycle has 32 for quarterly data and 8 for annual data.
K : int
Lead-lag length of the filter. Baxter and King propose a truncation
length of 12 for quarterly data and 3 for annual data.
Returns
-------
ndarray
The cyclical component of x.
See Also
--------
statsmodels.tsa.filters.cf_filter.cffilter
The Christiano Fitzgerald asymmetric, random walk filter.
statsmodels.tsa.filters.hp_filter.hpfilter
Hodrick-Prescott filter.
statsmodels.tsa.seasonal.seasonal_decompose
Decompose a time series using moving averages.
statsmodels.tsa.seasonal.STL
Season-Trend decomposition using LOESS.
Notes
-----
Returns a centered weighted moving average of the original series. Where
the weights a[j] are computed ::
a[j] = b[j] + theta, for j = 0, +/-1, +/-2, ... +/- K
b[0] = (omega_2 - omega_1)/pi
b[j] = 1/(pi*j)*(sin(omega_2*j) - sin(omega_1*j)), for j = +/-1, +/-2,...
and theta is a normalizing constant ::
theta = -sum(b)/(2K+1)
See the notebook `Time Series Filters
<../examples/notebooks/generated/tsa_filters.html>`__ for an overview.
References
----------
Baxter, M. and R. G. King. "Measuring Business Cycles: Approximate
Band-Pass Filters for Economic Time Series." *Review of Economics and
Statistics*, 1999, 81(4), 575-593.
Examples
--------
>>> import statsmodels.api as sm
>>> import pandas as pd
>>> dta = sm.datasets.macrodata.load_pandas().data
>>> index = pd.period_range('1959Q1', '2009Q3', freq='Q')
>>> dta.set_index(index, inplace=True)
>>> cycles = sm.tsa.filters.bkfilter(dta[['realinv']], 6, 24, 12)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> cycles.plot(ax=ax, style=['r--', 'b-'])
>>> plt.show()
.. plot:: plots/bkf_plot.py
"""
# TODO: change the docstring to ..math::?
# TODO: allow windowing functions to correct for Gibb's Phenomenon?
# adjust bweights (symmetrically) by below before demeaning
# Lancosz Sigma Factors np.sinc(2*j/(2.*K+1))
pw = PandasWrapper(x)
x = array_like(x, 'x', maxdim=2)
omega_1 = 2. * np.pi / high # convert from freq. to periodicity
omega_2 = 2. * np.pi / low
bweights = np.zeros(2 * K + 1)
bweights[K] = (omega_2 - omega_1) / np.pi # weight at zero freq.
j = np.arange(1, int(K) + 1)
weights = 1 / (np.pi * j) * (np.sin(omega_2 * j) - np.sin(omega_1 * j))
bweights[K + j] = weights # j is an idx
bweights[:K] = weights[::-1] # make symmetric weights
bweights -= bweights.mean() # make sure weights sum to zero
if x.ndim == 2:
bweights = bweights[:, None]
x = fftconvolve(x, bweights, mode='valid')
# get a centered moving avg/convolution
return pw.wrap(x, append='cycle', trim_start=K, trim_end=K) | Filter a time series using the Baxter-King bandpass filter.
Parameters
----------
x : array_like
A 1 or 2d ndarray. If 2d, variables are assumed to be in columns.
low : float
Minimum period for oscillations, ie., Baxter and King suggest that
the Burns-Mitchell U.S. business cycle has 6 for quarterly data and
1.5 for annual data.
high : float
Maximum period for oscillations BK suggest that the U.S.
business cycle has 32 for quarterly data and 8 for annual data.
K : int
Lead-lag length of the filter. Baxter and King propose a truncation
length of 12 for quarterly data and 3 for annual data.
Returns
-------
ndarray
The cyclical component of x.
See Also
--------
statsmodels.tsa.filters.cf_filter.cffilter
The Christiano Fitzgerald asymmetric, random walk filter.
statsmodels.tsa.filters.hp_filter.hpfilter
Hodrick-Prescott filter.
statsmodels.tsa.seasonal.seasonal_decompose
Decompose a time series using moving averages.
statsmodels.tsa.seasonal.STL
Season-Trend decomposition using LOESS.
Notes
-----
Returns a centered weighted moving average of the original series. Where
the weights a[j] are computed ::
a[j] = b[j] + theta, for j = 0, +/-1, +/-2, ... +/- K
b[0] = (omega_2 - omega_1)/pi
b[j] = 1/(pi*j)*(sin(omega_2*j) - sin(omega_1*j)), for j = +/-1, +/-2,...
and theta is a normalizing constant ::
theta = -sum(b)/(2K+1)
See the notebook `Time Series Filters
<../examples/notebooks/generated/tsa_filters.html>`__ for an overview.
References
----------
Baxter, M. and R. G. King. "Measuring Business Cycles: Approximate
Band-Pass Filters for Economic Time Series." *Review of Economics and
Statistics*, 1999, 81(4), 575-593.
Examples
--------
>>> import statsmodels.api as sm
>>> import pandas as pd
>>> dta = sm.datasets.macrodata.load_pandas().data
>>> index = pd.period_range('1959Q1', '2009Q3', freq='Q')
>>> dta.set_index(index, inplace=True)
>>> cycles = sm.tsa.filters.bkfilter(dta[['realinv']], 6, 24, 12)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> cycles.plot(ax=ax, style=['r--', 'b-'])
>>> plt.show()
.. plot:: plots/bkf_plot.py | bkfilter | python | statsmodels/statsmodels | statsmodels/tsa/filters/bk_filter.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/filters/bk_filter.py | BSD-3-Clause |
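The weight construction from the Notes, stand-alone, using the quarterly defaults: b[j] plus the normalizing constant so the final weights sum to zero (zero gain at frequency zero).

import numpy as np

low, high, K = 6, 32, 12
omega_1, omega_2 = 2 * np.pi / high, 2 * np.pi / low
j = np.arange(1, K + 1)
b = np.zeros(2 * K + 1)
b[K] = (omega_2 - omega_1) / np.pi
b[K + j] = (np.sin(omega_2 * j) - np.sin(omega_1 * j)) / (np.pi * j)
b[K - j] = b[K + j]                # symmetric around lag 0
a = b - b.mean()                   # same as adding theta = -sum(b)/(2K+1)
print(a.sum())                     # ~0 up to floating point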
def cffilter(x, low=6, high=32, drift=True):
"""
Christiano Fitzgerald asymmetric, random walk filter.
Parameters
----------
x : array_like
The 1 or 2d array to filter. If 2d, variables are assumed to be in
columns.
low : float
Minimum period of oscillations. Features below low periodicity are
filtered out. Default is 6 for quarterly data, giving a 1.5 year
periodicity.
high : float
Maximum period of oscillations. Features above high periodicity are
filtered out. Default is 32 for quarterly data, giving an 8 year
periodicity.
drift : bool
Whether or not to remove a trend from the data. The trend is estimated
as np.arange(nobs)*(x[-1] - x[0])/(len(x)-1).
Returns
-------
cycle : array_like
The features of x between the periodicities low and high.
trend : array_like
The trend in the data with the cycles removed.
See Also
--------
statsmodels.tsa.filters.bk_filter.bkfilter
Baxter-King filter.
statsmodels.tsa.filters.hp_filter.hpfilter
Hodrick-Prescott filter.
statsmodels.tsa.seasonal.seasonal_decompose
Decompose a time series using moving averages.
statsmodels.tsa.seasonal.STL
Season-Trend decomposition using LOESS.
Notes
-----
See the notebook `Time Series Filters
<../examples/notebooks/generated/tsa_filters.html>`__ for an overview.
Examples
--------
>>> import statsmodels.api as sm
>>> import pandas as pd
>>> dta = sm.datasets.macrodata.load_pandas().data
>>> index = pd.period_range('1959Q1', '2009Q3', freq='Q')
>>> dta.set_index(index, inplace=True)
>>> cf_cycles, cf_trend = sm.tsa.filters.cffilter(dta[["infl", "unemp"]])
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> cf_cycles.plot(ax=ax, style=['r--', 'b-'])
>>> plt.show()
.. plot:: plots/cff_plot.py
"""
#TODO: cythonize/vectorize loop?, add ability for symmetric filter,
# and estimates of theta other than random walk.
if low < 2:
raise ValueError("low must be >= 2")
pw = PandasWrapper(x)
x = array_like(x, 'x', ndim=2)
nobs, nseries = x.shape
a = 2*np.pi/high
b = 2*np.pi/low
if drift: # get drift adjusted series
x = x - np.arange(nobs)[:, None] * (x[-1] - x[0]) / (nobs - 1)
J = np.arange(1, nobs + 1)
Bj = (np.sin(b * J) - np.sin(a * J)) / (np.pi * J)
B0 = (b - a) / np.pi
Bj = np.r_[B0, Bj][:, None]
y = np.zeros((nobs, nseries))
for i in range(nobs):
B = -.5 * Bj[0] - np.sum(Bj[1:-i - 2])
A = -Bj[0] - np.sum(Bj[1:-i - 2]) - np.sum(Bj[1:i]) - B
y[i] = (Bj[0] * x[i] + np.dot(Bj[1:-i - 2].T, x[i + 1:-1]) +
B * x[-1] + np.dot(Bj[1:i].T, x[1:i][::-1]) + A * x[0])
y = y.squeeze()
cycle, trend = y.squeeze(), x.squeeze() - y
return pw.wrap(cycle, append='cycle'), pw.wrap(trend, append='trend') | Christiano Fitzgerald asymmetric, random walk filter.
Parameters
----------
x : array_like
The 1 or 2d array to filter. If 2d, variables are assumed to be in
columns.
low : float
Minimum period of oscillations. Features below low periodicity are
filtered out. Default is 6 for quarterly data, giving a 1.5 year
periodicity.
high : float
Maximum period of oscillations. Features above high periodicity are
filtered out. Default is 32 for quarterly data, giving an 8 year
periodicity.
drift : bool
Whether or not to remove a trend from the data. The trend is estimated
as np.arange(nobs)*(x[-1] - x[0])/(len(x)-1).
Returns
-------
cycle : array_like
The features of x between the periodicities low and high.
trend : array_like
The trend in the data with the cycles removed.
See Also
--------
statsmodels.tsa.filters.bk_filter.bkfilter
Baxter-King filter.
statsmodels.tsa.filters.hp_filter.hpfilter
Hodrick-Prescott filter.
statsmodels.tsa.seasonal.seasonal_decompose
Decompose a time series using moving averages.
statsmodels.tsa.seasonal.STL
Season-Trend decomposition using LOESS.
Notes
-----
See the notebook `Time Series Filters
<../examples/notebooks/generated/tsa_filters.html>`__ for an overview.
Examples
--------
>>> import statsmodels.api as sm
>>> import pandas as pd
>>> dta = sm.datasets.macrodata.load_pandas().data
>>> index = pd.period_range('1959Q1', '2009Q3', freq='Q')
>>> dta.set_index(index, inplace=True)
>>> cf_cycles, cf_trend = sm.tsa.filters.cffilter(dta[["infl", "unemp"]])
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> cf_cycles.plot(ax=ax, style=['r--', 'b-'])
>>> plt.show()
.. plot:: plots/cff_plot.py | cffilter | python | statsmodels/statsmodels | statsmodels/tsa/filters/cf_filter.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/filters/cf_filter.py | BSD-3-Clause |
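A minimal cffilter run on a synthetic series (linear trend plus a 20-period cycle); amplitudes are illustrative.

import numpy as np
from statsmodels.tsa.filters.cf_filter import cffilter

t = np.arange(200.0)
x = 0.05 * t + np.sin(2 * np.pi * t / 20)
x += 0.1 * np.random.default_rng(5).standard_normal(200)
cycle, trend = cffilter(x, low=6, high=32, drift=True)
print(cycle.shape, trend.shape)   # (200,) each; cycle tracks the sine term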
def pandas_wrapper_freq(func, trim_head=None, trim_tail=None,
freq_kw='freq', columns=None, *args, **kwargs):
"""
Return a new function that catches the incoming X, checks if it's pandas,
calls the functions as is. Then wraps the results in the incoming index.
Deals with frequencies. Expects that the function returns a tuple,
a Bunch object, or a pandas-object.
"""
@wraps(func)
def new_func(X, *args, **kwargs):
# quick pass-through for do nothing case
if not _is_using_pandas(X, None):
return func(X, *args, **kwargs)
wrapper_func = _get_pandas_wrapper(X, trim_head, trim_tail,
columns)
index = X.index
freq = index.inferred_freq
kwargs.update({freq_kw : freq_to_period(freq)})
ret = func(X, *args, **kwargs)
ret = wrapper_func(ret)
return ret
return new_func | Return a new function that catches the incoming X, checks if it's pandas,
calls the functions as is. Then wraps the results in the incoming index.
Deals with frequencies. Expects that the function returns a tuple,
a Bunch object, or a pandas-object. | pandas_wrapper_freq | python | statsmodels/statsmodels | statsmodels/tsa/filters/_utils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/filters/_utils.py | BSD-3-Clause |
def fftconvolveinv(in1, in2, mode="full"):
"""
Convolve two N-dimensional arrays using FFT. See convolve.
copied from scipy.signal.signaltools, but here used to try out an inverse
filter; does not work, or I cannot get it to work
2010-10-23:
looks ok to me for 1d,
from results below with padded data array (fftp)
but it does not work for multidimensional inverse filter (fftn)
original signal.fftconvolve also uses fftn
"""
s1 = np.array(in1.shape)
s2 = np.array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
size = s1+s2-1
# Always use 2**n-sized FFT
    fsize = (2 ** np.ceil(np.log2(size))).astype(int)
IN1 = fft.fftn(in1,fsize)
#IN1 *= fftn(in2,fsize) #JP: this looks like the only change I made
IN1 /= fft.fftn(in2,fsize) # use inverse filter
# note the inverse is elementwise not matrix inverse
# is this correct, NO does not seem to work for VARMA
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = fft.ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
        if np.prod(s1, axis=0) > np.prod(s2, axis=0):
osize = s1
else:
osize = s2
return trim_centered(ret,osize)
elif mode == "valid":
return trim_centered(ret,abs(s2-s1)+1) | Convolve two N-dimensional arrays using FFT. See convolve.
copied from scipy.signal.signaltools, but here used to try out an inverse
filter; does not work, or I cannot get it to work
2010-10-23:
looks ok to me for 1d,
from results below with padded data array (fftp)
but it does not work for multidimensional inverse filter (fftn)
original signal.fftconvolve also uses fftn | fftconvolveinv | python | statsmodels/statsmodels | statsmodels/tsa/filters/filtertools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/filters/filtertools.py | BSD-3-Clause |
def fftconvolve3(in1, in2=None, in3=None, mode="full"):
"""
Convolve two N-dimensional arrays using FFT. See convolve.
For use with arma (old version: in1=num, in2=den, in3=data)
* better for consistency with other functions in1=data in2=num in3=den
* note in2 and in3 need to have consistent dimension/shape
since I'm using max of in2, in3 shapes and not the sum
copied from scipy.signal.signaltools, but here used to try out an inverse
filter; does not work, or I cannot get it to work
2010-10-23
looks ok to me for 1d,
from results below with padded data array (fftp)
but it does not work for multidimensional inverse filter (fftn)
original signal.fftconvolve also uses fftn
"""
if (in2 is None) and (in3 is None):
raise ValueError('at least one of in2 and in3 needs to be given')
s1 = np.array(in1.shape)
if in2 is not None:
s2 = np.array(in2.shape)
else:
s2 = 0
if in3 is not None:
s3 = np.array(in3.shape)
s2 = max(s2, s3) # try this looks reasonable for ARMA
#s2 = s3
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
size = s1+s2-1
# Always use 2**n-sized FFT
    fsize = (2 ** np.ceil(np.log2(size))).astype(int)
#convolve shorter ones first, not sure if it matters
IN1 = in1.copy() # TODO: Is this correct?
if in2 is not None:
IN1 = fft.fftn(in2, fsize)
if in3 is not None:
IN1 /= fft.fftn(in3, fsize) # use inverse filter
# note the inverse is elementwise not matrix inverse
# is this correct, NO does not seem to work for VARMA
IN1 *= fft.fftn(in1, fsize)
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = fft.ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
        if np.prod(s1, axis=0) > np.prod(s2, axis=0):
osize = s1
else:
osize = s2
return trim_centered(ret,osize)
elif mode == "valid":
return trim_centered(ret,abs(s2-s1)+1) | Convolve two N-dimensional arrays using FFT. See convolve.
For use with arma (old version: in1=num, in2=den, in3=data)
* better for consistency with other functions in1=data in2=num in3=den
* note in2 and in3 need to have consistent dimension/shape
since I'm using max of in2, in3 shapes and not the sum
copied from scipy.signal.signaltools, but here used to try out inverse
filter does not work or I cannot get it to work
2010-10-23
looks ok to me for 1d,
from results below with padded data array (fftp)
but it does not work for multidimensional inverse filter (fftn)
original signal.fftconvolve also uses fftn | fftconvolve3 | python | statsmodels/statsmodels | statsmodels/tsa/filters/filtertools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/filters/filtertools.py | BSD-3-Clause |
def recursive_filter(x, ar_coeff, init=None):
"""
Autoregressive, or recursive, filtering.
Parameters
----------
x : array_like
Time-series data. Should be 1d or n x 1.
ar_coeff : array_like
AR coefficients in reverse time order. See Notes for details.
init : array_like
Initial values of the time-series prior to the first value of y.
The default is zero.
Returns
-------
array_like
Filtered array, number of columns determined by x and ar_coeff. If x
is a pandas object then a Series is returned.
Notes
-----
Computes the recursive filter ::
y[n] = ar_coeff[0] * y[n-1] + ...
+ ar_coeff[n_coeff - 1] * y[n - n_coeff] + x[n]
where n_coeff = len(ar_coeff).
"""
pw = PandasWrapper(x)
x = array_like(x, 'x')
ar_coeff = array_like(ar_coeff, 'ar_coeff')
if init is not None: # integer init are treated differently in lfiltic
init = array_like(init, 'init')
if len(init) != len(ar_coeff):
raise ValueError("ar_coeff must be the same length as init")
if init is not None:
zi = signal.lfiltic([1], np.r_[1, -ar_coeff], init, x)
else:
zi = None
y = signal.lfilter([1.], np.r_[1, -ar_coeff], x, zi=zi)
if init is not None:
result = y[0]
else:
result = y
return pw.wrap(result) | Autoregressive, or recursive, filtering.
Parameters
----------
x : array_like
Time-series data. Should be 1d or n x 1.
ar_coeff : array_like
AR coefficients in reverse time order. See Notes for details.
init : array_like
Initial values of the time-series prior to the first value of y.
The default is zero.
Returns
-------
array_like
Filtered array, number of columns determined by x and ar_coeff. If x
is a pandas object then a Series is returned.
Notes
-----
Computes the recursive filter ::
y[n] = ar_coeff[0] * y[n-1] + ...
+ ar_coeff[n_coeff - 1] * y[n - n_coeff] + x[n]
where n_coeff = len(ar_coeff). | recursive_filter | python | statsmodels/statsmodels | statsmodels/tsa/filters/filtertools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/filters/filtertools.py | BSD-3-Clause
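Cross-checking recursive_filter against an explicit loop for an AR(2) recursion; the coefficients are illustrative.

import numpy as np
from statsmodels.tsa.filters.filtertools import recursive_filter

x = np.random.default_rng(6).standard_normal(50)
ar = np.array([0.6, -0.2])          # y[n] = 0.6*y[n-1] - 0.2*y[n-2] + x[n]
y = recursive_filter(x, ar)
y_loop = np.zeros_like(x)
for n in range(len(x)):
    y_loop[n] = x[n]
    if n >= 1:
        y_loop[n] += 0.6 * y_loop[n - 1]
    if n >= 2:
        y_loop[n] -= 0.2 * y_loop[n - 2]
np.testing.assert_allclose(y, y_loop)   # identical up to floating point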
def convolution_filter(x, filt, nsides=2):
"""
Linear filtering via convolution. Centered and backward displaced moving
weighted average.
Parameters
----------
x : array_like
data array, 1d or 2d, if 2d then observations in rows
filt : array_like
Linear filter coefficients in reverse time-order. Should have the
same number of dimensions as x though if 1d and ``x`` is 2d will be
coerced to 2d.
nsides : int, optional
If 2, a centered moving average is computed using the filter
coefficients. If 1, the filter coefficients are for past values only.
Both methods use scipy.signal.convolve.
Returns
-------
y : ndarray, 2d
Filtered array, number of columns determined by x and filt. If a
pandas object is given, a pandas object is returned. The index of
the return is the exact same as the time period in ``x``
Notes
-----
If nsides == 1, x is filtered ::
y[n] = filt[0]*x[n-1] + ... + filt[n_filt-1]*x[n-n_filt]
where n_filt is len(filt).
If nsides == 2, x is filtered around lag 0 ::
y[n] = filt[0]*x[n - n_filt/2] + ... + filt[n_filt / 2] * x[n]
+ ... + filt[n_filt - 1] * x[n + n_filt/2]
where n_filt is len(filt). If n_filt is even, then more of the filter
is forward in time than backward.
If filt is 1d or (nlags,1) one lag polynomial is applied to all
variables (columns of x). If filt is 2d, (nlags, nvars) each series is
independently filtered with its own lag polynomial, using a loop over nvar.
This is different than the usual 2d vs 2d convolution.
Filtering is done with scipy.signal.convolve, so it will be reasonably
fast for medium sized data. For large data fft convolution would be
faster.
"""
# for nsides shift the index instead of using 0 for 0 lag this
# allows correct handling of NaNs
if nsides == 1:
trim_head = len(filt) - 1
trim_tail = None
elif nsides == 2:
trim_head = int(np.ceil(len(filt)/2.) - 1) or None
trim_tail = int(np.ceil(len(filt)/2.) - len(filt) % 2) or None
else: # pragma : no cover
raise ValueError("nsides must be 1 or 2")
pw = PandasWrapper(x)
x = array_like(x, 'x', maxdim=2)
filt = array_like(filt, 'filt', ndim=x.ndim)
if filt.ndim == 1 or min(filt.shape) == 1:
result = signal.convolve(x, filt, mode='valid')
else: # filt.ndim == 2
nlags = filt.shape[0]
nvar = x.shape[1]
result = np.zeros((x.shape[0] - nlags + 1, nvar))
if nsides == 2:
for i in range(nvar):
                # could also use np.convolve, but easier for switching to fft
result[:, i] = signal.convolve(x[:, i], filt[:, i],
mode='valid')
elif nsides == 1:
for i in range(nvar):
result[:, i] = signal.convolve(x[:, i], np.r_[0, filt[:, i]],
mode='valid')
result = _pad_nans(result, trim_head, trim_tail)
return pw.wrap(result) | Linear filtering via convolution. Centered and backward displaced moving
weighted average.
Parameters
----------
x : array_like
data array, 1d or 2d, if 2d then observations in rows
filt : array_like
Linear filter coefficients in reverse time-order. Should have the
same number of dimensions as x though if 1d and ``x`` is 2d will be
coerced to 2d.
nsides : int, optional
If 2, a centered moving average is computed using the filter
coefficients. If 1, the filter coefficients are for past values only.
Both methods use scipy.signal.convolve.
Returns
-------
y : ndarray, 2d
Filtered array, number of columns determined by x and filt. If a
pandas object is given, a pandas object is returned. The index of
the return is the exact same as the time period in ``x``
Notes
-----
If nsides == 1, x is filtered ::
y[n] = filt[0]*x[n-1] + ... + filt[n_filt-1]*x[n-n_filt]
where n_filt is len(filt).
If nsides == 2, x is filtered around lag 0 ::
y[n] = filt[0]*x[n - n_filt/2] + ... + filt[n_filt / 2] * x[n]
+ ... + filt[n_filt - 1] * x[n + n_filt/2]
where n_filt is len(filt). If n_filt is even, then more of the filter
is forward in time than backward.
If filt is 1d or (nlags,1) one lag polynomial is applied to all
variables (columns of x). If filt is 2d, (nlags, nvars) each series is
independently filtered with its own lag polynomial, using a loop over nvar.
This is different than the usual 2d vs 2d convolution.
Filtering is done with scipy.signal.convolve, so it will be reasonably
fast for medium sized data. For large data fft convolution would be
faster. | convolution_filter | python | statsmodels/statsmodels | statsmodels/tsa/filters/filtertools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/filters/filtertools.py | BSD-3-Clause |
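A centered three-point moving average via convolution_filter; the ends are NaN-padded so the output aligns with the input.

import numpy as np
from statsmodels.tsa.filters.filtertools import convolution_filter

x = np.arange(10.0)
y = convolution_filter(x, np.full(3, 1 / 3), nsides=2)
print(y)   # [nan 1. 2. 3. 4. 5. 6. 7. 8. nan]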
def miso_lfilter(ar, ma, x, useic=False):
"""
Filter multiple time series into a single time series.
Uses a convolution to merge inputs, and then lfilter to produce output.
Parameters
----------
ar : array_like
The coefficients of autoregressive lag polynomial including lag zero,
ar(L) in the expression ar(L)y_t.
ma : array_like, same ndim as x, currently 2d
The coefficient of the moving average lag polynomial, ma(L) in
ma(L)x_t.
x : array_like
The 2-d input data series, time in rows, variables in columns.
useic : bool
Flag indicating whether to use initial conditions.
Returns
-------
y : ndarray
The filtered output series.
inp : ndarray, 1d
The combined input series.
Notes
-----
currently for 2d inputs only, no choice of axis
Use of signal.lfilter requires that ar lag polynomial contains
floating point numbers
does not cut off invalid starting and final values
miso_lfilter finds the array y such that:
ar(L)y_t = ma(L)x_t
with shapes y (nobs,), x (nobs, nvars), ar (narlags,), and
ma (narlags, nvars).
"""
ma = array_like(ma, 'ma')
ar = array_like(ar, 'ar')
inp = signal.correlate(x, ma[::-1, :])[:, (x.shape[1] + 1) // 2]
# for testing 2d equivalence between convolve and correlate
# inp2 = signal.convolve(x, ma[:,::-1])[:, (x.shape[1]+1)//2]
# np.testing.assert_almost_equal(inp2, inp)
nobs = x.shape[0]
# cut of extra values at end
# TODO: initialize also x for correlate
if useic:
return signal.lfilter([1], ar, inp,
zi=signal.lfiltic(np.array([1., 0.]), ar,
useic))[0][:nobs], inp[:nobs]
else:
return signal.lfilter([1], ar, inp)[:nobs], inp[:nobs] | Filter multiple time series into a single time series.
Uses a convolution to merge inputs, and then lfilter to produce output.
Parameters
----------
ar : array_like
The coefficients of autoregressive lag polynomial including lag zero,
ar(L) in the expression ar(L)y_t.
ma : array_like, same ndim as x, currently 2d
The coefficient of the moving average lag polynomial, ma(L) in
ma(L)x_t.
x : array_like
The 2-d input data series, time in rows, variables in columns.
useic : bool
Flag indicating whether to use initial conditions.
Returns
-------
y : ndarray
The filtered output series.
inp : ndarray, 1d
The combined input series.
Notes
-----
currently for 2d inputs only, no choice of axis
Use of signal.lfilter requires that ar lag polynomial contains
floating point numbers
does not cut off invalid starting and final values
miso_lfilter finds the array y such that:
ar(L)y_t = ma(L)x_t
with shapes y (nobs,), x (nobs, nvars), ar (narlags,), and
ma (narlags, nvars). | miso_lfilter | python | statsmodels/statsmodels | statsmodels/tsa/filters/filtertools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/filters/filtertools.py | BSD-3-Clause |
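A shape-level sketch of miso_lfilter with two input series; the ar and ma values are illustrative.

import numpy as np
from statsmodels.tsa.filters.filtertools import miso_lfilter

x = np.random.default_rng(9).standard_normal((100, 2))
ar = np.array([1.0, -0.5])                 # y[t] - 0.5*y[t-1] = combined input
ma = np.array([[1.0, 0.5],                 # lag-0 weights per input column
               [0.0, 0.2]])                # lag-1 weights per input column
y, inp = miso_lfilter(ar, ma, x)
print(y.shape, inp.shape)                  # (100,) (100,)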
def hpfilter(x, lamb=1600):
"""
Hodrick-Prescott filter.
Parameters
----------
x : array_like
The time series to filter, 1-d.
lamb : float
The Hodrick-Prescott smoothing parameter. A value of 1600 is
suggested for quarterly data. Ravn and Uhlig suggest using a value
of 6.25 (1600/4**4) for annual data and 129600 (1600*3**4) for monthly
data.
Returns
-------
cycle : ndarray
The estimated cycle in the data given lamb.
trend : ndarray
The estimated trend in the data given lamb.
See Also
--------
statsmodels.tsa.filters.bk_filter.bkfilter
Baxter-King filter.
statsmodels.tsa.filters.cf_filter.cffilter
The Christiano Fitzgerald asymmetric, random walk filter.
statsmodels.tsa.seasonal.seasonal_decompose
Decompose a time series using moving averages.
statsmodels.tsa.seasonal.STL
Season-Trend decomposition using LOESS.
Notes
-----
The HP filter removes a smooth trend, `T`, from the data `x` by solving
min_T sum_t ((x[t] - T[t])**2 + lamb*((T[t+1] - T[t]) - (T[t] - T[t-1]))**2)
Here we implemented the HP filter as a ridge-regression rule using
scipy.sparse. In this sense, the solution can be written as
T = inv(I + lamb*K'K)x
where I is a nobs x nobs identity matrix, and K is a (nobs-2) x nobs matrix
such that
K[i,j] = 1 if i == j or i == j + 2
K[i,j] = -2 if i == j + 1
K[i,j] = 0 otherwise
See the notebook `Time Series Filters
<../examples/notebooks/generated/tsa_filters.html>`__ for an overview.
References
----------
Hodrick, R.J, and E. C. Prescott. 1980. "Postwar U.S. Business Cycles: An
Empirical Investigation." `Carnegie Mellon University discussion
paper no. 451`.
Ravn, M.O. and H. Uhlig. 2002. "On Adjusting the Hodrick-Prescott
Filter for the Frequency of Observations." `The Review of Economics and
Statistics`, 84(2), 371-80.
Examples
--------
>>> import statsmodels.api as sm
>>> import pandas as pd
>>> dta = sm.datasets.macrodata.load_pandas().data
>>> index = pd.period_range('1959Q1', '2009Q3', freq='Q')
>>> dta.set_index(index, inplace=True)
>>> cycle, trend = sm.tsa.filters.hpfilter(dta.realgdp, 1600)
>>> gdp_decomp = dta[['realgdp']]
>>> gdp_decomp["cycle"] = cycle
>>> gdp_decomp["trend"] = trend
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> gdp_decomp[["realgdp", "trend"]]["2000-03-31":].plot(ax=ax,
... fontsize=16)
>>> plt.show()
.. plot:: plots/hpf_plot.py
"""
pw = PandasWrapper(x)
x = array_like(x, 'x', ndim=1)
nobs = len(x)
I = sparse.eye(nobs, nobs) # noqa:E741
offsets = np.array([0, 1, 2])
data = np.repeat([[1.], [-2.], [1.]], nobs, axis=1)
K = sparse.dia_matrix((data, offsets), shape=(nobs - 2, nobs))
use_umfpack = True
trend = spsolve(I+lamb*K.T.dot(K), x, use_umfpack=use_umfpack)
cycle = x - trend
return pw.wrap(cycle, append='cycle'), pw.wrap(trend, append='trend') | Hodrick-Prescott filter.
Parameters
----------
x : array_like
The time series to filter, 1-d.
lamb : float
The Hodrick-Prescott smoothing parameter. A value of 1600 is
suggested for quarterly data. Ravn and Uhlig suggest using a value
of 6.25 (1600/4**4) for annual data and 129600 (1600*3**4) for monthly
data.
Returns
-------
cycle : ndarray
The estimated cycle in the data given lamb.
trend : ndarray
The estimated trend in the data given lamb.
See Also
--------
statsmodels.tsa.filters.bk_filter.bkfilter
Baxter-King filter.
statsmodels.tsa.filters.cf_filter.cffilter
The Christiano Fitzgerald asymmetric, random walk filter.
statsmodels.tsa.seasonal.seasonal_decompose
Decompose a time series using moving averages.
statsmodels.tsa.seasonal.STL
Season-Trend decomposition using LOESS.
Notes
-----
The HP filter removes a smooth trend, `T`, from the data `x` by solving
min_T sum_t ((x[t] - T[t])**2 + lamb*((T[t+1] - T[t]) - (T[t] - T[t-1]))**2)
Here we implemented the HP filter as a ridge-regression rule using
scipy.sparse. In this sense, the solution can be written as
T = inv(I + lamb*K'K)x
where I is a nobs x nobs identity matrix, and K is a (nobs-2) x nobs matrix
such that
K[i,j] = 1 if i == j or i == j + 2
K[i,j] = -2 if i == j + 1
K[i,j] = 0 otherwise
See the notebook `Time Series Filters
<../examples/notebooks/generated/tsa_filters.html>`__ for an overview.
References
----------
Hodrick, R.J, and E. C. Prescott. 1980. "Postwar U.S. Business Cycles: An
Empirical Investigation." `Carnegie Mellon University discussion
paper no. 451`.
Ravn, M.O. and H. Uhlig. 2002. "On Adjusting the Hodrick-Prescott
Filter for the Frequency of Observations." `The Review of Economics and
Statistics`, 84(2), 371-80.
Examples
--------
>>> import statsmodels.api as sm
>>> import pandas as pd
>>> dta = sm.datasets.macrodata.load_pandas().data
>>> index = pd.period_range('1959Q1', '2009Q3', freq='Q')
>>> dta.set_index(index, inplace=True)
>>> cycle, trend = sm.tsa.filters.hpfilter(dta.realgdp, 1600)
>>> gdp_decomp = dta[['realgdp']]
>>> gdp_decomp["cycle"] = cycle
>>> gdp_decomp["trend"] = trend
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> gdp_decomp[["realgdp", "trend"]]["2000-03-31":].plot(ax=ax,
... fontsize=16)
>>> plt.show()
.. plot:: plots/hpf_plot.py | hpfilter | python | statsmodels/statsmodels | statsmodels/tsa/filters/hp_filter.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/filters/hp_filter.py | BSD-3-Clause |
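The ridge formula from the Notes verified with a dense K against hpfilter; the series length and lamb are illustrative.

import numpy as np
from statsmodels.tsa.filters.hp_filter import hpfilter

x = np.cumsum(np.random.default_rng(7).standard_normal(50))
lamb, nobs = 1600.0, 50
K = np.zeros((nobs - 2, nobs))
for i in range(nobs - 2):
    K[i, i : i + 3] = (1.0, -2.0, 1.0)   # second-difference operator
trend_dense = np.linalg.solve(np.eye(nobs) + lamb * K.T @ K, x)
cycle, trend = hpfilter(x, lamb=lamb)
np.testing.assert_allclose(trend, trend_dense, rtol=1e-6)  # same ridge solution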
def _extrapolate_trend(trend, npoints):
"""
Replace nan values on trend's end-points with least-squares extrapolated
values with regression considering npoints closest defined points.
"""
front = next(i for i, vals in enumerate(trend) if not np.any(np.isnan(vals)))
back = (
trend.shape[0]
- 1
- next(i for i, vals in enumerate(trend[::-1]) if not np.any(np.isnan(vals)))
)
front_last = min(front + npoints, back)
back_first = max(front, back - npoints)
k, n = np.linalg.lstsq(
np.c_[np.arange(front, front_last), np.ones(front_last - front)],
trend[front:front_last],
rcond=-1,
)[0]
extra = (np.arange(0, front) * np.c_[k] + np.c_[n]).T
if trend.ndim == 1:
extra = extra.squeeze()
trend[:front] = extra
k, n = np.linalg.lstsq(
np.c_[np.arange(back_first, back), np.ones(back - back_first)],
trend[back_first:back],
rcond=-1,
)[0]
extra = (np.arange(back + 1, trend.shape[0]) * np.c_[k] + np.c_[n]).T
if trend.ndim == 1:
extra = extra.squeeze()
trend[back + 1 :] = extra
return trend | Replace nan values on trend's end-points with least-squares extrapolated
values with regression considering npoints closest defined points. | _extrapolate_trend | python | statsmodels/statsmodels | statsmodels/tsa/seasonal/_seasonal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/seasonal/_seasonal.py | BSD-3-Clause |
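A behavior sketch for the helper above. The import path is version-dependent (in some releases the helper lives directly in statsmodels.tsa.seasonal), so treat the import as an assumption.

import numpy as np
from statsmodels.tsa.seasonal import _extrapolate_trend  # path may vary by version

trend = np.array([np.nan, np.nan, 2.0, 3.0, 4.0, 5.0, np.nan])
print(_extrapolate_trend(trend.copy(), npoints=3))
# -> [0. 1. 2. 3. 4. 5. 6.]: both ends extended along the fitted line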
def seasonal_mean(x, period):
"""
Return means for each period in x. period is an int that gives the
number of periods per cycle. E.g., 12 for monthly. NaNs are ignored
in the mean.
"""
return np.array([pd_nanmean(x[i::period], axis=0) for i in range(period)]) | Return means for each period in x. period is an int that gives the
number of periods per cycle. E.g., 12 for monthly. NaNs are ignored
in the mean. | seasonal_mean | python | statsmodels/statsmodels | statsmodels/tsa/seasonal/_seasonal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/seasonal/_seasonal.py | BSD-3-Clause |
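Using the helper above on a two-cycle toy series; the NaN at position 5 is simply skipped in that period's mean.

import numpy as np

x = np.arange(24, dtype=float)      # two "years" of monthly data
x[5] = np.nan
print(seasonal_mean(x, period=12))  # entry i averages x[i] and x[i + 12], NaNs ignored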
def seasonal_decompose(
x,
model="additive",
filt=None,
period=None,
two_sided=True,
extrapolate_trend=0,
):
"""
Seasonal decomposition using moving averages.
Parameters
----------
x : array_like
Time series. If 2d, individual series are in columns. x must contain 2
complete cycles.
model : {"additive", "multiplicative"}, optional
Type of seasonal component. Abbreviations are accepted.
filt : array_like, optional
The filter coefficients for filtering out the seasonal component.
The concrete moving average method used in filtering is determined by
two_sided.
period : int, optional
Period of the series (e.g., 1 for annual, 4 for quarterly, etc.). Must be
used if x is not a pandas object or if the index of x does not have a
frequency. Overrides default periodicity of x if x is a pandas object
with a timeseries index.
two_sided : bool, optional
The moving average method used in filtering.
If True (default), a centered moving average is computed using the
filt. If False, the filter coefficients are for past values only.
extrapolate_trend : int or 'freq', optional
If set to > 0, the trend resulting from the convolution is
linear least-squares extrapolated on both ends (or the single one
if two_sided is False) considering this many (+1) closest points.
If set to 'freq', use `freq` closest points. Setting this parameter
results in no NaN values in trend or resid components.
Returns
-------
DecomposeResult
An object with seasonal, trend, and resid attributes.
See Also
--------
statsmodels.tsa.filters.bk_filter.bkfilter
Baxter-King filter.
statsmodels.tsa.filters.cf_filter.cffilter
Christiano-Fitzgerald asymmetric, random walk filter.
statsmodels.tsa.filters.hp_filter.hpfilter
Hodrick-Prescott filter.
statsmodels.tsa.filters.convolution_filter
Linear filtering via convolution.
statsmodels.tsa.seasonal.STL
Season-Trend decomposition using LOESS.
Notes
-----
This is a naive decomposition. More sophisticated methods should
be preferred.
The additive model is Y[t] = T[t] + S[t] + e[t]
The multiplicative model is Y[t] = T[t] * S[t] * e[t]
The results are obtained by first estimating the trend by applying
a convolution filter to the data. The trend is then removed from the
series and the average of this de-trended series for each period is
the returned seasonal component.
"""
pfreq = period
pw = PandasWrapper(x)
if period is None:
if isinstance(x, (pd.Series, pd.DataFrame)):
index = x.index
if isinstance(index, pd.PeriodIndex):
pfreq = index.freq
else:
pfreq = getattr(index, "freq", None) or getattr(
index, "inferred_freq", None
)
x = array_like(x, "x", maxdim=2)
nobs = len(x)
if not np.all(np.isfinite(x)):
raise ValueError("This function does not handle missing values")
if model.startswith("m"):
if np.any(x <= 0):
raise ValueError(
"Multiplicative seasonality is not appropriate "
"for zero and negative values"
)
if period is None:
if pfreq is not None:
pfreq = freq_to_period(pfreq)
period = pfreq
else:
raise ValueError(
"You must specify a period or x must be a pandas object with "
"a PeriodIndex or a DatetimeIndex with a freq not set to None"
)
if x.shape[0] < 2 * pfreq:
raise ValueError(
f"x must have 2 complete cycles requires {2 * pfreq} "
f"observations. x only has {x.shape[0]} observation(s)"
)
if filt is None:
if period % 2 == 0: # split weights at ends
filt = np.array([0.5] + [1] * (period - 1) + [0.5]) / period
else:
filt = np.repeat(1.0 / period, period)
nsides = int(two_sided) + 1
trend = convolution_filter(x, filt, nsides)
if extrapolate_trend == "freq":
extrapolate_trend = period - 1
if extrapolate_trend > 0:
trend = _extrapolate_trend(trend, extrapolate_trend + 1)
if model.startswith("m"):
detrended = x / trend
else:
detrended = x - trend
period_averages = seasonal_mean(detrended, period)
if model.startswith("m"):
period_averages /= np.mean(period_averages, axis=0)
else:
period_averages -= np.mean(period_averages, axis=0)
seasonal = np.tile(period_averages.T, nobs // period + 1).T[:nobs]
if model.startswith("m"):
resid = x / seasonal / trend
else:
resid = detrended - seasonal
results = []
for s, name in zip(
(seasonal, trend, resid, x), ("seasonal", "trend", "resid", None)
):
results.append(pw.wrap(s.squeeze(), columns=name))
return DecomposeResult(
seasonal=results[0],
trend=results[1],
resid=results[2],
observed=results[3],
) | Seasonal decomposition using moving averages.
Parameters
----------
x : array_like
Time series. If 2d, individual series are in columns. x must contain 2
complete cycles.
model : {"additive", "multiplicative"}, optional
Type of seasonal component. Abbreviations are accepted.
filt : array_like, optional
The filter coefficients for filtering out the seasonal component.
The concrete moving average method used in filtering is determined by
two_sided.
period : int, optional
Period of the series (e.g., 1 for annual, 4 for quarterly, etc.). Must be
used if x is not a pandas object or if the index of x does not have a
frequency. Overrides default periodicity of x if x is a pandas object
with a timeseries index.
two_sided : bool, optional
The moving average method used in filtering.
If True (default), a centered moving average is computed using the
filt. If False, the filter coefficients are for past values only.
extrapolate_trend : int or 'freq', optional
If set to > 0, the trend resulting from the convolution is
linear least-squares extrapolated on both ends (or the single one
if two_sided is False) considering this many (+1) closest points.
If set to 'freq', use `freq` closest points. Setting this parameter
results in no NaN values in trend or resid components.
Returns
-------
DecomposeResult
An object with seasonal, trend, and resid attributes.
See Also
--------
statsmodels.tsa.filters.bk_filter.bkfilter
Baxter-King filter.
statsmodels.tsa.filters.cf_filter.cffilter
Christiano-Fitzgerald asymmetric, random walk filter.
statsmodels.tsa.filters.hp_filter.hpfilter
Hodrick-Prescott filter.
statsmodels.tsa.filters.convolution_filter
Linear filtering via convolution.
statsmodels.tsa.seasonal.STL
Season-Trend decomposition using LOESS.
Notes
-----
This is a naive decomposition. More sophisticated methods should
be preferred.
The additive model is Y[t] = T[t] + S[t] + e[t]
The multiplicative model is Y[t] = T[t] * S[t] * e[t]
The results are obtained by first estimating the trend by applying
a convolution filter to the data. The trend is then removed from the
series and the average of this de-trended series for each period is
the returned seasonal component. | seasonal_decompose | python | statsmodels/statsmodels | statsmodels/tsa/seasonal/_seasonal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/seasonal/_seasonal.py | BSD-3-Clause |
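A minimal seasonal_decompose run on a synthetic monthly series, with extrapolate_trend='freq' so the trend has no NaN ends; amplitudes are illustrative.

import numpy as np
import pandas as pd
from statsmodels.tsa.seasonal import seasonal_decompose

idx = pd.period_range("2000-01", periods=48, freq="M")
t = np.arange(48)
y = pd.Series(10 + 0.1 * t + 2 * np.sin(2 * np.pi * t / 12)
              + 0.2 * np.random.default_rng(8).standard_normal(48), index=idx)
res = seasonal_decompose(y, model="additive", extrapolate_trend="freq")
print(res.trend.isna().sum())   # 0: the NaN ends are extrapolated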
def observed(self):
"""Observed data"""
return self._observed | Observed data | observed | python | statsmodels/statsmodels | statsmodels/tsa/seasonal/_seasonal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/seasonal/_seasonal.py | BSD-3-Clause |
def seasonal(self):
"""The estimated seasonal component"""
return self._seasonal | The estimated seasonal component | seasonal | python | statsmodels/statsmodels | statsmodels/tsa/seasonal/_seasonal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/seasonal/_seasonal.py | BSD-3-Clause |
def trend(self):
"""The estimated trend component"""
return self._trend | The estimated trend component | trend | python | statsmodels/statsmodels | statsmodels/tsa/seasonal/_seasonal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/seasonal/_seasonal.py | BSD-3-Clause |
def resid(self):
"""The estimated residuals"""
return self._resid | The estimated residuals | resid | python | statsmodels/statsmodels | statsmodels/tsa/seasonal/_seasonal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/seasonal/_seasonal.py | BSD-3-Clause |
def weights(self):
"""The weights used in the robust estimation"""
return self._weights | The weights used in the robust estimation | weights | python | statsmodels/statsmodels | statsmodels/tsa/seasonal/_seasonal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/seasonal/_seasonal.py | BSD-3-Clause |
def nobs(self):
"""Number of observations"""
return self._observed.shape | Number of observations | nobs | python | statsmodels/statsmodels | statsmodels/tsa/seasonal/_seasonal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/seasonal/_seasonal.py | BSD-3-Clause |
def plot(
self,
observed=True,
seasonal=True,
trend=True,
resid=True,
weights=False,
):
"""
Plot estimated components
Parameters
----------
observed : bool
Include the observed series in the plot
seasonal : bool
Include the seasonal component in the plot
trend : bool
Include the trend component in the plot
resid : bool
Include the residual in the plot
weights : bool
Include the weights in the plot (if any)
Returns
-------
matplotlib.figure.Figure
            The figure instance containing the plot.
"""
from pandas.plotting import register_matplotlib_converters
from statsmodels.graphics.utils import _import_mpl
plt = _import_mpl()
register_matplotlib_converters()
series = [(self._observed, "Observed")] if observed else []
series += [(self.trend, "trend")] if trend else []
if self.seasonal.ndim == 1:
series += [(self.seasonal, "seasonal")] if seasonal else []
elif self.seasonal.ndim > 1:
if isinstance(self.seasonal, pd.DataFrame):
for col in self.seasonal.columns:
series += [(self.seasonal[col], "seasonal")] if seasonal else []
else:
for i in range(self.seasonal.shape[1]):
series += [(self.seasonal[:, i], "seasonal")] if seasonal else []
series += [(self.resid, "residual")] if resid else []
series += [(self.weights, "weights")] if weights else []
if isinstance(self._observed, (pd.DataFrame, pd.Series)):
nobs = self._observed.shape[0]
xlim = self._observed.index[0], self._observed.index[nobs - 1]
else:
xlim = (0, self._observed.shape[0] - 1)
fig, axs = plt.subplots(len(series), 1, sharex=True)
        for i, (ax, (ser, def_name)) in enumerate(zip(axs, series)):
            if def_name != "residual":
                ax.plot(ser)
            else:
                ax.plot(ser, marker="o", linestyle="none")
                ax.plot(xlim, (0, 0), color="#000000", zorder=-3)
            name = getattr(ser, "name", def_name)
            if def_name != "Observed":
                name = name.capitalize()
            title = ax.set_title if i == 0 and observed else ax.set_ylabel
            title(name)
ax.set_xlim(xlim)
fig.tight_layout()
return fig | Plot estimated components
Parameters
----------
observed : bool
Include the observed series in the plot
seasonal : bool
Include the seasonal component in the plot
trend : bool
Include the trend component in the plot
resid : bool
Include the residual in the plot
weights : bool
Include the weights in the plot (if any)
Returns
-------
matplotlib.figure.Figure
The figure instance containing the plot. | plot | python | statsmodels/statsmodels | statsmodels/tsa/seasonal/_seasonal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/seasonal/_seasonal.py | BSD-3-Clause |
def summary(self):
"""Return summary"""
title = self.title + ". " + self.h0 + ". " \
+ self.conclusion_str + self.signif_str + "."
data_fmt = {"data_fmts": ["%#0.4g", "%#0.4g", "%#0.3F", "%s"]}
html_data_fmt = dict(data_fmt)
html_data_fmt["data_fmts"] = ["<td>" + i + "</td>"
for i in html_data_fmt["data_fmts"]]
return SimpleTable(data=[[self.test_statistic, self.crit_value,
self.pvalue, str(self.df)]],
headers=['Test statistic', 'Critical value',
'p-value', 'df'],
title=title,
txt_fmt=data_fmt,
html_fmt=html_data_fmt,
ltx_fmt=data_fmt) | Return summary | summary | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/hypothesis_test_results.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/hypothesis_test_results.py | BSD-3-Clause |
def cov(self, orth=False):
"""
Compute asymptotic standard errors for impulse response coefficients
Notes
-----
Lütkepohl eq 3.7.5
Returns
        -------
        covs : ndarray ((periods + 1) x neqs**2 x neqs**2)
            Covariance of the impulse response coefficients at each horizon
        """
if orth:
return self._orth_cov()
covs = self._empty_covm(self.periods + 1)
covs[0] = np.zeros((self.neqs ** 2, self.neqs ** 2))
for i in range(1, self.periods + 1):
Gi = self.G[i - 1]
covs[i] = Gi @ self.cov_a @ Gi.T
return covs | Compute asymptotic standard errors for impulse response coefficients
Notes
-----
Lütkepohl eq 3.7.5
Returns
-------
covs : ndarray ((periods + 1) x neqs**2 x neqs**2)
    Covariance of the impulse response coefficients at each horizon | cov | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/irf.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/irf.py | BSD-3-Clause |
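For reference, a sketch of Lütkepohl's eq. 3.7.5 that the loop above implements, with \(G_i\) the derivative of the i-th MA coefficient matrix with respect to the stacked VAR coefficients \(\alpha\):

\[
\widehat{\operatorname{Cov}}\big(\operatorname{vec}\Phi_i\big) = G_i\,\Sigma_{\hat\alpha}\,G_i', \qquad
G_i = \frac{\partial\,\operatorname{vec}(\Phi_i)}{\partial \alpha'}
\]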
def errband_mc(self, orth=False, svar=False, repl=1000,
signif=0.05, seed=None, burn=100):
"""
IRF Monte Carlo integrated error bands
"""
model = self.model
periods = self.periods
if svar:
return model.sirf_errband_mc(orth=orth, repl=repl, steps=periods,
signif=signif, seed=seed,
burn=burn, cum=False)
else:
return model.irf_errband_mc(orth=orth, repl=repl, steps=periods,
signif=signif, seed=seed,
burn=burn, cum=False) | IRF Monte Carlo integrated error bands | errband_mc | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/irf.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/irf.py | BSD-3-Clause |
def err_band_sz1(self, orth=False, svar=False, repl=1000,
signif=0.05, seed=None, burn=100, component=None):
"""
IRF Sims-Zha error band method 1. Assumes symmetric error bands around
mean.
Parameters
----------
orth : bool, default False
Compute orthogonalized impulse responses
repl : int, default 1000
Number of MC replications
signif : float (0 < signif < 1)
Significance level for error bars, defaults to 95% CI
seed : int, default None
np.random seed
burn : int, default 100
Number of initial simulated obs to discard
component : neqs x neqs array, default to largest for each
Index of column of eigenvector/value to use for each error band
Note: period of impulse (t=0) is not included when computing
            principal component
References
----------
Sims, Christopher A., and Tao Zha. 1999. "Error Bands for Impulse
Response". Econometrica 67: 1113-1155.
"""
model = self.model
periods = self.periods
irfs = self._choose_irfs(orth, svar)
neqs = self.neqs
irf_resim = model.irf_resim(orth=orth, repl=repl, steps=periods,
seed=seed, burn=burn)
q = util.norm_signif_level(signif)
        W, eigva, k = self._eigval_decomp_SZ(irf_resim)
if component is not None:
if np.shape(component) != (neqs,neqs):
raise ValueError("Component array must be " + str(neqs) + " x " + str(neqs))
if np.argmax(component) >= neqs*periods:
raise ValueError("Atleast one of the components does not exist")
else:
k = component
        # here take the kth column of W, which we determine by finding the largest eigenvalue of the covariance matrix
lower = np.copy(irfs)
upper = np.copy(irfs)
for i in range(neqs):
for j in range(neqs):
lower[1:,i,j] = irfs[1:,i,j] + W[i,j,:,k[i,j]]*q*np.sqrt(eigva[i,j,k[i,j]])
upper[1:,i,j] = irfs[1:,i,j] - W[i,j,:,k[i,j]]*q*np.sqrt(eigva[i,j,k[i,j]])
return lower, upper | IRF Sims-Zha error band method 1. Assumes symmetric error bands around
mean.
Parameters
----------
orth : bool, default False
Compute orthogonalized impulse responses
repl : int, default 1000
Number of MC replications
signif : float (0 < signif < 1)
Significance level for error bars, defaults to 95% CI
seed : int, default None
np.random seed
burn : int, default 100
Number of initial simulated obs to discard
component : neqs x neqs array, default to largest for each
Index of column of eigenvector/value to use for each error band
Note: period of impulse (t=0) is not included when computing
principal component
References
----------
Sims, Christopher A., and Tao Zha. 1999. "Error Bands for Impulse
Response". Econometrica 67: 1113-1155. | err_band_sz1 | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/irf.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/irf.py | BSD-3-Clause |
def err_band_sz2(self, orth=False, svar=False, repl=1000, signif=0.05,
seed=None, burn=100, component=None):
"""
IRF Sims-Zha error band method 2.
        This method does not assume symmetric error bands around the mean.
Parameters
----------
orth : bool, default False
Compute orthogonalized impulse responses
repl : int, default 1000
Number of MC replications
signif : float (0 < signif < 1)
Significance level for error bars, defaults to 95% CI
seed : int, default None
np.random seed
burn : int, default 100
Number of initial simulated obs to discard
component : neqs x neqs array, default to largest for each
Index of column of eigenvector/value to use for each error band
Note: period of impulse (t=0) is not included when computing
            principal component
References
----------
Sims, Christopher A., and Tao Zha. 1999. "Error Bands for Impulse
Response". Econometrica 67: 1113-1155.
"""
model = self.model
periods = self.periods
irfs = self._choose_irfs(orth, svar)
neqs = self.neqs
irf_resim = model.irf_resim(orth=orth, repl=repl, steps=periods, seed=seed,
                                    burn=burn)
W, eigva, k = self._eigval_decomp_SZ(irf_resim)
if component is not None:
if np.shape(component) != (neqs,neqs):
raise ValueError("Component array must be " + str(neqs) + " x " + str(neqs))
if np.argmax(component) >= neqs*periods:
raise ValueError("Atleast one of the components does not exist")
else:
k = component
gamma = np.zeros((repl, periods+1, neqs, neqs))
for p in range(repl):
for i in range(neqs):
for j in range(neqs):
gamma[p,1:,i,j] = W[i,j,k[i,j],:] * irf_resim[p,1:,i,j]
gamma_sort = np.sort(gamma, axis=0) #sort to get quantiles
indx = round(signif/2*repl)-1,round((1-signif/2)*repl)-1
lower = np.copy(irfs)
upper = np.copy(irfs)
for i in range(neqs):
for j in range(neqs):
lower[:,i,j] = irfs[:,i,j] + gamma_sort[indx[0],:,i,j]
upper[:,i,j] = irfs[:,i,j] + gamma_sort[indx[1],:,i,j]
return lower, upper | IRF Sims-Zha error band method 2.
This method does not assume symmetric error bands around the mean.
Parameters
----------
orth : bool, default False
Compute orthogonalized impulse responses
repl : int, default 1000
Number of MC replications
signif : float (0 < signif < 1)
Significance level for error bars, defaults to 95% CI
seed : int, default None
np.random seed
burn : int, default 100
Number of initial simulated obs to discard
component : neqs x neqs array, default to largest for each
Index of column of eigenvector/value to use for each error band
Note: period of impulse (t=0) is not included when computing
principal component
References
----------
Sims, Christopher A., and Tao Zha. 1999. "Error Bands for Impulse
Response". Econometrica 67: 1113-1155. | err_band_sz2 | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/irf.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/irf.py | BSD-3-Clause |
def err_band_sz3(self, orth=False, svar=False, repl=1000, signif=0.05,
seed=None, burn=100, component=None):
"""
IRF Sims-Zha error band method 3. Does not assume symmetric error bands around mean.
Parameters
----------
orth : bool, default False
Compute orthogonalized impulse responses
repl : int, default 1000
Number of MC replications
signif : float (0 < signif < 1)
Significance level for error bars, defaults to 95% CI
seed : int, default None
np.random seed
burn : int, default 100
Number of initial simulated obs to discard
component : vector length neqs, default to largest for each
Index of column of eigenvector/value to use for each error band
Note: period of impulse (t=0) is not included when computing
            principal component
References
----------
Sims, Christopher A., and Tao Zha. 1999. "Error Bands for Impulse
Response". Econometrica 67: 1113-1155.
"""
model = self.model
periods = self.periods
irfs = self._choose_irfs(orth, svar)
neqs = self.neqs
irf_resim = model.irf_resim(orth=orth, repl=repl, steps=periods,
                                    seed=seed, burn=burn)
stack = np.zeros((neqs, repl, periods*neqs))
#stack left to right, up and down
for p in range(repl):
for i in range(neqs):
stack[i, p,:] = np.ravel(irf_resim[p,1:,:,i].T)
        stack_cov = np.zeros((neqs, periods*neqs, periods*neqs))
W = np.zeros((neqs, periods*neqs, periods*neqs))
eigva = np.zeros((neqs, periods*neqs))
k = np.zeros(neqs, dtype=int)
if component is not None:
if np.size(component) != (neqs):
raise ValueError("Component array must be of length " + str(neqs))
if np.argmax(component) >= neqs*periods:
raise ValueError("Atleast one of the components does not exist")
else:
k = component
#compute for eigen decomp for each stack
for i in range(neqs):
stack_cov[i] = np.cov(stack[i],rowvar=0)
W[i], eigva[i], k[i] = util.eigval_decomp(stack_cov[i])
gamma = np.zeros((repl, periods+1, neqs, neqs))
for p in range(repl):
for j in range(neqs):
for i in range(neqs):
gamma[p,1:,i,j] = W[j,k[j],i*periods:(i+1)*periods] * irf_resim[p,1:,i,j]
if i == neqs-1:
gamma[p,1:,i,j] = W[j,k[j],i*periods:] * irf_resim[p,1:,i,j]
gamma_sort = np.sort(gamma, axis=0) #sort to get quantiles
indx = round(signif/2*repl)-1,round((1-signif/2)*repl)-1
lower = np.copy(irfs)
upper = np.copy(irfs)
for i in range(neqs):
for j in range(neqs):
lower[:,i,j] = irfs[:,i,j] + gamma_sort[indx[0],:,i,j]
upper[:,i,j] = irfs[:,i,j] + gamma_sort[indx[1],:,i,j]
return lower, upper | IRF Sims-Zha error band method 3. Does not assume symmetric error bands around mean.
Parameters
----------
orth : bool, default False
Compute orthogonalized impulse responses
repl : int, default 1000
Number of MC replications
signif : float (0 < signif < 1)
Significance level for error bars, defaults to 95% CI
seed : int, default None
np.random seed
burn : int, default 100
Number of initial simulated obs to discard
component : vector length neqs, default to largest for each
Index of column of eigenvector/value to use for each error band
Note: period of impulse (t=0) is not included when computing
principal component
References
----------
Sims, Christopher A., and Tao Zha. 1999. "Error Bands for Impulse
Response". Econometrica 67: 1113-1155. | err_band_sz3 | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/irf.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/irf.py | BSD-3-Clause |
def _eigval_decomp_SZ(self, irf_resim):
"""
Returns
-------
W: array of eigenvectors
eigva: list of eigenvalues
k: matrix indicating column # of largest eigenvalue for each c_i,j
"""
neqs = self.neqs
periods = self.periods
cov_hold = np.zeros((neqs, neqs, periods, periods))
for i in range(neqs):
for j in range(neqs):
cov_hold[i,j,:,:] = np.cov(irf_resim[:,1:,i,j],rowvar=0)
W = np.zeros((neqs, neqs, periods, periods))
eigva = np.zeros((neqs, neqs, periods, 1))
k = np.zeros((neqs, neqs), dtype=int)
for i in range(neqs):
for j in range(neqs):
W[i,j,:,:], eigva[i,j,:,0], k[i,j] = util.eigval_decomp(cov_hold[i,j,:,:])
return W, eigva, k | Returns
-------
W: array of eigenvectors
eigva: list of eigenvalues
k: matrix indicating column # of largest eigenvalue for each c_i,j | _eigval_decomp_SZ | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/irf.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/irf.py | BSD-3-Clause |
def cum_effect_cov(self, orth=False):
"""
Compute asymptotic standard errors for cumulative impulse response
coefficients
Parameters
----------
orth : bool
Notes
-----
eq. 3.7.7 (non-orth), 3.7.10 (orth)
Returns
        -------
        covs : ndarray ((periods + 1) x neqs**2 x neqs**2)
            Covariance of the cumulative impulse response coefficients
        """
Ik = np.eye(self.neqs)
PIk = np.kron(self.P.T, Ik)
F = 0.
covs = self._empty_covm(self.periods + 1)
for i in range(self.periods + 1):
if i > 0:
F = F + self.G[i - 1]
if orth:
if i == 0:
apiece = 0
else:
Bn = np.dot(PIk, F)
apiece = Bn @ self.cov_a @ Bn.T
Bnbar = np.dot(np.kron(Ik, self.cum_effects[i]), self.H)
bpiece = (Bnbar @ self.cov_sig @ Bnbar.T) / self.T
covs[i] = apiece + bpiece
else:
if i == 0:
covs[i] = np.zeros((self.neqs**2, self.neqs**2))
continue
covs[i] = F @ self.cov_a @ F.T
return covs | Compute asymptotic standard errors for cumulative impulse response
coefficients
Parameters
----------
orth : bool
Notes
-----
eq. 3.7.7 (non-orth), 3.7.10 (orth)
Returns
-------
covs : ndarray ((periods + 1) x neqs**2 x neqs**2)
    Covariance of the cumulative impulse response coefficients | cum_effect_cov | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/irf.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/irf.py | BSD-3-Clause |
def cum_errband_mc(self, orth=False, repl=1000,
signif=0.05, seed=None, burn=100):
"""
IRF Monte Carlo integrated error bands of cumulative effect
"""
model = self.model
periods = self.periods
return model.irf_errband_mc(orth=orth, repl=repl,
steps=periods, signif=signif,
seed=seed, burn=burn, cum=True) | IRF Monte Carlo integrated error bands of cumulative effect | cum_errband_mc | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/irf.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/irf.py | BSD-3-Clause |
def lr_effect_cov(self, orth=False):
"""
Returns
        -------
        cov : ndarray (neqs**2 x neqs**2)
            Asymptotic covariance of the long-run effects
        """
lre = self.lr_effects
Finfty = np.kron(np.tile(lre.T, self.lags), lre)
Ik = np.eye(self.neqs)
if orth:
Binf = np.dot(np.kron(self.P.T, np.eye(self.neqs)), Finfty)
Binfbar = np.dot(np.kron(Ik, lre), self.H)
return (Binf @ self.cov_a @ Binf.T +
Binfbar @ self.cov_sig @ Binfbar.T)
else:
return Finfty @ self.cov_a @ Finfty.T | Returns
-------
cov : ndarray (neqs**2 x neqs**2)
    Asymptotic covariance of the long-run effects | lr_effect_cov | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/irf.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/irf.py | BSD-3-Clause |
def fit(self, A_guess=None, B_guess=None, maxlags=None, method='ols',
ic=None, trend='c', verbose=False, s_method='mle',
solver="bfgs", override=False, maxiter=500, maxfun=500):
"""
Fit the SVAR model and solve for structural parameters
Parameters
----------
A_guess : array_like, optional
A vector of starting values for all parameters to be estimated
in A.
B_guess : array_like, optional
A vector of starting values for all parameters to be estimated
in B.
maxlags : int
Maximum number of lags to check for order selection, defaults to
12 * (nobs/100.)**(1./4), see select_order function
method : {'ols'}
Estimation method to use
ic : {'aic', 'fpe', 'hqic', 'bic', None}
Information criterion to use for VAR order selection.
aic : Akaike
fpe : Final prediction error
hqic : Hannan-Quinn
bic : Bayesian a.k.a. Schwarz
verbose : bool, default False
Print order selection output to the screen
        trend : str {"c", "ct", "ctt", "n"}
            "c" - add constant
            "ct" - constant and trend
            "ctt" - constant, linear and quadratic trend
            "n" - no constant, no trend
Note that these are prepended to the columns of the dataset.
s_method : {'mle'}
Estimation method for structural parameters
solver : {'nm', 'newton', 'bfgs', 'cg', 'ncg', 'powell'}
Solution method
See statsmodels.base for details
override : bool, default False
If True, returns estimates of A and B without checking
order or rank condition
maxiter : int, default 500
Number of iterations to perform in solution method
maxfun : int
Number of function evaluations to perform
Notes
-----
Lütkepohl pp. 146-153
Hamilton pp. 324-336
Returns
-------
est : SVARResults
"""
lags = maxlags
if ic is not None:
selections = self.select_order(maxlags=maxlags, verbose=verbose)
if ic not in selections:
raise ValueError("%s not recognized, must be among %s"
% (ic, sorted(selections)))
lags = selections[ic]
if verbose:
print('Using %d based on %s criterion' % (lags, ic))
else:
if lags is None:
lags = 1
self.nobs = len(self.endog) - lags
# initialize starting parameters
start_params = self._get_init_params(A_guess, B_guess)
return self._estimate_svar(start_params, lags, trend=trend,
solver=solver, override=override,
maxiter=maxiter, maxfun=maxfun) | Fit the SVAR model and solve for structural parameters
Parameters
----------
A_guess : array_like, optional
A vector of starting values for all parameters to be estimated
in A.
B_guess : array_like, optional
A vector of starting values for all parameters to be estimated
in B.
maxlags : int
Maximum number of lags to check for order selection, defaults to
12 * (nobs/100.)**(1./4), see select_order function
method : {'ols'}
Estimation method to use
ic : {'aic', 'fpe', 'hqic', 'bic', None}
Information criterion to use for VAR order selection.
aic : Akaike
fpe : Final prediction error
hqic : Hannan-Quinn
bic : Bayesian a.k.a. Schwarz
verbose : bool, default False
Print order selection output to the screen
trend : str {"c", "ct", "ctt", "n"}
"c" - add constant
"ct" - constant and trend
"ctt" - constant, linear and quadratic trend
"n" - no constant, no trend
Note that these are prepended to the columns of the dataset.
s_method : {'mle'}
Estimation method for structural parameters
solver : {'nm', 'newton', 'bfgs', 'cg', 'ncg', 'powell'}
Solution method
See statsmodels.base for details
override : bool, default False
If True, returns estimates of A and B without checking
order or rank condition
maxiter : int, default 500
Number of iterations to perform in solution method
maxfun : int
Number of function evaluations to perform
Notes
-----
Lütkepohl pp. 146-153
Hamilton pp. 324-336
Returns
-------
est : SVARResults | fit | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/svar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/svar_model.py | BSD-3-Clause |
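A hedged sketch of setting up and fitting an A-type SVAR; the object-dtype 'E' entries mark the structural coefficients to be estimated (the data choice is illustrative):

# Recursive A-model SVAR on two differenced macro series.
import numpy as np
import statsmodels.api as sm
from statsmodels.tsa.vector_ar.svar_model import SVAR

data = sm.datasets.macrodata.load_pandas().data[["realgdp", "realcons"]]
data = data.diff().dropna()
A = np.asarray([[1, 0], ["E", 1]], dtype=object)  # 'E' = estimated entry
res = SVAR(data, svar_type="A", A=A).fit(maxlags=2, solver="bfgs")
print(res.A)  # estimated structural matrix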
def _get_init_params(self, A_guess, B_guess):
"""
        Returns the given starting values, or .1 for each parameter if none are given.
"""
var_type = self.svar_type.lower()
n_masked_a = self.A_mask.sum()
if var_type in ['ab', 'a']:
if A_guess is None:
A_guess = np.array([.1]*n_masked_a)
else:
if len(A_guess) != n_masked_a:
msg = 'len(A_guess) = %s, there are %s parameters in A'
raise ValueError(msg % (len(A_guess), n_masked_a))
else:
A_guess = []
n_masked_b = self.B_mask.sum()
if var_type in ['ab', 'b']:
if B_guess is None:
B_guess = np.array([.1]*n_masked_b)
else:
if len(B_guess) != n_masked_b:
msg = 'len(B_guess) = %s, there are %s parameters in B'
raise ValueError(msg % (len(B_guess), n_masked_b))
else:
B_guess = []
        return np.r_[A_guess, B_guess] | Returns the given starting values, or .1 for each parameter if none are given. | _get_init_params | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/svar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/svar_model.py | BSD-3-Clause |
def _estimate_svar(self, start_params, lags, maxiter, maxfun,
trend='c', solver="nm", override=False):
"""
lags : int
trend : {str, None}
As per above
"""
k_trend = util.get_trendorder(trend)
y = self.endog
z = util.get_var_endog(y, lags, trend=trend, has_constant='raise')
y_sample = y[lags:]
        # Lütkepohl p75, about 5x faster than stated formula
var_params = np.linalg.lstsq(z, y_sample, rcond=-1)[0]
resid = y_sample - np.dot(z, var_params)
# Unbiased estimate of covariance matrix $\Sigma_u$ of the white noise
# process $u$
# equivalent definition
# .. math:: \frac{1}{T - Kp - 1} Y^\prime (I_T - Z (Z^\prime Z)^{-1}
# Z^\prime) Y
# Ref: Lutkepohl p.75
# df_resid right now is T - Kp - 1, which is a suggested correction
avobs = len(y_sample)
df_resid = avobs - (self.neqs * lags + k_trend)
sse = np.dot(resid.T, resid)
#TODO: should give users the option to use a dof correction or not
omega = sse / df_resid
self.sigma_u = omega
A, B = self._solve_AB(start_params, override=override,
solver=solver,
maxiter=maxiter)
A_mask = self.A_mask
B_mask = self.B_mask
return SVARResults(y, z, var_params, omega, lags,
names=self.endog_names, trend=trend,
dates=self.data.dates, model=self,
A=A, B=B, A_mask=A_mask, B_mask=B_mask) | lags : int
trend : {str, None}
As per above | _estimate_svar | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/svar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/svar_model.py | BSD-3-Clause |
def loglike(self, params):
"""
Loglikelihood for SVAR model
Notes
-----
This method assumes that the autoregressive parameters are
first estimated, then likelihood with structural parameters
is estimated
"""
#TODO: this does not look robust if A or B is None
A = self.A
B = self.B
A_mask = self.A_mask
B_mask = self.B_mask
A_len = len(A[A_mask])
B_len = len(B[B_mask])
if A is not None:
A[A_mask] = params[:A_len]
if B is not None:
B[B_mask] = params[A_len:A_len+B_len]
nobs = self.nobs
neqs = self.neqs
sigma_u = self.sigma_u
W = np.dot(npl.inv(B),A)
trc_in = np.dot(np.dot(W.T,W),sigma_u)
        # ln(det(B)^2) computed via det(B B') = det(B)^2; the elementwise
        # square B**2 only agrees with this when B is diagonal
        sign, b_logdet = slogdet(np.dot(B, B.T))
        b_slogdet = sign * b_logdet
likl = -nobs/2. * (neqs * np.log(2 * np.pi) -
np.log(npl.det(A)**2) + b_slogdet +
np.trace(trc_in))
return likl | Loglikelihood for SVAR model
Notes
-----
This method assumes that the autoregressive parameters are
first estimated, then likelihood with structural parameters
is estimated | loglike | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/svar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/svar_model.py | BSD-3-Clause |
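A sketch of the SVAR log-likelihood the method evaluates, with \(W = B^{-1}A\) and \(\hat\Sigma_u\) the residual covariance:

\[
\ln L(A,B) = -\frac{T}{2}\Big(K \ln 2\pi - \ln|\det A|^{2} + \ln|\det B|^{2}
+ \operatorname{tr}\big(W' W\, \hat\Sigma_u\big)\Big)
\]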
def score(self, AB_mask):
"""
Return the gradient of the loglike at AB_mask.
Parameters
----------
AB_mask : unknown values of A and B matrix concatenated
Notes
-----
Return numerical gradient
"""
loglike = self.loglike
if AB_mask.ndim > 1:
AB_mask = AB_mask.ravel()
grad = approx_fprime(AB_mask, loglike, epsilon=1e-8)
# workaround shape of grad if only one parameter #9302
if AB_mask.size == 1 and grad.ndim == 2:
grad = grad.ravel()
return grad | Return the gradient of the loglike at AB_mask.
Parameters
----------
AB_mask : unknown values of A and B matrix concatenated
Notes
-----
Return numerical gradient | score | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/svar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/svar_model.py | BSD-3-Clause |
def hessian(self, AB_mask):
"""
Returns numerical hessian.
"""
loglike = self.loglike
if AB_mask.ndim > 1:
AB_mask = AB_mask.ravel()
return approx_hess(AB_mask, loglike) | Returns numerical hessian. | hessian | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/svar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/svar_model.py | BSD-3-Clause |
def _solve_AB(self, start_params, maxiter, override=False, solver='bfgs'):
"""
Solves for MLE estimate of structural parameters
Parameters
----------
override : bool, default False
If True, returns estimates of A and B without checking
order or rank condition
solver : str or None, optional
            Solver to be used. The default is 'bfgs'. Other choices are
            'nm' (Nelder-Mead), 'newton' (Newton-Raphson), 'cg' (conjugate
            gradient), 'ncg' (Newton conjugate gradient), and 'powell'.
maxiter : int, optional
The maximum number of iterations. Default is 500.
Returns
-------
A_solve, B_solve: ML solutions for A, B matrices
"""
#TODO: this could stand a refactor
A_mask = self.A_mask
B_mask = self.B_mask
A = self.A
B = self.B
A_len = len(A[A_mask])
A[A_mask] = start_params[:A_len]
B[B_mask] = start_params[A_len:]
if not override:
J = self._compute_J(A, B)
self.check_order(J)
self.check_rank(J)
else: #TODO: change to a warning?
print("Order/rank conditions have not been checked")
if solver == "bfgs":
kwargs = {"gtol": 1e-5}
else:
kwargs = {}
retvals = super().fit(start_params=start_params,
method=solver, maxiter=maxiter,
disp=False, **kwargs).params
if retvals.ndim > 1:
retvals = retvals.ravel()
A[A_mask] = retvals[:A_len]
B[B_mask] = retvals[A_len:]
return A, B | Solves for MLE estimate of structural parameters
Parameters
----------
override : bool, default False
If True, returns estimates of A and B without checking
order or rank condition
solver : str or None, optional
Solver to be used. The default is 'nm' (Nelder-Mead). Other
choices are 'bfgs', 'newton' (Newton-Raphson), 'cg'
conjugate, 'ncg' (non-conjugate gradient), and 'powell'.
maxiter : int, optional
The maximum number of iterations. Default is 500.
Returns
-------
A_solve, B_solve: ML solutions for A, B matrices | _solve_AB | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/svar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/svar_model.py | BSD-3-Clause |
def orth_ma_rep(self, maxn=10, P=None):
"""
Unavailable for SVAR
"""
raise NotImplementedError | Unavailable for SVAR | orth_ma_rep | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/svar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/svar_model.py | BSD-3-Clause |
def svar_ma_rep(self, maxn=10, P=None):
"""
Compute Structural MA coefficient matrices using MLE
of A, B
"""
if P is None:
A_solve = self.A_solve
B_solve = self.B_solve
P = np.dot(npl.inv(A_solve), B_solve)
ma_mats = self.ma_rep(maxn=maxn)
return np.array([np.dot(coefs, P) for coefs in ma_mats]) | Compute Structural MA coefficient matrices using MLE
of A, B | svar_ma_rep | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/svar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/svar_model.py | BSD-3-Clause |
def irf(self, periods=10, var_order=None):
"""
Analyze structural impulse responses to shocks in system
Parameters
----------
periods : int
Returns
-------
irf : IRAnalysis
"""
A = self.A
        B = self.B
P = np.dot(npl.inv(A), B)
return IRAnalysis(self, P=P, periods=periods, svar=True) | Analyze structural impulse responses to shocks in system
Parameters
----------
periods : int
Returns
-------
irf : IRAnalysis | irf | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/svar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/svar_model.py | BSD-3-Clause |
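Continuing the hypothetical SVAR sketch above, the structural impulse matrix is P = A^{-1} B:

# Structural IRFs from the fitted SVAR (`res` from the earlier sketch).
irf = res.irf(periods=15)
irf.plot(impulse="realgdp")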
def sirf_errband_mc(self, orth=False, repl=1000, steps=10,
signif=0.05, seed=None, burn=100, cum=False):
"""
        Compute Monte Carlo integrated error bands, assuming normally
        distributed disturbances, for impulse response functions
Parameters
----------
orth : bool, default False
Compute orthogonalized impulse response error bands
repl : int
number of Monte Carlo replications to perform
steps : int, default 10
number of impulse response periods
        signif : float (0 < signif < 1)
Significance level for error bars, defaults to 95% CI
seed : int
np.random.seed for replications
burn : int
number of initial observations to discard for simulation
cum : bool, default False
produce cumulative irf error bands
Notes
-----
Lütkepohl (2005) Appendix D
Returns
-------
Tuple of lower and upper arrays of ma_rep monte carlo standard errors
"""
neqs = self.neqs
k_ar = self.k_ar
coefs = self.coefs
sigma_u = self.sigma_u
intercept = self.intercept
nobs = self.nobs
ma_coll = np.zeros((repl, steps + 1, neqs, neqs))
A = self.A
B = self.B
A_mask = self.A_mask
B_mask = self.B_mask
A_pass = self.model.A_original
B_pass = self.model.B_original
s_type = self.model.svar_type
g_list = []
def agg(impulses):
if cum:
return impulses.cumsum(axis=0)
return impulses
opt_A = A[A_mask]
opt_B = B[B_mask]
for i in range(repl):
# discard first hundred to correct for starting bias
sim = util.varsim(coefs, intercept, sigma_u, seed=seed,
steps=nobs + burn)
sim = sim[burn:]
smod = SVAR(sim, svar_type=s_type, A=A_pass, B=B_pass)
if i == 10:
# Use first 10 to update starting val for remainder of fits
mean_AB = np.mean(g_list, axis=0)
split = len(A[A_mask])
opt_A = mean_AB[:split]
opt_B = mean_AB[split:]
sres = smod.fit(maxlags=k_ar, A_guess=opt_A, B_guess=opt_B)
if i < 10:
# save estimates for starting val if in first 10
g_list.append(np.append(sres.A[A_mask].tolist(),
sres.B[B_mask].tolist()))
ma_coll[i] = agg(sres.svar_ma_rep(maxn=steps))
ma_sort = np.sort(ma_coll, axis=0) # sort to get quantiles
index = (int(round(signif / 2 * repl) - 1),
int(round((1 - signif / 2) * repl) - 1))
lower = ma_sort[index[0], :, :, :]
upper = ma_sort[index[1], :, :, :]
return lower, upper | Compute Monte Carlo integrated error bands assuming normally
distributed for impulse response functions
Parameters
----------
orth : bool, default False
Compute orthogonalized impulse response error bands
repl : int
number of Monte Carlo replications to perform
steps : int, default 10
number of impulse response periods
signif : float (0 < signif < 1)
Significance level for error bars, defaults to 95% CI
seed : int
np.random.seed for replications
burn : int
number of initial observations to discard for simulation
cum : bool, default False
produce cumulative irf error bands
Notes
-----
Lütkepohl (2005) Appendix D
Returns
-------
Tuple of lower and upper arrays of ma_rep monte carlo standard errors | sirf_errband_mc | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/svar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/svar_model.py | BSD-3-Clause |
def plot_mts(Y, names=None, index=None):
"""
Plot multiple time series
"""
import matplotlib.pyplot as plt
k = Y.shape[1]
rows, cols = k, 1
fig = plt.figure(figsize=(10, 10))
for j in range(k):
ts = Y[:, j]
ax = fig.add_subplot(rows, cols, j+1)
if index is not None:
ax.plot(index, ts)
else:
ax.plot(ts)
if names is not None:
ax.set_title(names[j])
return fig | Plot multiple time series | plot_mts | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/plotting.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/plotting.py | BSD-3-Clause |
def plot_with_error(y, error, x=None, axes=None, value_fmt='k',
error_fmt='k--', alpha=0.05, stderr_type = 'asym'):
"""
Make plot with optional error bars
Parameters
----------
    y : array_like
        Values to plot.
    error : array_like or None
        Error bounds; interpreted according to stderr_type.
"""
import matplotlib.pyplot as plt
if axes is None:
axes = plt.gca()
x = x if x is not None else lrange(len(y))
def plot_action(y, fmt):
return axes.plot(x, y, fmt)
plot_action(y, value_fmt)
if error is not None:
if stderr_type == 'asym':
q = util.norm_signif_level(alpha)
plot_action(y - q * error, error_fmt)
plot_action(y + q * error, error_fmt)
if stderr_type in ('mc','sz1','sz2','sz3'):
plot_action(error[0], error_fmt)
plot_action(error[1], error_fmt) | Make plot with optional error bars
Parameters
----------
y : array_like
    Values to plot.
error : array_like or None
    Error bounds; interpreted according to stderr_type. | plot_with_error | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/plotting.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/plotting.py | BSD-3-Clause |
def plot_full_acorr(acorr, fontsize=8, linewidth=8, xlabel=None,
err_bound=None):
"""
Parameters
    ----------
    acorr : ndarray (nlags x k x k)
        Autocorrelation matrices to plot
    fontsize : int
        Font size for tick labels
    linewidth : int
        Line width for the autocorrelation bars
    xlabel : str, optional
        Label for the x-axes
    err_bound : float, optional
        If given, draw horizontal error-bound lines at +/- err_bound
    """
import matplotlib.pyplot as plt
config = MPLConfigurator()
config.set_fontsize(fontsize)
k = acorr.shape[1]
fig, axes = plt.subplots(k, k, figsize=(10, 10), squeeze=False)
for i in range(k):
for j in range(k):
ax = axes[i][j]
acorr_plot(acorr[:, i, j], linewidth=linewidth,
xlabel=xlabel, ax=ax)
if err_bound is not None:
ax.axhline(err_bound, color='k', linestyle='--')
ax.axhline(-err_bound, color='k', linestyle='--')
adjust_subplots()
config.revert()
return fig | Parameters
----------
acorr : ndarray (nlags x k x k)
    Autocorrelation matrices to plot
fontsize : int
    Font size for tick labels
linewidth : int
    Line width for the autocorrelation bars
xlabel : str, optional
    Label for the x-axes
err_bound : float, optional
    If given, draw horizontal error-bound lines at +/- err_bound | plot_full_acorr | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/plotting.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/plotting.py | BSD-3-Clause |
def irf_grid_plot(values, stderr, impcol, rescol, names, title,
signif=0.05, hlines=None, subplot_params=None,
plot_params=None, figsize=(10,10), stderr_type='asym'):
"""
Reusable function to make flexible grid plots of impulse responses and
    cumulative effects
values : (T + 1) x k x k
stderr : T x k x k
hlines : k x k
"""
import matplotlib.pyplot as plt
if subplot_params is None:
subplot_params = {}
if plot_params is None:
plot_params = {}
nrows, ncols, to_plot = _get_irf_plot_config(names, impcol, rescol)
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True,
squeeze=False, figsize=figsize)
# fill out space
adjust_subplots()
fig.suptitle(title, fontsize=14)
subtitle_temp = r'%s$\rightarrow$%s'
k = len(names)
rng = lrange(len(values))
for (j, i, ai, aj) in to_plot:
ax = axes[ai][aj]
# HACK?
if stderr is not None:
if stderr_type == 'asym':
sig = np.sqrt(stderr[:, j * k + i, j * k + i])
plot_with_error(values[:, i, j], sig, x=rng, axes=ax,
alpha=signif, value_fmt='b', stderr_type=stderr_type)
if stderr_type in ('mc','sz1','sz2','sz3'):
errs = stderr[0][:, i, j], stderr[1][:, i, j]
plot_with_error(values[:, i, j], errs, x=rng, axes=ax,
alpha=signif, value_fmt='b', stderr_type=stderr_type)
else:
plot_with_error(values[:, i, j], None, x=rng, axes=ax,
value_fmt='b')
ax.axhline(0, color='k')
if hlines is not None:
ax.axhline(hlines[i,j], color='k')
sz = subplot_params.get('fontsize', 12)
ax.set_title(subtitle_temp % (names[j], names[i]), fontsize=sz)
return fig | Reusable function to make flexible grid plots of impulse responses and
cumulative effects
values : (T + 1) x k x k
stderr : T x k x k
hlines : k x k | irf_grid_plot | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/plotting.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/plotting.py | BSD-3-Clause |
def make(self, endog_names=None, exog_names=None):
"""
Summary of VAR model
"""
buf = StringIO()
buf.write(self._header_table() + '\n')
buf.write(self._stats_table() + '\n')
buf.write(self._coef_table() + '\n')
buf.write(self._resid_info() + '\n')
return buf.getvalue() | Summary of VAR model | make | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/output.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/output.py | BSD-3-Clause |
def is_stable(coefs, verbose=False):
"""
Determine stability of VAR(p) system by examining the eigenvalues of the
VAR(1) representation
Parameters
----------
coefs : ndarray (p x k x k)
Returns
-------
is_stable : bool
"""
A_var1 = util.comp_matrix(coefs)
eigs = np.linalg.eigvals(A_var1)
if verbose:
print("Eigenvalues of VAR(1) rep")
for val in np.abs(eigs):
print(val)
return (np.abs(eigs) <= 1).all() | Determine stability of VAR(p) system by examining the eigenvalues of the
VAR(1) representation
Parameters
----------
coefs : ndarray (p x k x k)
Returns
-------
is_stable : bool | is_stable | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
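A minimal sketch of the check on a hand-built stable VAR(1) coefficient array:

# Stability: eigenvalues of the companion matrix must lie inside the unit circle.
import numpy as np
from statsmodels.tsa.vector_ar.var_model import is_stable

coefs = np.array([[[0.5, 0.1],
                   [0.0, 0.4]]])  # shape (p=1, k=2, k=2); eigenvalues 0.5, 0.4
print(is_stable(coefs, verbose=True))  # True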
def var_acf(coefs, sig_u, nlags=None):
"""
Compute autocovariance function ACF_y(h) up to nlags of stable VAR(p)
process
Parameters
----------
coefs : ndarray (p x k x k)
Coefficient matrices A_i
sig_u : ndarray (k x k)
Covariance of white noise process u_t
nlags : int, optional
Defaults to order p of system
Notes
-----
Ref: Lütkepohl p.28-29
Returns
-------
acf : ndarray, (p, k, k)
"""
p, k, _ = coefs.shape
if nlags is None:
nlags = p
# p x k x k, ACF for lags 0, ..., p-1
result = np.zeros((nlags + 1, k, k))
result[:p] = _var_acf(coefs, sig_u)
# yule-walker equations
for h in range(p, nlags + 1):
# compute ACF for lag=h
# G(h) = A_1 G(h-1) + ... + A_p G(h-p)
for j in range(p):
result[h] += np.dot(coefs[j], result[h - j - 1])
return result | Compute autocovariance function ACF_y(h) up to nlags of stable VAR(p)
process
Parameters
----------
coefs : ndarray (p x k x k)
Coefficient matrices A_i
sig_u : ndarray (k x k)
Covariance of white noise process u_t
nlags : int, optional
Defaults to order p of system
Notes
-----
Ref: Lütkepohl p.28-29
Returns
-------
acf : ndarray, (p, k, k) | var_acf | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
def _var_acf(coefs, sig_u):
"""
Compute autocovariance function ACF_y(h) for h=1,...,p
Notes
-----
Lütkepohl (2005) p.29
"""
p, k, k2 = coefs.shape
assert k == k2
A = util.comp_matrix(coefs)
# construct VAR(1) noise covariance
SigU = np.zeros((k * p, k * p))
SigU[:k, :k] = sig_u
# vec(ACF) = (I_(kp)^2 - kron(A, A))^-1 vec(Sigma_U)
vecACF = np.linalg.solve(np.eye((k * p) ** 2) - np.kron(A, A), vec(SigU))
acf = unvec(vecACF)
acf = [acf[:k, k * i : k * (i + 1)] for i in range(p)]
acf = np.array(acf)
return acf | Compute autocovariance function ACF_y(h) for h=1,...,p
Notes
-----
Lütkepohl (2005) p.29 | _var_acf | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
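A sketch of the two formulas implemented in var_acf and _var_acf: the initial autocovariances come from the VAR(1) companion matrix \(\mathbf{A}\), and higher lags follow the Yule-Walker recursion (Lütkepohl pp. 28-29):

\[
\operatorname{vec}\Gamma_Y(0) = \big(I_{(kp)^2} - \mathbf{A}\otimes\mathbf{A}\big)^{-1}\operatorname{vec}\Sigma_U,
\qquad
\Gamma_y(h) = A_1 \Gamma_y(h-1) + \cdots + A_p \Gamma_y(h-p), \quad h \ge p
\]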
def forecast(y, coefs, trend_coefs, steps, exog=None):
"""
Produce linear minimum MSE forecast
Parameters
----------
y : ndarray (k_ar x neqs)
coefs : ndarray (k_ar x neqs x neqs)
trend_coefs : ndarray (1 x neqs) or (neqs)
steps : int
exog : ndarray (trend_coefs.shape[1] x neqs)
Returns
-------
forecasts : ndarray (steps x neqs)
Notes
-----
Lütkepohl p. 37
"""
coefs = np.asarray(coefs)
if coefs.ndim != 3:
raise ValueError("coefs must be an array with 3 dimensions")
p, k = coefs.shape[:2]
if y.shape[0] < p:
raise ValueError(
f"y must by have at least order ({p}) observations. " f"Got {y.shape[0]}."
)
# initial value
forcs = np.zeros((steps, k))
if exog is not None and trend_coefs is not None:
forcs += np.dot(exog, trend_coefs)
# to make existing code (with trend_coefs=intercept and without exog) work:
elif exog is None and trend_coefs is not None:
forcs += trend_coefs
# h=0 forecast should be latest observation
# forcs[0] = y[-1]
# make indices easier to think about
for h in range(1, steps + 1):
# y_t(h) = intercept + sum_1^p A_i y_t_(h-i)
f = forcs[h - 1]
for i in range(1, p + 1):
# slightly hackish
if h - i <= 0:
# e.g. when h=1, h-1 = 0, which is y[-1]
prior_y = y[h - i - 1]
else:
# e.g. when h=2, h-1=1, which is forcs[0]
prior_y = forcs[h - i - 1]
# i=1 is coefs[0]
f = f + np.dot(coefs[i - 1], prior_y)
forcs[h - 1] = f
return forcs | Produce linear minimum MSE forecast
Parameters
----------
y : ndarray (k_ar x neqs)
coefs : ndarray (k_ar x neqs x neqs)
trend_coefs : ndarray (1 x neqs) or (neqs)
steps : int
exog : ndarray (trend_coefs.shape[1] x neqs)
Returns
-------
forecasts : ndarray (steps x neqs)
Notes
-----
Lütkepohl p. 37 | forecast | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
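A small numeric sketch of the recursion for a VAR(1) (all numbers illustrative):

# One-step check of the minimum-MSE forecast recursion.
import numpy as np
from statsmodels.tsa.vector_ar.var_model import forecast

coefs = np.array([[[0.5, 0.1],
                   [0.0, 0.4]]])    # (p=1, k=2, k=2)
intercept = np.array([1.0, 2.0])
y = np.array([[10.0, 20.0]])        # the last p observations
fc = forecast(y, coefs, intercept, steps=3)
print(fc[0])  # [8.0, 10.0]: 1 + 0.5*10 + 0.1*20 and 2 + 0.0*10 + 0.4*20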
def _forecast_vars(steps, ma_coefs, sig_u):
"""_forecast_vars function used by VECMResults. Note that the definition
of the local variable covs is the same as in VARProcess and as such it
differs from the one in VARResults!
Parameters
----------
steps
ma_coefs
sig_u
Returns
-------
"""
covs = mse(ma_coefs, sig_u, steps)
# Take diagonal for each cov
neqs = len(sig_u)
inds = np.arange(neqs)
return covs[:, inds, inds] | _forecast_vars function used by VECMResults. Note that the definition
of the local variable covs is the same as in VARProcess and as such it
differs from the one in VARResults!
Parameters
----------
steps
ma_coefs
sig_u
Returns
------- | _forecast_vars | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
def test_normality(results, signif=0.05):
"""
Test assumption of normal-distributed errors using Jarque-Bera-style
omnibus Chi^2 test
Parameters
----------
results : VARResults or statsmodels.tsa.vecm.vecm.VECMResults
signif : float
The test's significance level.
Notes
-----
H0 (null) : data are generated by a Gaussian-distributed process
Returns
-------
result : NormalityTestResults
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series*
*Analysis*. Springer.
.. [2] Kilian, L. & Demiroglu, U. (2000). "Residual-Based Tests for
Normality in Autoregressions: Asymptotic Theory and Simulation
Evidence." Journal of Business & Economic Statistics
"""
resid_c = results.resid - results.resid.mean(0)
sig = np.dot(resid_c.T, resid_c) / results.nobs
Pinv = np.linalg.inv(np.linalg.cholesky(sig))
w = np.dot(Pinv, resid_c.T)
b1 = (w**3).sum(1)[:, None] / results.nobs
b2 = (w**4).sum(1)[:, None] / results.nobs - 3
lam_skew = results.nobs * np.dot(b1.T, b1) / 6
lam_kurt = results.nobs * np.dot(b2.T, b2) / 24
lam_omni = float(np.squeeze(lam_skew + lam_kurt))
omni_dist = stats.chi2(results.neqs * 2)
omni_pvalue = float(omni_dist.sf(lam_omni))
crit_omni = float(omni_dist.ppf(1 - signif))
return NormalityTestResults(
lam_omni, crit_omni, omni_pvalue, results.neqs * 2, signif
) | Test the assumption of normally distributed errors using a Jarque-Bera-style
omnibus Chi^2 test
Parameters
----------
results : VARResults or statsmodels.tsa.vecm.vecm.VECMResults
signif : float
The test's significance level.
Notes
-----
H0 (null) : data are generated by a Gaussian-distributed process
Returns
-------
result : NormalityTestResults
References
----------
.. [1] Lütkepohl, H. 2005. *New Introduction to Multiple Time Series*
*Analysis*. Springer.
.. [2] Kilian, L. & Demiroglu, U. (2000). "Residual-Based Tests for
Normality in Autoregressions: Asymptotic Theory and Simulation
Evidence." Journal of Business & Economic Statistics | test_normality | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
def predict(self, params, start=None, end=None, lags=1, trend="c"):
"""
Returns in-sample predictions or forecasts
"""
params = np.array(params)
if start is None:
start = lags
# Handle start, end
(
start,
end,
out_of_sample,
prediction_index,
) = self._get_prediction_index(start, end)
if end < start:
raise ValueError("end is before start")
if end == start + out_of_sample:
return np.array([])
k_trend = util.get_trendorder(trend)
k = self.neqs
k_ar = lags
predictedvalues = np.zeros((end + 1 - start + out_of_sample, k))
if k_trend != 0:
intercept = params[:k_trend]
predictedvalues += intercept
y = self.endog
x = util.get_var_endog(y, lags, trend=trend, has_constant="raise")
fittedvalues = np.dot(x, params)
fv_start = start - k_ar
pv_end = min(len(predictedvalues), len(fittedvalues) - fv_start)
fv_end = min(len(fittedvalues), end - k_ar + 1)
predictedvalues[:pv_end] = fittedvalues[fv_start:fv_end]
if not out_of_sample:
return predictedvalues
# fit out of sample
y = y[-k_ar:]
coefs = params[k_trend:].reshape((k_ar, k, k)).swapaxes(1, 2)
predictedvalues[pv_end:] = forecast(y, coefs, intercept, out_of_sample)
return predictedvalues | Returns in-sample predictions or forecasts | predict | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
def fit(
self,
maxlags: int | None = None,
method="ols",
ic=None,
trend="c",
verbose=False,
):
# todo: this code is only supporting deterministic terms as exog.
# This means that all exog-variables have lag 0. If dealing with
# different exogs is necessary, a `lags_exog`-parameter might make
# sense (e.g. a sequence of ints specifying lags).
# Alternatively, leading zeros for exog-variables with smaller number
# of lags than the maximum number of exog-lags might work.
"""
Fit the VAR model
Parameters
----------
maxlags : {int, None}, default None
Maximum number of lags to check for order selection, defaults to
12 * (nobs/100.)**(1./4), see select_order function
method : {'ols'}
Estimation method to use
ic : {'aic', 'fpe', 'hqic', 'bic', None}
Information criterion to use for VAR order selection.
aic : Akaike
fpe : Final prediction error
hqic : Hannan-Quinn
bic : Bayesian a.k.a. Schwarz
verbose : bool, default False
Print order selection output to the screen
trend : str {"c", "ct", "ctt", "n"}
"c" - add constant
"ct" - constant and trend
"ctt" - constant, linear and quadratic trend
"n" - co constant, no trend
Note that these are prepended to the columns of the dataset.
Returns
-------
VARResults
Estimation results
Notes
-----
See Lütkepohl pp. 146-153 for implementation details.
"""
lags = maxlags
if trend not in ["c", "ct", "ctt", "n"]:
raise ValueError(f"trend '{trend}' not supported for VAR")
if ic is not None:
selections = self.select_order(maxlags=maxlags)
if not hasattr(selections, ic):
raise ValueError(
"%s not recognized, must be among %s" % (ic, sorted(selections))
)
lags = getattr(selections, ic)
if verbose:
print(selections)
print("Using %d based on %s criterion" % (lags, ic))
else:
if lags is None:
lags = 1
k_trend = util.get_trendorder(trend)
orig_exog_names = self.exog_names
self.exog_names = util.make_lag_names(self.endog_names, lags, k_trend)
self.nobs = self.n_totobs - lags
# add exog to data.xnames (necessary because the length of xnames also
# determines the allowed size of VARResults.params)
if self.exog is not None:
if orig_exog_names:
x_names_to_add = orig_exog_names
else:
x_names_to_add = [("exog%d" % i) for i in range(self.exog.shape[1])]
self.data.xnames = (
self.data.xnames[:k_trend] + x_names_to_add + self.data.xnames[k_trend:]
)
self.data.cov_names = pd.MultiIndex.from_product(
(self.data.xnames, self.data.ynames)
)
return self._estimate_var(lags, trend=trend) | Fit the VAR model
Parameters
----------
maxlags : {int, None}, default None
Maximum number of lags to check for order selection, defaults to
12 * (nobs/100.)**(1./4), see select_order function
method : {'ols'}
Estimation method to use
ic : {'aic', 'fpe', 'hqic', 'bic', None}
Information criterion to use for VAR order selection.
aic : Akaike
fpe : Final prediction error
hqic : Hannan-Quinn
bic : Bayesian a.k.a. Schwarz
verbose : bool, default False
Print order selection output to the screen
trend : str {"c", "ct", "ctt", "n"}
"c" - add constant
"ct" - constant and trend
"ctt" - constant, linear and quadratic trend
"n" - co constant, no trend
Note that these are prepended to the columns of the dataset.
Returns
-------
VARResults
Estimation results
Notes
-----
See Lütkepohl pp. 146-153 for implementation details. | fit | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
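A minimal usage sketch of order selection plus estimation (dataset and transform are illustrative):

# Select the lag order by AIC, then fit with a constant.
import statsmodels.api as sm
from statsmodels.tsa.api import VAR

data = sm.datasets.macrodata.load_pandas().data[["realgdp", "realcons", "realinv"]]
data = data.pct_change().dropna()
res = VAR(data).fit(maxlags=8, ic="aic", trend="c")
print(res.summary())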
def _estimate_var(self, lags, offset=0, trend="c"):
"""
lags : int
Lags of the endogenous variable.
offset : int
Periods to drop from beginning-- for order selection so it's an
apples-to-apples comparison
trend : {str, None}
As per above
"""
# have to do this again because select_order does not call fit
self.k_trend = k_trend = util.get_trendorder(trend)
if offset < 0: # pragma: no cover
raise ValueError("offset must be >= 0")
nobs = self.n_totobs - lags - offset
endog = self.endog[offset:]
exog = None if self.exog is None else self.exog[offset:]
z = util.get_var_endog(endog, lags, trend=trend, has_constant="raise")
if exog is not None:
# TODO: currently only deterministic terms supported (exoglags==0)
# and since exoglags==0, x will be an array of size 0.
x = util.get_var_endog(exog[-nobs:], 0, trend="n", has_constant="raise")
x_inst = exog[-nobs:]
x = np.column_stack((x, x_inst))
del x_inst # free memory
temp_z = z
z = np.empty((x.shape[0], x.shape[1] + z.shape[1]))
z[:, : self.k_trend] = temp_z[:, : self.k_trend]
z[:, self.k_trend : self.k_trend + x.shape[1]] = x
z[:, self.k_trend + x.shape[1] :] = temp_z[:, self.k_trend :]
del temp_z, x # free memory
# the following modification of z is necessary to get the same results
# as JMulTi for the constant-term-parameter...
for i in range(self.k_trend):
if (np.diff(z[:, i]) == 1).all(): # modify the trend-column
z[:, i] += lags
# make the same adjustment for the quadratic term
if (np.diff(np.sqrt(z[:, i])) == 1).all():
z[:, i] = (np.sqrt(z[:, i]) + lags) ** 2
y_sample = endog[lags:]
# Lütkepohl p75, about 5x faster than stated formula
params = np.linalg.lstsq(z, y_sample, rcond=1e-15)[0]
resid = y_sample - np.dot(z, params)
# Unbiased estimate of covariance matrix $\Sigma_u$ of the white noise
# process $u$
# equivalent definition
# .. math:: \frac{1}{T - Kp - 1} Y^\prime (I_T - Z (Z^\prime Z)^{-1}
# Z^\prime) Y
# Ref: Lütkepohl p.75
# df_resid right now is T - Kp - 1, which is a suggested correction
avobs = len(y_sample)
if exog is not None:
k_trend += exog.shape[1]
df_resid = avobs - (self.neqs * lags + k_trend)
sse = np.dot(resid.T, resid)
if df_resid:
omega = sse / df_resid
else:
omega = np.full_like(sse, np.nan)
varfit = VARResults(
endog,
z,
params,
omega,
lags,
names=self.endog_names,
trend=trend,
dates=self.data.dates,
model=self,
exog=self.exog,
)
return VARResultsWrapper(varfit) | lags : int
Lags of the endogenous variable.
offset : int
Periods to drop from beginning-- for order selection so it's an
apples-to-apples comparison
trend : {str, None}
As per above | _estimate_var | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
def select_order(self, maxlags=None, trend="c"):
"""
Compute lag order selections based on each of the available information
criteria
Parameters
----------
maxlags : int
if None, defaults to 12 * (nobs/100.)**(1./4)
trend : str {"n", "c", "ct", "ctt"}
* "n" - no deterministic terms
* "c" - constant term
* "ct" - constant and linear term
* "ctt" - constant, linear, and quadratic term
Returns
-------
selections : LagOrderResults
"""
ntrend = len(trend) if trend.startswith("c") else 0
max_estimable = (self.n_totobs - self.neqs - ntrend) // (1 + self.neqs)
if maxlags is None:
maxlags = int(round(12 * (len(self.endog) / 100.0) ** (1 / 4.0)))
# TODO: This expression shows up in a bunch of places, but
# in some it is `int` and in others `np.ceil`. Also in some
# it multiplies by 4 instead of 12. Let's put these all in
# one place and document when to use which variant.
# Ensure enough obs to estimate model with maxlags
maxlags = min(maxlags, max_estimable)
else:
if maxlags > max_estimable:
raise ValueError(
"maxlags is too large for the number of observations and "
"the number of equations. The largest model cannot be "
"estimated."
)
ics = defaultdict(list)
p_min = 0 if self.exog is not None or trend != "n" else 1
for p in range(p_min, maxlags + 1):
            # exclude initial periods so the same amount of data is used
            # for each lag order
result = self._estimate_var(p, offset=maxlags - p, trend=trend)
for k, v in result.info_criteria.items():
ics[k].append(v)
selected_orders = {k: np.array(v).argmin() + p_min for k, v in ics.items()}
return LagOrderResults(ics, selected_orders, vecm=False) | Compute lag order selections based on each of the available information
criteria
Parameters
----------
maxlags : int
if None, defaults to 12 * (nobs/100.)**(1./4)
trend : str {"n", "c", "ct", "ctt"}
* "n" - no deterministic terms
* "c" - constant term
* "ct" - constant and linear term
* "ctt" - constant, linear, and quadratic term
Returns
-------
selections : LagOrderResults | select_order | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
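A hedged sketch of inspecting the criteria directly instead of passing `ic` to fit (`data` and `VAR` as in the sketch above):

sel = VAR(data).select_order(maxlags=8, trend="c")
print(sel.summary())      # AIC/BIC/FPE/HQIC across candidate lag orders
print(sel.aic, sel.bic)   # the order chosen under each criterion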
def from_formula(cls, formula, data, subset=None, drop_cols=None, *args, **kwargs):
"""
Not implemented. Formulas are not supported for VAR models.
"""
raise NotImplementedError("formulas are not supported for VAR models.") | Not implemented. Formulas are not supported for VAR models. | from_formula | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
def get_eq_index(self, name):
"""Return integer position of requested equation name"""
return util.get_index(self.names, name) | Return integer position of requested equation name | get_eq_index | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
def is_stable(self, verbose=False):
"""Determine stability based on model coefficients
Parameters
----------
verbose : bool
Print eigenvalues of the VAR(1) companion
Notes
-----
Checks if det(I - Az) = 0 for any mod(z) <= 1, so all the eigenvalues of
the companion matrix must lie outside the unit circle
"""
return is_stable(self.coefs, verbose=verbose) | Determine stability based on model coefficients
Parameters
----------
verbose : bool
Print eigenvalues of the VAR(1) companion
Notes
-----
Checks that det(I - Az) != 0 for all mod(z) <= 1, i.e. that the roots of the
characteristic polynomial lie outside the unit circle; equivalently, all
eigenvalues of the VAR(1) companion matrix lie inside the unit circle | is_stable | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
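The same stability condition can be checked by hand on the VAR(1) companion matrix; a sketch assuming `results` is a fitted VARResults (this mirrors, but is not, the library's internal check):

import numpy as np

p, k, _ = results.coefs.shape
companion = np.zeros((k * p, k * p))
companion[:k] = np.concatenate(results.coefs, axis=1)  # top block row [A1 ... Ap]
if p > 1:
    companion[k:, :-k] = np.eye(k * (p - 1))           # identity blocks below the top row
stable = bool(np.all(np.abs(np.linalg.eigvals(companion)) < 1))
# `stable` should agree with results.is_stable()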
def simulate_var(
self, steps=None, offset=None, seed=None, initial_values=None, nsimulations=None
):
"""
simulate the VAR(p) process for the desired number of steps
Parameters
----------
        steps : None or int
            number of observations to simulate; this includes the initial
            observations needed to start the autoregressive process.
            If offset is None and the model includes exog or trend terms,
            steps defaults to the number of available offset observations.
offset : None or ndarray (steps, neqs)
If not None, then offset is added as an observation specific
intercept to the autoregression. If it is None and either trend
(including intercept) or exog were used in the VAR model, then
the linear predictor of those components will be used as offset.
This should have the same number of rows as steps, and the same
number of columns as endogenous variables (neqs).
        seed : {None, int}
            If seed is not None, then it will be used to seed the random
            variables generated by numpy.random.
initial_values : array_like, optional
            Initial values for use in the simulation. Shape should be
            (nlags, neqs) or (neqs,). Values should be ordered from least to
            most recent. Note that these values will be returned by the
            simulation as the first values of `endog_simulated` and they
            count toward the total number of steps.
nsimulations : {None, int}
            Number of simulations to perform. If `nsimulations` is None, one
            simulation is performed and the returned value has shape (steps, neqs).
Returns
-------
        endog_simulated : ndarray
Endog of the simulated VAR process. Shape will be (nsimulations, steps, neqs)
or (steps, neqs) if `nsimulations` is None.
"""
steps_ = None
if offset is None:
if self.k_exog_user > 0 or self.k_trend > 1:
# if more than intercept
# endog_lagged contains all regressors, trend, exog_user
# and lagged endog, trimmed initial observations
offset = self.endog_lagged[:, : self.k_exog].dot(self.coefs_exog.T)
steps_ = self.endog_lagged.shape[0]
else:
offset = self.intercept
else:
steps_ = offset.shape[0]
        # default, but overwritten if exog or offset are used
if steps is None:
if steps_ is None:
steps = 1000
else:
steps = steps_
else:
if steps_ is not None and steps != steps_:
                raise ValueError(
                    "if exog or offset are used, then steps must "
                    "be equal to their length or None"
                )
y = util.varsim(
self.coefs,
offset,
self.sigma_u,
steps=steps,
seed=seed,
initial_values=initial_values,
nsimulations=nsimulations,
)
return y | simulate the VAR(p) process for the desired number of steps
Parameters
----------
steps : None or int
number of observations to simulate; this includes the initial
observations needed to start the autoregressive process.
If offset is None and the model includes exog or trend terms,
steps defaults to the number of available offset observations.
offset : None or ndarray (steps, neqs)
If not None, then offset is added as an observation specific
intercept to the autoregression. If it is None and either trend
(including intercept) or exog were used in the VAR model, then
the linear predictor of those components will be used as offset.
This should have the same number of rows as steps, and the same
number of columns as endogenous variables (neqs).
seed : {None, int}
If seed is not None, then it will be used to seed the random
variables generated by numpy.random.
initial_values : array_like, optional
Initial values for use in the simulation. Shape should be
(nlags, neqs) or (neqs,). Values should be ordered from least to
most recent. Note that these values will be returned by the
simulation as the first values of `endog_simulated` and they
count toward the total number of steps.
nsimulations : {None, int}
Number of simulations to perform. If `nsimulations` is None, one
simulation is performed and the returned value has shape (steps, neqs).
Returns
-------
endog_simulated : ndarray
Endog of the simulated VAR process. Shape will be (nsimulations, steps, neqs)
or (steps, neqs) if `nsimulations` is None. | simulate_var | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
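A hedged usage sketch of simulate_var (seed and step counts are arbitrary; `endog` and `results` come from the earlier sketches):

sims = results.simulate_var(steps=250, seed=12345)
print(sims.shape)                        # (250, neqs)

paths = results.simulate_var(steps=250, seed=12345, nsimulations=100)
print(paths.shape)                       # (100, 250, neqs)

# Anchor the simulation at the last observed values:
init = endog.values[-results.k_ar:]
anchored = results.simulate_var(steps=250, seed=0, initial_values=init)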
def plotsim(self, steps=None, offset=None, seed=None):
"""
Plot a simulation from the VAR(p) process for the desired number of
steps
"""
y = self.simulate_var(steps=steps, offset=offset, seed=seed)
return plotting.plot_mts(y) | Plot a simulation from the VAR(p) process for the desired number of
steps | plotsim | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
def _char_mat(self):
"""Characteristic matrix of the VAR"""
return np.eye(self.neqs) - self.coefs.sum(0) | Characteristic matrix of the VAR | _char_mat | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
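The characteristic matrix is the VAR lag polynomial evaluated at z = 1, i.e. I_K - A_1 - ... - A_p; for a stable process it maps the intercept to the long-run mean. A sketch of that identity (a numerical cross-check, not the library's code):

import numpy as np

char_mat = np.eye(results.neqs) - results.coefs.sum(axis=0)
long_run_mean = np.linalg.solve(char_mat, results.intercept)
# Should agree, up to numerical precision, with results.mean()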
def acf(self, nlags=None):
"""Compute theoretical autocovariance function
Returns
-------
acf : ndarray (p x k x k)
"""
return var_acf(self.coefs, self.sigma_u, nlags=nlags) | Compute theoretical autocovariance function
Returns
-------
acf : ndarray (p x k x k) | acf | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
def acorr(self, nlags=None):
"""
Autocorrelation function
Parameters
----------
nlags : int or None
The number of lags to include in the autocovariance function. The
default is the number of lags included in the model.
Returns
-------
acorr : ndarray
Autocorrelation and cross correlations (nlags, neqs, neqs)
"""
return util.acf_to_acorr(self.acf(nlags=nlags)) | Autocorrelation function
Parameters
----------
nlags : int or None
The number of lags to include in the autocovariance function. The
default is the number of lags included in the model.
Returns
-------
acorr : ndarray
Autocorrelation and cross correlations (nlags, neqs, neqs) | acorr | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
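A sketch relating acf and acorr (the lag count is arbitrary; both arrays include lag 0, so expect nlags + 1 matrices):

acov = results.acf(nlags=10)      # theoretical autocovariances, shape (11, neqs, neqs)
corr = results.acorr(nlags=10)    # autocorrelations on the same lag grid
print(corr[0].diagonal())         # lag-0 diagonal is all ones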
def plot_acorr(self, nlags=10, linewidth=8):
"""Plot theoretical autocorrelation function"""
fig = plotting.plot_full_acorr(self.acorr(nlags=nlags), linewidth=linewidth)
return fig | Plot theoretical autocorrelation function | plot_acorr | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
def forecast(self, y, steps, exog_future=None):
"""Produce linear minimum MSE forecasts for desired number of steps
ahead, using prior values y
Parameters
----------
y : ndarray (p x k)
steps : int
Returns
-------
forecasts : ndarray (steps x neqs)
Notes
-----
Lütkepohl pp 37-38
"""
        if self.exog is None and exog_future is not None:
            raise ValueError(
                "No exog in model, so no exog_future supported in forecast method."
            )
        if self.exog is not None and exog_future is None:
            raise ValueError(
                "Please provide an exog_future argument to the forecast method."
            )
exog_future = array_like(exog_future, "exog_future", optional=True, ndim=2)
if exog_future is not None:
            if exog_future.shape[0] != steps:
                raise ValueError(
                    f"exog_future only has {exog_future.shape[0]} observations. "
                    f"It must have steps ({steps}) observations."
                )
trend_coefs = None if self.coefs_exog.size == 0 else self.coefs_exog.T
exogs = []
if self.trend.startswith("c"): # constant term
exogs.append(np.ones(steps))
exog_lin_trend = np.arange(self.n_totobs + 1, self.n_totobs + 1 + steps)
if "t" in self.trend:
exogs.append(exog_lin_trend)
if "tt" in self.trend:
exogs.append(exog_lin_trend**2)
if exog_future is not None:
exogs.append(exog_future)
if not exogs:
exog_future = None
else:
exog_future = np.column_stack(exogs)
return forecast(y, self.coefs, trend_coefs, steps, exog_future) | Produce linear minimum MSE forecasts for desired number of steps
ahead, using prior values y
Parameters
----------
y : ndarray (p x k)
steps : int
Returns
-------
forecasts : ndarray (steps x neqs)
Notes
-----
Lütkepohl pp 37-38 | forecast | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
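A usage sketch of forecast in the exog-free case (`endog`, `results`, and the horizon are assumptions carried over from the earlier sketches):

h = 8
last_obs = endog.values[-results.k_ar:]       # shape (p, neqs)
point = results.forecast(last_obs, steps=h)   # shape (h, neqs)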
def forecast_interval(self, y, steps, alpha=0.05, exog_future=None):
"""
        Construct forecast interval estimates assuming Gaussian innovations
Parameters
----------
y : {ndarray, None}
The initial values to use for the forecasts. If None,
the last k_ar values of the original endogenous variables are
used.
steps : int
Number of steps ahead to forecast
alpha : float, optional
The significance level for the confidence intervals.
exog_future : ndarray, optional
Forecast values of the exogenous variables. Should include
constant, trend, etc. as needed, including extrapolating out
of sample.
Returns
-------
point : ndarray
Mean value of forecast
lower : ndarray
Lower bound of confidence interval
upper : ndarray
Upper bound of confidence interval
Notes
-----
Lütkepohl pp. 39-40
"""
if not 0 < alpha < 1:
raise ValueError("alpha must be between 0 and 1")
q = util.norm_signif_level(alpha)
point_forecast = self.forecast(y, steps, exog_future=exog_future)
sigma = np.sqrt(self._forecast_vars(steps))
forc_lower = point_forecast - q * sigma
forc_upper = point_forecast + q * sigma
        return point_forecast, forc_lower, forc_upper | Construct forecast interval estimates assuming Gaussian innovations
Parameters
----------
y : {ndarray, None}
The initial values to use for the forecasts. If None,
the last k_ar values of the original endogenous variables are
used.
steps : int
Number of steps ahead to forecast
alpha : float, optional
The significance level for the confidence intervals.
exog_future : ndarray, optional
Forecast values of the exogenous variables. Should include
constant, trend, etc. as needed, including extrapolating out
of sample.
Returns
-------
point : ndarray
Mean value of forecast
lower : ndarray
Lower bound of confidence interval
upper : ndarray
Upper bound of confidence interval
Notes
-----
Lütkepohl pp. 39-40 | forecast_interval | python | statsmodels/statsmodels | statsmodels/tsa/vector_ar/var_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/vector_ar/var_model.py | BSD-3-Clause |
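And the interval variant, continuing the forecast sketch above: it brackets the point forecast with plus/minus z(alpha/2) times the forecast standard errors. The reconstruction below is a rough cross-check, not the library's exact code path:

import numpy as np
from scipy import stats

mid, lower, upper = results.forecast_interval(last_obs, steps=h, alpha=0.05)

q = stats.norm.ppf(1 - 0.05 / 2)
se = np.sqrt(np.array([np.diag(m) for m in results.mse(h)]))
# lower is approximately mid - q * se, and upper approximately mid + q * se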