def test_simulate_keywords(austourists):
"""
check whether all keywords are accepted and work without throwing errors.
"""
fit = ExponentialSmoothing(
austourists,
seasonal_periods=4,
trend="add",
seasonal="add",
damped_trend=True,
initialization_method="estimated",
).fit()
# test anchor
assert_almost_equal(
fit.simulate(4, anchor=0, random_state=0).values,
fit.simulate(4, anchor="start", random_state=0).values,
)
assert_almost_equal(
fit.simulate(4, anchor=-1, random_state=0).values,
fit.simulate(4, anchor="2015-12-01", random_state=0).values,
)
assert_almost_equal(
fit.simulate(4, anchor="end", random_state=0).values,
fit.simulate(4, anchor="2016-03-01", random_state=0).values,
)
# test different random error options
fit.simulate(4, repetitions=10, random_errors=scipy.stats.norm)
fit.simulate(4, repetitions=10, random_errors=scipy.stats.norm())
fit.simulate(4, repetitions=10, random_errors=np.random.randn(4, 10))
fit.simulate(4, repetitions=10, random_errors="bootstrap")
# test seeding
res = fit.simulate(4, repetitions=10, random_state=10).values
res2 = fit.simulate(
4, repetitions=10, random_state=np.random.RandomState(10)
).values
    assert np.all(res == res2)
# test_simulate_keywords | statsmodels/statsmodels | statsmodels/tsa/holtwinters/tests/test_holtwinters.py | BSD-3-Clause
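
# Usage sketch (illustrative, not part of the test module): the simulate API
# exercised above, on synthetic quarterly data standing in for the
# `austourists` fixture. Fixing random_state makes the draws reproducible.
import numpy as np
import pandas as pd
from statsmodels.tsa.holtwinters import ExponentialSmoothing

idx = pd.period_range("2005Q1", periods=48, freq="Q")
series = pd.Series(
    50 + 10 * np.sin(np.arange(48) * np.pi / 2) + 0.2 * np.arange(48), index=idx
)
hw_fit = ExponentialSmoothing(
    series,
    seasonal_periods=4,
    trend="add",
    seasonal="add",
    initialization_method="estimated",
).fit()
sims = hw_fit.simulate(8, repetitions=100, random_state=0)
print(sims.shape)  # (8, 100): horizon x repetitions
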
def test_simulate_boxcox(austourists):
"""
check if simulation results with boxcox fits are reasonable
"""
fit = ExponentialSmoothing(
austourists,
seasonal_periods=4,
trend="add",
seasonal="mul",
damped_trend=False,
initialization_method="estimated",
use_boxcox=True,
).fit()
expected = fit.forecast(4).values
res = fit.simulate(4, repetitions=10, random_state=0).values
mean = np.mean(res, axis=1)
    assert np.all(np.abs(mean - expected) < 5)
# test_simulate_boxcox | statsmodels/statsmodels | statsmodels/tsa/holtwinters/tests/test_holtwinters.py | BSD-3-Clause
def dentonm(indicator, benchmark, freq="aq", **kwargs):
"""
Modified Denton's method to convert low-frequency to high-frequency data.
Uses proportionate first-differences as the penalty function. See notes.
Parameters
----------
    indicator : array_like
        A high-frequency indicator series. It is assumed that there are no
        pre-sample indicators, i.e., the first indicators line up with
        the first benchmark.
    benchmark : array_like
        The lower-frequency benchmark. A 1d or 2d data series in columns.
        If 2d, then M series are assumed.
    freq : str {"aq", "qm", "other"}
        The frequency to use in the conversion.
        * "aq" - Benchmarking an annual series to quarterly.
        * "qm" - Benchmarking a quarterly series to monthly.
        * "other" - Custom stride. A kwarg, k, must be supplied.
    **kwargs
        Additional keyword arguments. For example:
        * k, an int, the number of high-frequency observations that sum to
          make an aggregate low-frequency observation. `k` is used with
          `freq` == "other".
Returns
-------
transformed : ndarray
The transformed series.
Examples
--------
>>> indicator = [50,100,150,100] * 5
>>> benchmark = [500,400,300,400,500]
>>> benchmarked = dentonm(indicator, benchmark, freq="aq")
Notes
-----
Denton's method minimizes the distance given by the penalty function, in
a least squares sense, between the unknown benchmarked series and the
indicator series subject to the condition that the sum of the benchmarked
    series is equal to the benchmark. The modification leaves the first
    value free rather than pre-determined, as it is in Denton's original
    method. If there is no benchmark provided for the last few indicator
observations, then extrapolation is performed using the last
benchmark-indicator ratio of the previous period.
Minimizes sum((X[t]/I[t] - X[t-1]/I[t-1])**2)
s.t.
    sum(X) = A for each period, where X is the benchmarked series, I is
    the indicator, and A is the benchmark.
References
----------
    Bloem, A.M., Dippelsman, R.J. and Maehle, N.O. 2001. Quarterly National
Accounts Manual--Concepts, Data Sources, and Compilation. IMF.
http://www.imf.org/external/pubs/ft/qna/2000/Textbook/index.htm
Cholette, P. 1988. "Benchmarking systems of socio-economic time series."
Statistics Canada, Time Series Research and Analysis Division,
Working Paper No TSRA-88-017E.
Denton, F.T. 1971. "Adjustment of monthly or quarterly series to annual
totals: an approach based on quadratic minimization." Journal of the
American Statistical Association. 99-102.
"""
# penalty : str
# Penalty function. Can be "D1", "D2", "D3", "D4", "D5".
# X is the benchmarked series and I is the indicator.
    # D1 - sum(((X[t] - X[t-1]) - (I[t] - I[t-1]))**2)
# D2 - sum((ln(X[t]/X[t-1]) - ln(I[t]/I[t-1]))**2)
# D3 - sum((X[t]/X[t-1] / I[t]/I[t-1])**2)
# D4 - sum((X[t]/I[t] - X[t-1]/I[t-1])**2)
# D5 - sum((X[t]/I[t] / X[t-1]/I[t-1] - 1)**2)
    # NOTE: D4 is the only penalty implemented; see IMF chapter 6.
# check arrays and make 2d
indicator = asarray(indicator)
if indicator.ndim == 1:
indicator = indicator[:,None]
benchmark = asarray(benchmark)
if benchmark.ndim == 1:
benchmark = benchmark[:,None]
# get dimensions
N = len(indicator) # total number of high-freq
m = len(benchmark) # total number of low-freq
# number of low-freq observations for aggregate measure
# 4 for annual to quarter and 3 for quarter to monthly
if freq == "aq":
k = 4
elif freq == "qm":
k = 3
elif freq == "other":
k = kwargs.get("k")
if not k:
raise ValueError("k must be supplied with freq=\"other\"")
else:
raise ValueError("freq %s not understood" % freq)
n = k*m # number of indicator series with a benchmark for back-series
    # if N > n, then we are going to extrapolate q observations
if N > n:
q = N - n
else:
q = 0
# make the aggregator matrix
#B = block_diag(*(ones((k,1)),)*m)
B = np.kron(np.eye(m), ones((k,1)))
# following the IMF paper, we can do
Zinv = diag(1./indicator.squeeze()[:n])
# this is D in Denton's notation (not using initial value correction)
# D = eye(n)
# make off-diagonal = -1
# D[((np.diag_indices(n)[0])[:-1]+1,(np.diag_indices(n)[1])[:-1])] = -1
# account for starting conditions
# H = D[1:,:]
# HTH = dot(H.T,H)
# just make HTH
HTH = eye(n)
diag_idx0, diag_idx1 = diag_indices(n)
HTH[diag_idx0[1:-1], diag_idx1[1:-1]] += 1
HTH[diag_idx0[:-1]+1, diag_idx1[:-1]] = -1
HTH[diag_idx0[:-1], diag_idx1[:-1]+1] = -1
W = dot(dot(Zinv,HTH),Zinv)
# make partitioned matrices
# TODO: break this out so that we can simplify the linalg?
I = zeros((n+m, n+m)) # noqa:E741
I[:n,:n] = W
I[:n,n:] = B
I[n:,:n] = B.T
A = zeros((m+n,1)) # zero first-order constraints
A[-m:] = benchmark # adding up constraints
X = solve(I,A)
X = X[:-m] # drop the lagrange multipliers
# handle extrapolation
if q > 0:
# get last Benchmark-Indicator ratio
bi = X[n-1]/indicator[n-1]
extrapolated = bi * indicator[n:]
X = r_[X,extrapolated]
    return X.squeeze()
# dentonm | statsmodels/statsmodels | statsmodels/tsa/interp/denton.py | BSD-3-Clause
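
# Worked example (a sketch, not from the source file): run dentonm on the
# docstring's data and verify the adding-up constraint sum(X) = A, i.e. that
# each block of k=4 benchmarked quarters sums to its annual benchmark.
import numpy as np
from statsmodels.tsa.interp import dentonm

indicator = np.array([50.0, 100.0, 150.0, 100.0] * 5)
benchmark = np.array([500.0, 400.0, 300.0, 400.0, 500.0])
benchmarked = dentonm(indicator, benchmark, freq="aq")
print(np.allclose(benchmarked.reshape(5, 4).sum(axis=1), benchmark))  # True
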
def fixed(self) -> NDArray | pd.DataFrame | None:
"""The fixed data used to construct the model"""
    return self.data.orig_fixed
# fixed | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def causal(self) -> bool:
"""Flag indicating that the ARDL is causal"""
    return self._causal
# causal | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def ar_lags(self) -> list[int] | None:
"""The autoregressive lags included in the model"""
    return None if not self._lags else self._lags
# ar_lags | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def dl_lags(self) -> dict[Hashable, list[int]]:
"""The lags of exogenous variables included in the model"""
    return self._order
# dl_lags | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def ardl_order(self) -> tuple[int, ...]:
"""The order of the ARDL(p,q)"""
ar_order = 0 if not self._lags else int(max(self._lags))
ardl_order = [ar_order]
for lags in self._order.values():
if lags is not None:
ardl_order.append(int(max(lags)))
    return tuple(ardl_order)
# ardl_order | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
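
# Illustrative sketch (synthetic data; names are hypothetical): ardl_order is
# (p, q0, q1, ...), where p is the maximum endog lag and each q is the maximum
# lag of an included exog column, in column order.
import numpy as np
import pandas as pd
from statsmodels.tsa.ardl import ARDL

rng = np.random.default_rng(0)
exog_df = pd.DataFrame(rng.standard_normal((200, 2)), columns=["x0", "x1"])
endog = pd.Series(rng.standard_normal(200), name="y")
mod = ARDL(endog, lags=2, exog=exog_df, order={"x0": 1, "x1": 3})
print(mod.ardl_order)  # (2, 1, 3)
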
def _setup_regressors(self) -> None:
"""Place holder to let AutoReg init complete"""
    self._y = np.empty((self.endog.shape[0] - self._hold_back, 0))
# _setup_regressors | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def _format_exog(
exog: ArrayLike2D, order: dict[Hashable, list[int]]
) -> dict[Hashable, np.ndarray]:
"""Transform exogenous variables and orders to regressors"""
if not order:
return {}
max_order = 0
for val in order.values():
if val is not None:
max_order = max(max(val), max_order)
if not isinstance(exog, pd.DataFrame):
exog = array_like(exog, "exog", ndim=2, maxdim=2)
exog_lags = {}
for key in order:
if order[key] is None:
continue
if isinstance(exog, np.ndarray):
assert isinstance(key, int)
col = exog[:, key]
else:
col = exog[key]
lagged_col = lagmat(col, max_order, original="in")
lags = order[key]
exog_lags[key] = lagged_col[:, lags]
    return exog_lags
# _format_exog | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
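
# A minimal sketch of the lag construction above: with original="in", lagmat
# prepends the contemporaneous series, so column j holds lag j (zero-padded in
# its first j rows under the default trim="forward"), and a list of requested
# lags indexes columns directly.
import numpy as np
from statsmodels.tsa.tsatools import lagmat

col = np.arange(1.0, 9.0)                  # 1, 2, ..., 8
lagged = lagmat(col, maxlag=3, original="in")
print(lagged[:, [0, 2]])                   # lag 0 and lag 2 of the series
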
def _check_order(self, order: _ARDLOrder) -> dict[Hashable, list[int]]:
"""Validate and standardize the model order"""
    return _format_order(self.data.orig_exog, order, self._causal)
# _check_order | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def fit(
self,
*,
cov_type: str = "nonrobust",
cov_kwds: dict[str, Any] = None,
use_t: bool = True,
) -> ARDLResults:
"""
Estimate the model parameters.
Parameters
----------
cov_type : str
The covariance estimator to use. The most common choices are listed
below. Supports all covariance estimators that are available
in ``OLS.fit``.
* 'nonrobust' - The class OLS covariance estimator that assumes
homoskedasticity.
* 'HC0', 'HC1', 'HC2', 'HC3' - Variants of White's
          (or Eicker-Huber-White) covariance estimator. `HC0` is the
          standard implementation. The others make corrections to improve
the finite sample performance of the heteroskedasticity robust
covariance estimator.
* 'HAC' - Heteroskedasticity-autocorrelation robust covariance
estimation. Supports cov_kwds.
- `maxlags` integer (required) : number of lags to use.
- `kernel` callable or str (optional) : kernel
currently available kernels are ['bartlett', 'uniform'],
default is Bartlett.
- `use_correction` bool (optional) : If true, use small sample
correction.
cov_kwds : dict, optional
A dictionary of keyword arguments to pass to the covariance
estimator. `nonrobust` and `HC#` do not support cov_kwds.
use_t : bool, optional
A flag indicating that inference should use the Student's t
distribution that accounts for model degree of freedom. If False,
uses the normal distribution. If None, defers the choice to
the cov_type. It also removes degree of freedom corrections from
the covariance estimator when cov_type is 'nonrobust'.
Returns
-------
ARDLResults
Estimation results.
See Also
--------
statsmodels.tsa.ar_model.AutoReg
Ordinary Least Squares estimation.
statsmodels.regression.linear_model.OLS
Ordinary Least Squares estimation.
statsmodels.regression.linear_model.RegressionResults
See ``get_robustcov_results`` for a detailed list of available
covariance estimators and options.
Notes
-----
Use ``OLS`` to estimate model parameters and to estimate parameter
covariance.
"""
params, cov_params, norm_cov_params = self._fit(
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t
)
res = ARDLResults(self, params, cov_params, norm_cov_params, use_t=use_t)
    return ARDLResultsWrapper(res)
# fit | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
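
# Usage sketch (synthetic data; illustrative only): fitting an ARDL by OLS
# with HAC (Newey-West) standard errors via cov_type and cov_kwds, as the
# docstring above describes.
import numpy as np
import pandas as pd
from statsmodels.tsa.ardl import ARDL

rng = np.random.default_rng(0)
exog_df = pd.DataFrame(rng.standard_normal((250, 1)), columns=["x"])
endog = pd.Series(rng.standard_normal(250), name="y")
res = ARDL(endog, lags=1, exog=exog_df, order=1).fit(
    cov_type="HAC", cov_kwds={"maxlags": 4}
)
print(res.bse)  # HAC standard errors
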
def _construct_regressors(
self, hold_back: int | None
) -> tuple[np.ndarray, np.ndarray]:
"""Construct and format model regressors"""
# TODO: Missing adjustment
self._maxlag = max(self._lags) if self._lags else 0
_endog_reg, _endog = lagmat(self.data.endog, self._maxlag, original="sep")
assert isinstance(_endog, np.ndarray)
assert isinstance(_endog_reg, np.ndarray)
self._endog_reg, self._endog = _endog_reg, _endog
if self._endog_reg.shape[1] != len(self._lags):
lag_locs = [lag - 1 for lag in self._lags]
self._endog_reg = self._endog_reg[:, lag_locs]
orig_exog = self.data.orig_exog
self._exog = self._format_exog(orig_exog, self._order)
exog_maxlag = 0
for val in self._order.values():
exog_maxlag = max(exog_maxlag, max(val) if val is not None else 0)
self._maxlag = max(self._maxlag, exog_maxlag)
self._deterministic_reg = self._deterministics.in_sample()
self._blocks = {
"endog": self._endog_reg,
"exog": self._exog,
"deterministic": self._deterministic_reg,
"fixed": self._fixed,
}
x = [self._deterministic_reg, self._endog_reg]
x += [ex for ex in self._exog.values()] + [self._fixed]
reg = np.column_stack(x)
if hold_back is None:
self._hold_back = int(self._maxlag)
if self._hold_back < self._maxlag:
raise ValueError(
"hold_back must be >= the maximum lag of the endog and exog "
"variables"
)
reg = reg[self._hold_back :]
if reg.shape[1] > reg.shape[0]:
raise ValueError(
f"The number of regressors ({reg.shape[1]}) including "
"deterministics, lags of the endog, lags of the exogenous, "
"and fixed regressors is larger than the sample available "
f"for estimation ({reg.shape[0]})."
)
    return self.data.endog[self._hold_back :], reg
# _construct_regressors | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def _construct_variable_names(self):
"""Construct model variables names"""
y_name = self.data.ynames
endog_lag_names = [f"{y_name}.L{i}" for i in self._lags]
exog = self.data.orig_exog
exog_names = {}
for key in self._order:
if isinstance(exog, np.ndarray):
base = f"x{key}"
else:
base = str(key)
lags = self._order[key]
exog_names[key] = [f"{base}.L{lag}" for lag in lags]
self._names = {
"endog": endog_lag_names,
"exog": exog_names,
"deterministic": self._deterministic_reg.columns,
"fixed": self._fixed_names,
}
x_names = list(self._deterministic_reg.columns)
x_names += endog_lag_names
for key in exog_names:
x_names += exog_names[key]
x_names += self._fixed_names
    return y_name, x_names
# _construct_variable_names | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def _forecasting_x(
self,
start: int,
end: int,
num_oos: int,
exog: ArrayLike2D | None,
exog_oos: ArrayLike2D | None,
fixed: ArrayLike2D | None,
fixed_oos: ArrayLike2D | None,
) -> np.ndarray:
"""Construct exog matrix for forecasts"""
def pad_x(x: np.ndarray, pad: int) -> np.ndarray:
if pad == 0:
return x
k = x.shape[1]
return np.vstack([np.full((pad, k), np.nan), x])
pad = 0 if start >= self._hold_back else self._hold_back - start
# Shortcut if all in-sample and no new data
if (end + 1) < self.endog.shape[0] and exog is None and fixed is None:
adjusted_start = max(start - self._hold_back, 0)
return pad_x(self._x[adjusted_start : end + 1 - self._hold_back], pad)
# If anything changed, rebuild x array
exog = self.data.exog if exog is None else np.asarray(exog)
if exog_oos is not None:
exog = np.vstack([exog, np.asarray(exog_oos)[:num_oos]])
fixed = self._fixed if fixed is None else np.asarray(fixed)
if fixed_oos is not None:
fixed = np.vstack([fixed, np.asarray(fixed_oos)[:num_oos]])
det = self._deterministics.in_sample()
if num_oos:
oos_det = self._deterministics.out_of_sample(num_oos)
det = pd.concat([det, oos_det], axis=0)
endog = self.data.endog
if num_oos:
endog = np.hstack([endog, np.full(num_oos, np.nan)])
x = [det]
if self._lags:
endog_reg = lagmat(endog, max(self._lags), original="ex")
x.append(endog_reg[:, [lag - 1 for lag in self._lags]])
if self.ardl_order[1:]:
if isinstance(self.data.orig_exog, pd.DataFrame):
exog = pd.DataFrame(exog, columns=self.data.orig_exog.columns)
exog = self._format_exog(exog, self._order)
x.extend([np.asarray(arr) for arr in exog.values()])
if fixed.shape[1] > 0:
x.append(fixed)
_x = np.column_stack(x)
_x[: self._hold_back] = np.nan
    return _x[start:]
# _forecasting_x | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def predict(
self,
params: ArrayLike1D,
start: int | str | dt.datetime | pd.Timestamp | None = None,
end: int | str | dt.datetime | pd.Timestamp | None = None,
dynamic: bool = False,
exog: NDArray | pd.DataFrame | None = None,
exog_oos: NDArray | pd.DataFrame | None = None,
fixed: NDArray | pd.DataFrame | None = None,
fixed_oos: NDArray | pd.DataFrame | None = None,
):
"""
In-sample prediction and out-of-sample forecasting.
Parameters
----------
params : array_like
The fitted model parameters.
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
        parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out-of-sample prediction. Default is the last observation in
the sample. Unlike standard python slices, end is inclusive so
that all the predictions [start, start+1, ..., end-1, end] are
returned.
dynamic : {bool, int, str, datetime, Timestamp}, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Prior to this observation, true endogenous values
will be used for prediction; starting with this observation and
continuing through the end of prediction, forecasted endogenous
values will be used instead. Datetime-like objects are not
interpreted as offsets. They are instead used to find the index
        location of `dynamic`, which is then used to compute the offset.
exog : array_like
A replacement exogenous array. Must have the same shape as the
exogenous data array used when the model was created.
exog_oos : array_like
An array containing out-of-sample values of the exogenous
variables. Must have the same number of columns as the exog
used when the model was created, and at least as many rows as
the number of out-of-sample forecasts.
fixed : array_like
A replacement fixed array. Must have the same shape as the
fixed data array used when the model was created.
fixed_oos : array_like
An array containing out-of-sample values of the fixed variables.
Must have the same number of columns as the fixed used when the
model was created, and at least as many rows as the number of
out-of-sample forecasts.
Returns
-------
predictions : {ndarray, Series}
        Array of in-sample predictions and / or out-of-sample
forecasts.
"""
params, exog, exog_oos, start, end, num_oos = self._prepare_prediction(
params, exog, exog_oos, start, end
)
def check_exog(arr, name, orig, exact):
if isinstance(orig, pd.DataFrame):
if not isinstance(arr, pd.DataFrame):
raise TypeError(
f"{name} must be a DataFrame when the original exog "
"was a DataFrame"
)
if sorted(arr.columns) != sorted(self.data.orig_exog.columns):
raise ValueError(
f"{name} must have the same columns as the original exog"
)
else:
arr = array_like(arr, name, ndim=2, optional=False)
if arr.ndim != 2 or arr.shape[1] != orig.shape[1]:
raise ValueError(
f"{name} must have the same number of columns as the "
f"original data, {orig.shape[1]}"
)
if exact and arr.shape[0] != orig.shape[0]:
raise ValueError(
f"{name} must have the same number of rows as the "
f"original data ({n})."
)
return arr
n = self.data.endog.shape[0]
if exog is not None:
exog = check_exog(exog, "exog", self.data.orig_exog, True)
if exog_oos is not None:
exog_oos = check_exog(exog_oos, "exog_oos", self.data.orig_exog, False)
if fixed is not None:
fixed = check_exog(fixed, "fixed", self._fixed, True)
if fixed_oos is not None:
fixed_oos = check_exog(
np.asarray(fixed_oos), "fixed_oos", self._fixed, False
)
# The maximum number of 1-step predictions that can be made,
# which depends on the model and lags
if self._fixed.shape[1] or not self._causal:
max_1step = 0
else:
max_1step = np.inf if not self._lags else min(self._lags)
if self._order:
min_exog = min([min(v) for v in self._order.values()])
max_1step = min(max_1step, min_exog)
if num_oos > max_1step:
if self._order and exog_oos is None:
raise ValueError(
"exog_oos must be provided when out-of-sample "
"observations require values of the exog not in the "
"original sample"
)
elif self._order and (exog_oos.shape[0] + max_1step) < num_oos:
raise ValueError(
f"exog_oos must have at least {num_oos - max_1step} "
f"observations to produce {num_oos} forecasts based on "
"the model specification."
)
if self._fixed.shape[1] and fixed_oos is None:
raise ValueError(
"fixed_oos must be provided when predicting "
"out-of-sample observations"
)
elif self._fixed.shape[1] and fixed_oos.shape[0] < num_oos:
raise ValueError(
f"fixed_oos must have at least {num_oos} observations "
f"to produce {num_oos} forecasts."
)
# Extend exog_oos if fcast is valid for horizon but no exog_oos given
if self.exog is not None and exog_oos is None and num_oos:
exog_oos = np.full((num_oos, self.exog.shape[1]), np.nan)
if isinstance(self.data.orig_exog, pd.DataFrame):
exog_oos = pd.DataFrame(exog_oos, columns=self.data.orig_exog.columns)
x = self._forecasting_x(start, end, num_oos, exog, exog_oos, fixed, fixed_oos)
if dynamic is False:
dynamic_start = end + 1 - start
else:
dynamic_step = self._parse_dynamic(dynamic, start)
dynamic_start = dynamic_step
if start < self._hold_back:
dynamic_start = max(dynamic_start, self._hold_back - start)
fcasts = np.full(x.shape[0], np.nan)
fcasts[:dynamic_start] = x[:dynamic_start] @ params
offset = self._deterministic_reg.shape[1]
for i in range(dynamic_start, fcasts.shape[0]):
for j, lag in enumerate(self._lags):
loc = i - lag
if loc >= dynamic_start:
val = fcasts[loc]
else:
# Actual data
val = self.endog[start + loc]
x[i, offset + j] = val
fcasts[i] = x[i] @ params
    return self._wrap_prediction(fcasts, start, end + 1 + num_oos, 0)
# predict | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
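
# Sketch of static vs. dynamic prediction (synthetic data; illustrative):
# with dynamic=25, the first 25 predictions from `start` use observed endog
# values, after which forecasts are fed back through the lag terms.
import numpy as np
import pandas as pd
from statsmodels.tsa.ardl import ARDL

rng = np.random.default_rng(0)
exog_df = pd.DataFrame(rng.standard_normal((200, 1)), columns=["x"])
endog = pd.Series(rng.standard_normal(200), name="y")
res = ARDL(endog, lags=2, exog=exog_df, order=1).fit()
static = res.predict(start=100, end=199)
dynamic = res.predict(start=100, end=199, dynamic=25)
print((static - dynamic).abs().max())  # nonzero once dynamic kicks in
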
def from_formula(
cls,
formula: str,
data: pd.DataFrame,
lags: int | Sequence[int] | None = 0,
order: _ARDLOrder = 0,
trend: Literal["n", "c", "ct", "ctt"] = "n",
*,
causal: bool = False,
seasonal: bool = False,
deterministic: DeterministicProcess | None = None,
hold_back: int | None = None,
period: int | None = None,
missing: Literal["none", "raise"] = "none",
) -> ARDL | UECM:
"""
Construct an ARDL from a formula
Parameters
----------
formula : str
Formula with form dependent ~ independent | fixed. See Examples
below.
data : DataFrame
DataFrame containing the variables in the formula.
lags : {int, list[int]}
The number of lags to include in the model if an integer or the
list of lag indices to include. For example, [1, 4] will only
include lags 1 and 4 while lags=4 will include lags 1, 2, 3,
and 4.
order : {int, sequence[int], dict}
If int, uses lags 0, 1, ..., order for all exog variables. If
sequence[int], uses the ``order`` for all variables. If a dict,
applies the lags series by series. If ``exog`` is anything other
than a DataFrame, the keys are the column index of exog (e.g., 0,
1, ...). If a DataFrame, keys are column names.
causal : bool, optional
Whether to include lag 0 of exog variables. If True, only
includes lags 1, 2, ...
trend : {'n', 'c', 't', 'ct'}, optional
The trend to include in the model:
* 'n' - No trend.
* 'c' - Constant only.
* 't' - Time trend only.
* 'ct' - Constant and time trend.
        The default is 'n'.
seasonal : bool, optional
Flag indicating whether to include seasonal dummies in the model.
If seasonal is True and trend includes 'c', then the first period
is excluded from the seasonal terms.
deterministic : DeterministicProcess, optional
A deterministic process. If provided, trend and seasonal are
ignored. A warning is raised if trend is not "n" and seasonal
is not False.
hold_back : {None, int}, optional
Initial observations to exclude from the estimation sample. If
None, then hold_back is equal to the maximum lag in the model.
Set to a non-zero value to produce comparable models with
different lag length. For example, to compare the fit of a model
with lags=3 and lags=1, set hold_back=3 which ensures that both
models are estimated using observations 3,...,nobs. hold_back
must be >= the maximum lag in the model.
period : {None, int}, optional
The period of the data. Only used if seasonal is True. This
parameter can be omitted if using a pandas object for endog
that contains a recognized frequency.
missing : {"none", "drop", "raise"}, optional
Available options are 'none', 'drop', and 'raise'. If 'none', no
NaN checking is done. If 'drop', any observations with NaNs are
dropped. If 'raise', an error is raised. Default is 'none'.
Returns
-------
ARDL
The ARDL model instance
Examples
--------
A simple ARDL using the Danish data
>>> from statsmodels.datasets.danish_data import load
>>> from statsmodels.tsa.api import ARDL
>>> data = load().data
>>> mod = ARDL.from_formula("lrm ~ ibo", data, 2, 2)
Fixed regressors can be specified using a |
>>> mod = ARDL.from_formula("lrm ~ ibo | ide", data, 2, 2)
"""
index = data.index
fixed_formula = None
if "|" in formula:
formula, fixed_formula = formula.split("|")
fixed_formula = fixed_formula.strip()
mod = OLS.from_formula(formula + " -1", data)
exog = mod.data.orig_exog
exog.index = index
endog = mod.data.orig_endog
endog.index = index
if fixed_formula is not None:
endog_name = formula.split("~")[0].strip()
fixed_formula = f"{endog_name} ~ {fixed_formula} - 1"
mod = OLS.from_formula(fixed_formula, data)
fixed: pd.DataFrame | None = mod.data.orig_exog
fixed.index = index
else:
fixed = None
return cls(
endog,
lags,
exog,
order,
trend=trend,
fixed=fixed,
causal=causal,
seasonal=seasonal,
deterministic=deterministic,
hold_back=hold_back,
period=period,
missing=missing,
    )
# from_formula | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def forecast(
self,
steps: int = 1,
exog: NDArray | pd.DataFrame | None = None,
fixed: NDArray | pd.DataFrame | None = None,
) -> np.ndarray | pd.Series:
"""
Out-of-sample forecasts
Parameters
----------
steps : {int, str, datetime}, default 1
If an integer, the number of steps to forecast from the end of the
sample. Can also be a date string to parse or a datetime type.
However, if the dates index does not have a fixed frequency,
steps must be an integer.
exog : array_like, optional
Exogenous values to use out-of-sample. Must have same number of
columns as original exog data and at least `steps` rows
fixed : array_like, optional
Fixed values to use out-of-sample. Must have same number of
columns as original fixed data and at least `steps` rows
Returns
-------
array_like
        Array of out-of-sample forecasts.
See Also
--------
ARDLResults.predict
In- and out-of-sample predictions
ARDLResults.get_prediction
In- and out-of-sample predictions and confidence intervals
"""
start = self.model.data.orig_endog.shape[0]
if isinstance(steps, (int, np.integer)):
end = start + steps - 1
else:
end = steps
return self.predict(
start=start, end=end, dynamic=False, exog_oos=exog, fixed_oos=fixed
    )
# forecast | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
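
# Sketch (synthetic data; illustrative): when the model includes distributed
# lags, out-of-sample forecasts need exog values covering the horizon, passed
# through the `exog` argument of forecast.
import numpy as np
import pandas as pd
from statsmodels.tsa.ardl import ARDL

rng = np.random.default_rng(0)
exog_df = pd.DataFrame(rng.standard_normal((200, 1)), columns=["x"])
endog = pd.Series(rng.standard_normal(200), name="y")
res = ARDL(endog, lags=1, exog=exog_df, order=1).fit()
future_x = pd.DataFrame(np.zeros((4, 1)), columns=["x"])
print(res.forecast(steps=4, exog=future_x))
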
def _lag_repr(self) -> np.ndarray:
"""Returns poly repr of an AR, (1 -phi1 L -phi2 L^2-...)"""
ar_lags = self._ar_lags if self._ar_lags is not None else []
k_ar = len(ar_lags)
ar_params = np.zeros(self._max_lag + 1)
ar_params[0] = 1
offset = self.model._deterministic_reg.shape[1]
params = self._params[offset : offset + k_ar]
for i, lag in enumerate(ar_lags):
ar_params[lag] = -params[i]
    return ar_params
# _lag_repr | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
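
# Sketch of the polynomial convention used above: an AR(2) with coefficients
# phi1, phi2 is stored as [1, -phi1, -phi2], which is exactly the `ar`
# argument arma2ma expects when computing the MA(infinity) weights that drive
# the forecast-variance recursion in get_prediction below.
import numpy as np
from statsmodels.tsa.arima_process import arma2ma

ar_poly = np.array([1.0, -0.5, -0.2])  # 1 - 0.5*L - 0.2*L^2
psi = arma2ma(ar_poly, np.ones(1), lags=5)
print(psi)                 # psi[0] == 1
print(np.cumsum(psi**2))   # times sigma2 -> h-step forecast variances
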
def get_prediction(
self,
start: int | str | dt.datetime | pd.Timestamp | None = None,
end: int | str | dt.datetime | pd.Timestamp | None = None,
dynamic: bool = False,
exog: NDArray | pd.DataFrame | None = None,
exog_oos: NDArray | pd.DataFrame | None = None,
fixed: NDArray | pd.DataFrame | None = None,
fixed_oos: NDArray | pd.DataFrame | None = None,
) -> np.ndarray | pd.Series:
"""
Predictions and prediction intervals
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
        parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out-of-sample prediction. Default is the last observation in
the sample. Unlike standard python slices, end is inclusive so
that all the predictions [start, start+1, ..., end-1, end] are
returned.
dynamic : {bool, int, str, datetime, Timestamp}, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Prior to this observation, true endogenous values
will be used for prediction; starting with this observation and
continuing through the end of prediction, forecasted endogenous
values will be used instead. Datetime-like objects are not
interpreted as offsets. They are instead used to find the index
        location of `dynamic`, which is then used to compute the offset.
exog : array_like
A replacement exogenous array. Must have the same shape as the
exogenous data array used when the model was created.
exog_oos : array_like
        An array containing out-of-sample values of the exogenous variables.
        Must have the same number of columns as the exog used when the
model was created, and at least as many rows as the number of
out-of-sample forecasts.
fixed : array_like
A replacement fixed array. Must have the same shape as the
fixed data array used when the model was created.
fixed_oos : array_like
An array containing out-of-sample values of the fixed variables.
Must have the same number of columns as the fixed used when the
model was created, and at least as many rows as the number of
out-of-sample forecasts.
Returns
-------
PredictionResults
Prediction results with mean and prediction intervals
"""
mean = self.predict(
start=start,
end=end,
dynamic=dynamic,
exog=exog,
exog_oos=exog_oos,
fixed=fixed,
fixed_oos=fixed_oos,
)
mean_var = np.full_like(mean, fill_value=self.sigma2)
mean_var[np.isnan(mean)] = np.nan
start = 0 if start is None else start
end = self.model._index[-1] if end is None else end
_, _, oos, _ = self.model._get_prediction_index(start, end)
if oos > 0:
ar_params = self._lag_repr()
ma = arma2ma(ar_params, np.ones(1), lags=oos)
mean_var[-oos:] = self.sigma2 * np.cumsum(ma**2)
if isinstance(mean, pd.Series):
mean_var = pd.Series(mean_var, index=mean.index)
    return PredictionResults(mean, mean_var)
# get_prediction | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
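
# Usage sketch (synthetic data; illustrative): the returned PredictionResults
# exposes the mean and variance, from which conf_int builds interval bounds.
import numpy as np
import pandas as pd
from statsmodels.tsa.ardl import ARDL

rng = np.random.default_rng(0)
exog_df = pd.DataFrame(rng.standard_normal((200, 1)), columns=["x"])
endog = pd.Series(rng.standard_normal(200), name="y")
res = ARDL(endog, lags=1, exog=exog_df, order=1).fit()
pred = res.get_prediction(start=150, end=199)
print(pred.predicted_mean.tail())
print(pred.conf_int(alpha=0.05))
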
def plot_predict(
self,
start: int | str | dt.datetime | pd.Timestamp | None = None,
end: int | str | dt.datetime | pd.Timestamp | None = None,
dynamic: bool = False,
exog: NDArray | pd.DataFrame | None = None,
exog_oos: NDArray | pd.DataFrame | None = None,
fixed: NDArray | pd.DataFrame | None = None,
fixed_oos: NDArray | pd.DataFrame | None = None,
alpha: float = 0.05,
in_sample: bool = True,
fig: matplotlib.figure.Figure = None,
figsize: tuple[int, int] | None = None,
) -> matplotlib.figure.Figure:
"""
Plot in- and out-of-sample predictions
Parameters
----------\n%(predict_params)s
alpha : {float, None}
The tail probability not covered by the confidence interval. Must
be in (0, 1). Confidence interval is constructed assuming normally
distributed shocks. If None, figure will not show the confidence
interval.
in_sample : bool
Flag indicating whether to include the in-sample period in the
plot.
fig : Figure
An existing figure handle. If not provided, a new figure is
created.
figsize: tuple[float, float]
Tuple containing the figure size values.
Returns
-------
Figure
Figure handle containing the plot.
"""
predictions = self.get_prediction(
start=start,
end=end,
dynamic=dynamic,
exog=exog,
exog_oos=exog_oos,
fixed=fixed,
fixed_oos=fixed_oos,
)
return self._plot_predictions(
predictions, start, end, alpha, in_sample, fig, figsize
    )
# plot_predict | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def summary(self, alpha: float = 0.05) -> Summary:
"""
Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals.
Returns
-------
Summary
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
model = self.model
title = model.__class__.__name__ + " Model Results"
method = "Conditional MLE"
# get sample
start = self._hold_back
if self.data.dates is not None:
dates = self.data.dates
sample = [dates[start].strftime("%m-%d-%Y")]
sample += ["- " + dates[-1].strftime("%m-%d-%Y")]
else:
sample = [str(start), str(len(self.data.orig_endog))]
model = self.model.__class__.__name__ + str(self.model.ardl_order)
if self.model.seasonal:
model = "Seas. " + model
dep_name = str(self.model.endog_names)
top_left = [
("Dep. Variable:", [dep_name]),
("Model:", [model]),
("Method:", [method]),
("Date:", None),
("Time:", None),
("Sample:", [sample[0]]),
("", [sample[1]]),
]
top_right = [
("No. Observations:", [str(len(self.model.endog))]),
("Log Likelihood", ["%#5.3f" % self.llf]),
("S.D. of innovations", ["%#5.3f" % self.sigma2**0.5]),
("AIC", ["%#5.3f" % self.aic]),
("BIC", ["%#5.3f" % self.bic]),
("HQIC", ["%#5.3f" % self.hqic]),
]
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right, title=title)
smry.add_table_params(self, alpha=alpha, use_t=False)
    return smry
# summary | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def dl_lags(self) -> dict[Hashable, list[int]]:
"""The lags of exogenous variables in the selected model"""
    return self._model.dl_lags
# dl_lags | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def _check_lags(
self, lags: int | Sequence[int] | None, hold_back: int | None
) -> tuple[list[int], int]:
"""Check lags value conforms to requirement"""
if not (isinstance(lags, _INT_TYPES) or lags is None):
raise TypeError("lags must be an integer or None")
    return super()._check_lags(lags, hold_back)
# _check_lags | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def _check_order(self, order: _ARDLOrder):
"""Check order conforms to requirement"""
if isinstance(order, Mapping):
for k, v in order.items():
if not isinstance(v, _INT_TYPES) and v is not None:
raise TypeError("order values must be positive integers or None")
elif not (isinstance(order, _INT_TYPES) or order is None):
raise TypeError(
"order must be None, a positive integer, or a dict "
"containing positive integers or None"
)
# TODO: Check order is >= 1
order = super()._check_order(order)
if not order:
raise ValueError("Model must contain at least one exogenous variable")
for key, val in order.items():
if val == [0]:
raise ValueError(
"All included exog variables must have a lag length >= 1"
)
    return order
# _check_order | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def _construct_variable_names(self):
"""Construct model variables names"""
endog = self.data.orig_endog
if isinstance(endog, pd.Series):
y_base = endog.name or "y"
elif isinstance(endog, pd.DataFrame):
y_base = endog.squeeze().name or "y"
else:
y_base = "y"
y_name = f"D.{y_base}"
# 1. Deterministics
x_names = list(self._deterministic_reg.columns)
# 2. Levels
x_names.append(f"{y_base}.L1")
orig_exog = self.data.orig_exog
exog_pandas = isinstance(orig_exog, pd.DataFrame)
dexog_names = []
for key, val in self._order.items():
if val is not None:
if exog_pandas:
x_name = f"{key}.L1"
else:
x_name = f"x{key}.L1"
x_names.append(x_name)
lag_base = x_name[:-1]
for lag in val[:-1]:
dexog_names.append(f"D.{lag_base}{lag}")
# 3. Lagged endog
y_lags = max(self._lags) if self._lags else 0
dendog_names = [f"{y_name}.L{lag}" for lag in range(1, y_lags)]
x_names.extend(dendog_names)
x_names.extend(dexog_names)
x_names.extend(self._fixed_names)
    return y_name, x_names
# _construct_variable_names | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def _construct_regressors(
self, hold_back: int | None
) -> tuple[np.ndarray, np.ndarray]:
"""Construct and format model regressors"""
# 1. Endogenous and endogenous lags
self._maxlag = max(self._lags) if self._lags else 0
dendog = np.full_like(self.data.endog, np.nan)
dendog[1:] = np.diff(self.data.endog, axis=0)
dlag = max(0, self._maxlag - 1)
self._endog_reg, self._endog = lagmat(dendog, dlag, original="sep")
# 2. Deterministics
self._deterministic_reg = self._deterministics.in_sample()
# 3. Levels
orig_exog = self.data.orig_exog
exog_pandas = isinstance(orig_exog, pd.DataFrame)
lvl = np.full_like(self.data.endog, np.nan)
lvl[1:] = self.data.endog[:-1]
lvls = [lvl.copy()]
for key, val in self._order.items():
if val is not None:
if exog_pandas:
loc = orig_exog.columns.get_loc(key)
else:
loc = key
lvl[1:] = self.data.exog[:-1, loc]
lvls.append(lvl.copy())
self._levels = np.column_stack(lvls)
# 4. exog Lags
if exog_pandas:
dexog = orig_exog.diff()
else:
dexog = np.full_like(self.data.exog, np.nan)
dexog[1:] = np.diff(orig_exog, axis=0)
adj_order = {}
for key, val in self._order.items():
val = None if (val is None or val == [1]) else val[:-1]
adj_order[key] = val
self._exog = self._format_exog(dexog, adj_order)
self._blocks = {
"deterministic": self._deterministic_reg,
"levels": self._levels,
"endog": self._endog_reg,
"exog": self._exog,
"fixed": self._fixed,
}
blocks = [self._endog]
for key, val in self._blocks.items():
if key != "exog":
blocks.append(np.asarray(val))
else:
for subval in val.values():
blocks.append(np.asarray(subval))
y = blocks[0]
reg = np.column_stack(blocks[1:])
exog_maxlag = 0
for val in self._order.values():
exog_maxlag = max(exog_maxlag, max(val) if val is not None else 0)
self._maxlag = max(self._maxlag, exog_maxlag)
# Must be at least 1 since the endog is differenced
self._maxlag = max(self._maxlag, 1)
if hold_back is None:
self._hold_back = int(self._maxlag)
if self._hold_back < self._maxlag:
raise ValueError(
"hold_back must be >= the maximum lag of the endog and exog "
"variables"
)
reg = reg[self._hold_back :]
if reg.shape[1] > reg.shape[0]:
raise ValueError(
f"The number of regressors ({reg.shape[1]}) including "
"deterministics, lags of the endog, lags of the exogenous, "
"and fixed regressors is larger than the sample available "
f"for estimation ({reg.shape[0]})."
)
    return np.squeeze(y)[self._hold_back :], reg
# _construct_regressors | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | BSD-3-Clause
def from_ardl(cls, ardl: ARDL, missing: Literal["none", "drop", "raise"] = "none"):
"""
Construct a UECM from an ARDL model
Parameters
----------
ardl : ARDL
The ARDL model instance
missing : {"none", "drop", "raise"}, default "none"
How to treat missing observations.
Returns
-------
UECM
The UECM model instance
Notes
-----
The lag requirements for a UECM are stricter than for an ARDL.
Any variable that is included in the UECM must have a lag length
of at least 1. Additionally, the included lags must be contiguous
starting at 0 if non-causal or 1 if causal.
"""
err = (
"UECM can only be created from ARDL models that include all "
"{var_typ} lags up to the maximum lag in the model."
)
uecm_lags = {}
dl_lags = ardl.dl_lags
for key, val in dl_lags.items():
max_val = max(val)
if len(dl_lags[key]) < (max_val + int(not ardl.causal)):
raise ValueError(err.format(var_typ="exogenous"))
uecm_lags[key] = max_val
if ardl.ar_lags is None:
ar_lags = None
else:
max_val = max(ardl.ar_lags)
if len(ardl.ar_lags) != max_val:
raise ValueError(err.format(var_typ="endogenous"))
ar_lags = max_val
return cls(
ardl.data.orig_endog,
ar_lags,
ardl.data.orig_exog,
uecm_lags,
trend=ardl.trend,
fixed=ardl.fixed,
seasonal=ardl.seasonal,
hold_back=ardl.hold_back,
period=ardl.period,
causal=ardl.causal,
missing=missing,
deterministic=ardl.deterministic,
) | Construct a UECM from an ARDL model
Parameters
----------
ardl : ARDL
The ARDL model instance
missing : {"none", "drop", "raise"}, default "none"
How to treat missing observations.
Returns
-------
UECM
The UECM model instance
Notes
-----
The lag requirements for a UECM are stricter than for an ARDL.
Any variable that is included in the UECM must have a lag length
of at least 1. Additionally, the included lags must be contiguous
starting at 0 if non-causal or 1 if causal. | from_ardl | python | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ardl/model.py | BSD-3-Clause |
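A hedged usage sketch of `from_ardl`: the random-walk series below are purely illustrative, but the lag choices (contiguous 1..2 for the endog, 0..2 for the exog) satisfy the requirements stated in the Notes.

```python
import numpy as np
import pandas as pd
from statsmodels.tsa.ardl import ARDL, UECM

rng = np.random.default_rng(0)
idx = pd.period_range("2000Q1", periods=120, freq="Q")
y = pd.Series(rng.standard_normal(120).cumsum(), index=idx, name="y")
x = pd.DataFrame({"x": rng.standard_normal(120).cumsum()}, index=idx)

ardl = ARDL(y, lags=2, exog=x, order=2)   # lags 1..2 (endog), 0..2 (exog)
uecm = UECM.from_ardl(ardl)
res = uecm.fit()
print(res.ci_params)  # normalized cointegrating relationship
```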
def predict(
self,
params: ArrayLike1D,
start: int | str | dt.datetime | pd.Timestamp | None = None,
end: int | str | dt.datetime | pd.Timestamp | None = None,
dynamic: bool = False,
exog: NDArray | pd.DataFrame | None = None,
exog_oos: NDArray | pd.DataFrame | None = None,
fixed: NDArray | pd.DataFrame | None = None,
fixed_oos: NDArray | pd.DataFrame | None = None,
) -> np.ndarray:
"""
In-sample prediction and out-of-sample forecasting.
Parameters
----------
params : array_like
The fitted model parameters.
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out-of-sample prediction. Default is the last observation in
the sample. Unlike standard python slices, end is inclusive so
that all the predictions [start, start+1, ..., end-1, end] are
returned.
dynamic : {bool, int, str, datetime, Timestamp}, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Prior to this observation, true endogenous values
will be used for prediction; starting with this observation and
continuing through the end of prediction, forecasted endogenous
values will be used instead. Datetime-like objects are not
interpreted as offsets. They are instead used to find the index
location of `dynamic`, which is then used to compute the offset.
exog : array_like
A replacement exogenous array. Must have the same shape as the
exogenous data array used when the model was created.
exog_oos : array_like
An array containing out-of-sample values of the exogenous
variables. Must have the same number of columns as the exog
used when the model was created, and at least as many rows as
the number of out-of-sample forecasts.
fixed : array_like
A replacement fixed array. Must have the same shape as the
fixed data array used when the model was created.
fixed_oos : array_like
An array containing out-of-sample values of the fixed variables.
Must have the same number of columns as the fixed used when the
model was created, and at least as many rows as the number of
out-of-sample forecasts.
Returns
-------
predictions : {ndarray, Series}
Array of in-sample predictions and / or out-of-sample
forecasts.
"""
if dynamic is not False:
raise NotImplementedError("dynamic forecasts are not supported")
params, exog, exog_oos, start, end, num_oos = self._prepare_prediction(
params, exog, exog_oos, start, end
)
if num_oos != 0:
raise NotImplementedError("Out-of-sample forecasts are not supported")
pred = np.full(self.endog.shape[0], np.nan)
pred[-self._x.shape[0] :] = self._x @ params
return pred[start : end + 1] | In-sample prediction and out-of-sample forecasting.
Parameters
----------
params : array_like
The fitted model parameters.
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out-of-sample prediction. Default is the last observation in
the sample. Unlike standard python slices, end is inclusive so
that all the predictions [start, start+1, ..., end-1, end] are
returned.
dynamic : {bool, int, str, datetime, Timestamp}, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Prior to this observation, true endogenous values
will be used for prediction; starting with this observation and
continuing through the end of prediction, forecasted endogenous
values will be used instead. Datetime-like objects are not
interpreted as offsets. They are instead used to find the index
location of `dynamic`, which is then used to compute the offset.
exog : array_like
A replacement exogenous array. Must have the same shape as the
exogenous data array used when the model was created.
exog_oos : array_like
An array containing out-of-sample values of the exogenous
variables. Must have the same number of columns as the exog
used when the model was created, and at least as many rows as
the number of out-of-sample forecasts.
fixed : array_like
A replacement fixed array. Must have the same shape as the
fixed data array used when the model was created.
fixed_oos : array_like
An array containing out-of-sample values of the fixed variables.
Must have the same number of columns as the fixed used when the
model was created, and at least as many rows as the number of
out-of-sample forecasts.
Returns
-------
predictions : {ndarray, Series}
Array of in-sample predictions and / or out-of-sample
forecasts. | predict | python | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ardl/model.py | BSD-3-Clause |
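A sketch of the restrictions documented above: only static, in-sample prediction is supported, so `dynamic=True` raises. Data are synthetic and purely illustrative.

```python
import numpy as np
import pandas as pd
from statsmodels.tsa.ardl import UECM

rng = np.random.default_rng(1)
idx = pd.period_range("2000Q1", periods=120, freq="Q")
y = pd.Series(rng.standard_normal(120).cumsum(), index=idx)
x = pd.DataFrame({"x": rng.standard_normal(120).cumsum()}, index=idx)

res = UECM(y, lags=2, exog=x, order=2).fit()
pred = np.asarray(res.predict())     # static in-sample predictions
print(pred[~np.isnan(pred)][:5])     # initial hold_back entries are NaN
try:
    res.predict(dynamic=True)
except NotImplementedError as exc:
    print(exc)
```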
def resid(self):
"""
The residuals of the model.
"""
model = self.model
return model._y - self.fittedvalues | The residuals of the model. | resid | python | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ardl/model.py | BSD-3-Clause |
def ci_params(self) -> np.ndarray | pd.Series:
"""Parameters of normalized cointegrating relationship"""
ndet = self.model._blocks["deterministic"].shape[1]
nlvl = self.model._blocks["levels"].shape[1]
base = np.asarray(self.params)[ndet]
return self._ci_wrap(self.params[: ndet + nlvl] / base, "ci_params") | Parameters of normalized cointegrating relationship | ci_params | python | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ardl/model.py | BSD-3-Clause |
def ci_bse(self) -> np.ndarray | pd.Series:
"""Standard Errors of normalized cointegrating relationship"""
bse = np.sqrt(np.diag(self.ci_cov_params()))
return self._ci_wrap(bse, "ci_bse") | Standard Errors of normalized cointegrating relationship | ci_bse | python | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ardl/model.py | BSD-3-Clause |
def ci_tvalues(self) -> np.ndarray | pd.Series:
"""T-values of normalized cointegrating relationship"""
ndet = self.model._blocks["deterministic"].shape[1]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
tvalues = np.asarray(self.ci_params) / np.asarray(self.ci_bse)
tvalues[ndet] = np.nan
return self._ci_wrap(tvalues, "ci_tvalues") | T-values of normalized cointegrating relationship | ci_tvalues | python | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ardl/model.py | BSD-3-Clause |
def ci_pvalues(self) -> np.ndarray | pd.Series:
"""P-values of normalized cointegrating relationship"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
pvalues = 2 * (1 - stats.norm.cdf(np.abs(self.ci_tvalues)))
return self._ci_wrap(pvalues, "ci_pvalues") | P-values of normalized cointegrating relationship | ci_pvalues | python | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ardl/model.py | BSD-3-Clause |
def ci_cov_params(self) -> Float64Array | pd.DataFrame:
"""Covariance of normalized of cointegrating relationship"""
ndet = self.model._blocks["deterministic"].shape[1]
nlvl = self.model._blocks["levels"].shape[1]
loc = list(range(ndet + nlvl))
cov = self.cov_params()
cov_a = np.asarray(cov)
ci_cov = cov_a[np.ix_(loc, loc)]
m = ci_cov.shape[0]
params = np.asarray(self.params)[: ndet + nlvl]
base = params[ndet]
d = np.zeros((m, m))
for i in range(m):
if i == ndet:
continue
d[i, i] = 1 / base
d[i, ndet] = -params[i] / (base**2)
ci_cov = d @ ci_cov @ d.T
return self._ci_wrap(ci_cov) | Covariance of the normalized cointegrating relationship | ci_cov_params | python | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ardl/model.py | BSD-3-Clause |
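A standalone sketch of the delta-method step above: normalizing by the level coefficient `base` turns the covariance of the raw coefficients into the covariance of g(params) = params / base via the Jacobian `d`. All numbers are made up.

```python
import numpy as np

params = np.array([0.2, -0.8, 0.5])  # [constant, base level coef, exog level coef]
cov = np.diag([0.01, 0.04, 0.02])    # made-up covariance of the raw estimates
ndet = 1                             # index of the normalizing coefficient

base = params[ndet]
m = len(params)
d = np.zeros((m, m))
for i in range(m):
    if i == ndet:
        continue                     # normalized base is identically 1
    d[i, i] = 1 / base
    d[i, ndet] = -params[i] / base**2

ci_cov = d @ cov @ d.T
print(np.sqrt(np.diag(ci_cov)))     # the ci_bse analogue; the base entry is 0
```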
def _lag_repr(self):
"""Returns poly repr of an AR, (1 -phi1 L -phi2 L^2-...)""" | Returns poly repr of an AR, (1 -phi1 L -phi2 L^2-...) | _lag_repr | python | statsmodels/statsmodels | statsmodels/tsa/ardl/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ardl/model.py | BSD-3-Clause |
def arma_innovations(endog, ar_params=None, ma_params=None, sigma2=1,
normalize=False, prefix=None):
"""
Compute innovations using a given ARMA process.
Parameters
----------
endog : ndarray
The observed time-series process, may be univariate or multivariate.
ar_params : ndarray, optional
Autoregressive parameters.
ma_params : ndarray, optional
Moving average parameters.
sigma2 : ndarray, optional
The ARMA innovation variance. Default is 1.
normalize : bool, optional
Whether or not to normalize the returned innovations. Default is False.
prefix : str, optional
The BLAS prefix associated with the datatype. Default is to find the
best datatype based on given input. This argument is typically only
used internally.
Returns
-------
innovations : ndarray
Innovations (one-step-ahead prediction errors) for the given `endog`
series with predictions based on the given ARMA process. If
`normalize=True`, then the returned innovations have been "whitened" by
dividing through by the square root of the mean square error.
innovations_mse : ndarray
Mean square error for the innovations.
"""
# Parameters
endog = np.require(endog, requirements="W")
squeezed = endog.ndim == 1
if squeezed:
endog = endog[:, None]
ar_params = np.atleast_1d([] if ar_params is None else ar_params)
ma_params = np.atleast_1d([] if ma_params is None else ma_params)
nobs, k_endog = endog.shape
ar = np.r_[1, -ar_params]
ma = np.r_[1, ma_params]
# Get BLAS prefix
if prefix is None:
prefix, dtype, _ = find_best_blas_type(
[endog, ar_params, ma_params, np.array(sigma2)])
dtype = prefix_dtype_map[prefix]
# Make arrays contiguous for BLAS calls
endog = np.asfortranarray(endog, dtype=dtype)
ar_params = np.asfortranarray(ar_params, dtype=dtype)
ma_params = np.asfortranarray(ma_params, dtype=dtype)
sigma2 = dtype(sigma2).item()
# Get the appropriate functions
arma_transformed_acovf_fast = getattr(
_arma_innovations, prefix + 'arma_transformed_acovf_fast')
arma_innovations_algo_fast = getattr(
_arma_innovations, prefix + 'arma_innovations_algo_fast')
arma_innovations_filter = getattr(
_arma_innovations, prefix + 'arma_innovations_filter')
# Run the innovations algorithm for ARMA coefficients
arma_acovf = arima_process.arma_acovf(ar, ma,
sigma2=sigma2, nobs=nobs) / sigma2
acovf, acovf2 = arma_transformed_acovf_fast(ar, ma, arma_acovf)
theta, v = arma_innovations_algo_fast(nobs, ar_params, ma_params,
acovf, acovf2)
v = np.array(v)
if (np.any(v < 0) or
not np.isfinite(theta).all() or
not np.isfinite(v).all()):
# This is defensive code that is hard to hit
raise ValueError(NON_STATIONARY_ERROR)
# Run the innovations filter across each series
u = []
for i in range(k_endog):
u_i = np.array(arma_innovations_filter(endog[:, i], ar_params,
ma_params, theta))
u.append(u_i)
u = np.vstack(u).T
if normalize:
u /= v[:, None]**0.5
# Post-processing
if squeezed:
u = u.squeeze()
return u, v | Compute innovations using a given ARMA process.
Parameters
----------
endog : ndarray
The observed time-series process, may be univariate or multivariate.
ar_params : ndarray, optional
Autoregressive parameters.
ma_params : ndarray, optional
Moving average parameters.
sigma2 : ndarray, optional
The ARMA innovation variance. Default is 1.
normalize : bool, optional
Whether or not to normalize the returned innovations. Default is False.
prefix : str, optional
The BLAS prefix associated with the datatype. Default is to find the
best datatype based on given input. This argument is typically only
used internally.
Returns
-------
innovations : ndarray
Innovations (one-step-ahead prediction errors) for the given `endog`
series with predictions based on the given ARMA process. If
`normalize=True`, then the returned innovations have been "whitened" by
dividing through by the square root of the mean square error.
innovations_mse : ndarray
Mean square error for the innovations. | arma_innovations | python | statsmodels/statsmodels | statsmodels/tsa/innovations/arma_innovations.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/innovations/arma_innovations.py | BSD-3-Clause |
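A quick usage sketch with a simulated ARMA(1, 1) series; the true parameters are passed in, and the returned mean squared errors decline toward 1 as the innovations filter reaches its steady state.

```python
import numpy as np
from statsmodels.tsa.arima_process import arma_generate_sample
from statsmodels.tsa.innovations.arma_innovations import arma_innovations

np.random.seed(0)
# Lag polynomials include the zero lag: ar = 1 - 0.6 L, ma = 1 + 0.3 L
y = arma_generate_sample(ar=[1, -0.6], ma=[1, 0.3], nsample=200)

u, v = arma_innovations(y, ar_params=[0.6], ma_params=[0.3], sigma2=1.0)
print(u[:5])  # one-step-ahead prediction errors
print(v[:5])  # scaled MSEs, approaching 1
```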
def arma_loglike(endog, ar_params=None, ma_params=None, sigma2=1, prefix=None):
"""
Compute the log-likelihood of the given data assuming an ARMA process.
Parameters
----------
endog : ndarray
The observed time-series process.
ar_params : ndarray, optional
Autoregressive parameters.
ma_params : ndarray, optional
Moving average parameters.
sigma2 : ndarray, optional
The ARMA innovation variance. Default is 1.
prefix : str, optional
The BLAS prefix associated with the datatype. Default is to find the
best datatype based on given input. This argument is typically only
used internally.
Returns
-------
float
The joint loglikelihood.
"""
llf_obs = arma_loglikeobs(endog, ar_params=ar_params, ma_params=ma_params,
sigma2=sigma2, prefix=prefix)
return np.sum(llf_obs) | Compute the log-likelihood of the given data assuming an ARMA process.
Parameters
----------
endog : ndarray
The observed time-series process.
ar_params : ndarray, optional
Autoregressive parameters.
ma_params : ndarray, optional
Moving average parameters.
sigma2 : ndarray, optional
The ARMA innovation variance. Default is 1.
prefix : str, optional
The BLAS prefix associated with the datatype. Default is to find the
best datatype based on given input. This argument is typically only
used internally.
Returns
-------
float
The joint loglikelihood. | arma_loglike | python | statsmodels/statsmodels | statsmodels/tsa/innovations/arma_innovations.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/innovations/arma_innovations.py | BSD-3-Clause |
def arma_loglikeobs(endog, ar_params=None, ma_params=None, sigma2=1,
prefix=None):
"""
Compute the log-likelihood for each observation assuming an ARMA process.
Parameters
----------
endog : ndarray
The observed time-series process.
ar_params : ndarray, optional
Autoregressive parameters.
ma_params : ndarray, optional
Moving average parameters.
sigma2 : ndarray, optional
The ARMA innovation variance. Default is 1.
prefix : str, optional
The BLAS prefix associated with the datatype. Default is to find the
best datatype based on given input. This argument is typically only
used internally.
Returns
-------
ndarray
Array of loglikelihood values for each observation.
"""
endog = np.array(endog)
ar_params = np.atleast_1d([] if ar_params is None else ar_params)
ma_params = np.atleast_1d([] if ma_params is None else ma_params)
if prefix is None:
prefix, dtype, _ = find_best_blas_type(
[endog, ar_params, ma_params, np.array(sigma2)])
dtype = prefix_dtype_map[prefix]
endog = np.ascontiguousarray(endog, dtype=dtype)
ar_params = np.asfortranarray(ar_params, dtype=dtype)
ma_params = np.asfortranarray(ma_params, dtype=dtype)
sigma2 = dtype(sigma2).item()
func = getattr(_arma_innovations, prefix + 'arma_loglikeobs_fast')
return func(endog, ar_params, ma_params, sigma2) | Compute the log-likelihood for each observation assuming an ARMA process.
Parameters
----------
endog : ndarray
The observed time-series process.
ar_params : ndarray, optional
Autoregressive parameters.
ma_params : ndarray, optional
Moving average parameters.
sigma2 : ndarray, optional
The ARMA innovation variance. Default is 1.
prefix : str, optional
The BLAS prefix associated with the datatype. Default is to find the
best datatype based on given input. This argument is typically only
used internally.
Returns
-------
ndarray
Array of loglikelihood values for each observation. | arma_loglikeobs | python | statsmodels/statsmodels | statsmodels/tsa/innovations/arma_innovations.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/innovations/arma_innovations.py | BSD-3-Clause |
def arma_score(endog, ar_params=None, ma_params=None, sigma2=1,
prefix=None):
"""
Compute the score (gradient of the log-likelihood function).
Parameters
----------
endog : ndarray
The observed time-series process.
ar_params : ndarray, optional
Autoregressive coefficients, not including the zero lag.
ma_params : ndarray, optional
Moving average coefficients, not including the zero lag, where the sign
convention assumes the coefficients are part of the lag polynomial on
the right-hand-side of the ARMA definition (i.e. they have the same
sign from the usual econometrics convention in which the coefficients
are on the right-hand-side of the ARMA definition).
sigma2 : ndarray, optional
The ARMA innovation variance. Default is 1.
prefix : str, optional
The BLAS prefix associated with the datatype. Default is to find the
best datatype based on given input. This argument is typically only
used internally.
Returns
-------
ndarray
Score, evaluated at the given parameters.
Notes
-----
This is a numerical approximation, calculated using first-order complex
step differentiation on the `arma_loglike` method.
"""
ar_params = [] if ar_params is None else ar_params
ma_params = [] if ma_params is None else ma_params
p = len(ar_params)
q = len(ma_params)
def func(params):
return arma_loglike(endog, params[:p], params[p:p + q], params[p + q:])
params0 = np.r_[ar_params, ma_params, sigma2]
epsilon = _get_epsilon(params0, 2., None, len(params0))
return approx_fprime_cs(params0, func, epsilon) | Compute the score (gradient of the log-likelihood function).
Parameters
----------
endog : ndarray
The observed time-series process.
ar_params : ndarray, optional
Autoregressive coefficients, not including the zero lag.
ma_params : ndarray, optional
Moving average coefficients, not including the zero lag, where the sign
convention assumes the coefficients are part of the lag polynomial on
the right-hand-side of the ARMA definition (i.e. they have the same
sign from the usual econometrics convention in which the coefficients
are on the right-hand-side of the ARMA definition).
sigma2 : ndarray, optional
The ARMA innovation variance. Default is 1.
prefix : str, optional
The BLAS prefix associated with the datatype. Default is to find the
best datatype based on given input. This argument is typically only
used internally.
Returns
-------
ndarray
Score, evaluated at the given parameters.
Notes
-----
This is a numerical approximation, calculated using first-order complex
step differentiation on the `arma_loglike` method. | arma_score | python | statsmodels/statsmodels | statsmodels/tsa/innovations/arma_innovations.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/innovations/arma_innovations.py | BSD-3-Clause |
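The Notes reference complex-step differentiation; a minimal sketch of the idea on a toy function (not the library internals): f'(x) ≈ Im f(x + ih) / h, which avoids subtractive cancellation entirely.

```python
import numpy as np

def f(x):
    return np.exp(x) * np.sin(x)

x0, h = 1.0, 1e-20
approx = np.imag(f(x0 + 1j * h)) / h
exact = np.exp(x0) * (np.sin(x0) + np.cos(x0))
print(approx - exact)  # ~0.0: accurate to machine precision even for tiny h
```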
def arma_scoreobs(endog, ar_params=None, ma_params=None, sigma2=1,
prefix=None):
"""
Compute the score (gradient) per observation.
Parameters
----------
endog : ndarray
The observed time-series process.
ar_params : ndarray, optional
Autoregressive coefficients, not including the zero lag.
ma_params : ndarray, optional
Moving average coefficients, not including the zero lag, where the sign
convention assumes the coefficients are part of the lag polynomial on
the right-hand-side of the ARMA definition (i.e. they have the same
sign from the usual econometrics convention in which the coefficients
are on the right-hand-side of the ARMA definition).
sigma2 : ndarray, optional
The ARMA innovation variance. Default is 1.
prefix : str, optional
The BLAS prefix associated with the datatype. Default is to find the
best datatype based on given input. This argument is typically only
used internally.
Returns
-------
ndarray
Score per observation, evaluated at the given parameters.
Notes
-----
This is a numerical approximation, calculated using first-order complex
step differentiation on the `arma_loglikeobs` method.
"""
ar_params = [] if ar_params is None else ar_params
ma_params = [] if ma_params is None else ma_params
p = len(ar_params)
q = len(ma_params)
def func(params):
return arma_loglikeobs(endog, params[:p], params[p:p + q],
params[p + q:])
params0 = np.r_[ar_params, ma_params, sigma2]
epsilon = _get_epsilon(params0, 2., None, len(params0))
return approx_fprime_cs(params0, func, epsilon) | Compute the score (gradient) per observation.
Parameters
----------
endog : ndarray
The observed time-series process.
ar_params : ndarray, optional
Autoregressive coefficients, not including the zero lag.
ma_params : ndarray, optional
Moving average coefficients, not including the zero lag, where the sign
convention assumes the coefficients are part of the lag polynomial on
the right-hand-side of the ARMA definition (i.e. they have the same
sign from the usual econometrics convention in which the coefficients
are on the right-hand-side of the ARMA definition).
sigma2 : ndarray, optional
The ARMA innovation variance. Default is 1.
prefix : str, optional
The BLAS prefix associated with the datatype. Default is to find the
best datatype based on given input. This argument is typically only
used internally.
Returns
-------
ndarray
Score per observation, evaluated at the given parameters.
Notes
-----
This is a numerical approximation, calculated using first-order complex
step differentiation on the `arma_loglikeobs` method. | arma_scoreobs | python | statsmodels/statsmodels | statsmodels/tsa/innovations/arma_innovations.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/innovations/arma_innovations.py | BSD-3-Clause |
def fix_params(self, params):
"""
Fix parameters to specific values (context manager)
Parameters
----------
params : dict
Dictionary describing the fixed parameter values, of the form
`param_name: fixed_value`. See the `param_names` property for valid
parameter names.
Examples
--------
>>> mod = sm.tsa.SARIMAX(endog, order=(1, 0, 1))
>>> with mod.fix_params({'ar.L1': 0.5}):
res = mod.fit()
"""
# Initialization (this is done here rather than in the constructor
# because param_names may not be available at that point)
if self._fixed_params is None:
self._fixed_params = {}
self._params_index = OrderedDict(
zip(self.param_names, np.arange(self.k_params))
)
# Cache the current fixed parameters
cache_fixed_params = self._fixed_params.copy()
cache_has_fixed_params = self._has_fixed_params
cache_fixed_params_index = self._fixed_params_index
cache_free_params_index = self._free_params_index
# Validate parameter names and values
all_fixed_param_names = (
set(params.keys()) | set(self._fixed_params.keys())
)
self._validate_can_fix_params(all_fixed_param_names)
# Set the new fixed parameters, keeping the order as given by
# param_names
self._fixed_params.update(params)
self._fixed_params = OrderedDict(
[
(name, self._fixed_params[name])
for name in self.param_names
if name in self._fixed_params
]
)
# Update associated values
self._has_fixed_params = True
self._fixed_params_index = [
self._params_index[key] for key in self._fixed_params.keys()
]
self._free_params_index = list(
set(np.arange(self.k_params)).difference(self._fixed_params_index)
)
try:
yield
finally:
# Reset the fixed parameters
self._has_fixed_params = cache_has_fixed_params
self._fixed_params = cache_fixed_params
self._fixed_params_index = cache_fixed_params_index
self._free_params_index = cache_free_params_index | Fix parameters to specific values (context manager)
Parameters
----------
params : dict
Dictionary describing the fixed parameter values, of the form
`param_name: fixed_value`. See the `param_names` property for valid
parameter names.
Examples
--------
>>> mod = sm.tsa.SARIMAX(endog, order=(1, 0, 1))
>>> with mod.fix_params({'ar.L1': 0.5}):
res = mod.fit() | fix_params | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def fit_constrained(self, constraints, start_params=None, **fit_kwds):
"""
Fit the model with some parameters subject to equality constraints.
Parameters
----------
constraints : dict
Dictionary of constraints, of the form `param_name: fixed_value`.
See the `param_names` property for valid parameter names.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by Model.start_params.
**fit_kwds : keyword arguments
fit_kwds are used in the optimization of the remaining parameters.
Returns
-------
results : Results instance
Examples
--------
>>> mod = sm.tsa.SARIMAX(endog, order=(1, 0, 1))
>>> res = mod.fit_constrained({'ar.L1': 0.5})
"""
with self.fix_params(constraints):
res = self.fit(start_params, **fit_kwds)
return res | Fit the model with some parameters subject to equality constraints.
Parameters
----------
constraints : dict
Dictionary of constraints, of the form `param_name: fixed_value`.
See the `param_names` property for valid parameter names.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by Model.start_params.
**fit_kwds : keyword arguments
fit_kwds are used in the optimization of the remaining parameters.
Returns
-------
results : Results instance
Examples
--------
>>> mod = sm.tsa.SARIMAX(endog, order=(1, 0, 1))
>>> res = mod.fit_constrained({'ar.L1': 0.5}) | fit_constrained | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def start_params(self):
"""
(array) Starting parameters for maximum likelihood estimation.
"""
if hasattr(self, "_start_params"):
return self._start_params
else:
raise NotImplementedError | (array) Starting parameters for maximum likelihood estimation. | start_params | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def param_names(self):
"""
(list of str) List of human readable parameter names (for parameters
actually included in the model).
"""
if hasattr(self, "_param_names"):
return self._param_names
else:
try:
names = ["param.%d" % i for i in range(len(self.start_params))]
except NotImplementedError:
names = []
return names | (list of str) List of human readable parameter names (for parameters
actually included in the model). | param_names | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def from_formula(
cls, formula, data, subset=None, drop_cols=None, *args, **kwargs
):
"""
Not implemented for state space models
"""
raise NotImplementedError | Not implemented for state space models | from_formula | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def _hessian_complex_step(self, params, **kwargs):
"""
Hessian matrix computed by second-order complex-step differentiation
on the `loglike` function.
"""
# the default epsilon can be too small
epsilon = _get_epsilon(params, 3., None, len(params))
kwargs['transformed'] = True
kwargs['complex_step'] = True
hessian = approx_hess_cs(
params, self.loglike, epsilon=epsilon, kwargs=kwargs)
# TODO: changed this to nobs_effective, has to be changed when merging
# with statespace mlemodel
return hessian / (self.nobs_effective) | Hessian matrix computed by second-order complex-step differentiation
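A hedged sketch of `approx_hess_cs` on a toy Gaussian log-likelihood; the method above applies the same helper to `self.loglike` and then rescales by the effective sample size.

```python
import numpy as np
from statsmodels.tools.numdiff import approx_hess_cs

y = np.random.default_rng(0).standard_normal(50)

def loglike(params):
    mu, sigma2 = params
    return -0.5 * np.sum(np.log(2 * np.pi * sigma2) + (y - mu) ** 2 / sigma2)

hess = approx_hess_cs(np.array([0.0, 1.0]), loglike)
print(hess[0, 0])  # approximately -n / sigma2 = -50 at these values
```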
on the `loglike` function. | _hessian_complex_step | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def aic(self):
"""
(float) Akaike Information Criterion
"""
return aic(self.llf, self.nobs_effective, self.df_model) | (float) Akaike Information Criterion | aic | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def aicc(self):
"""
(float) Akaike Information Criterion with small sample correction
"""
return aicc(self.llf, self.nobs_effective, self.df_model) | (float) Akaike Information Criterion with small sample correction | aicc | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def bic(self):
"""
(float) Bayes Information Criterion
"""
return bic(self.llf, self.nobs_effective, self.df_model) | (float) Bayes Information Criterion | bic | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def hqic(self):
"""
(float) Hannan-Quinn Information Criterion
"""
# return (-2 * self.llf +
# 2 * np.log(np.log(self.nobs_effective)) * self.df_model)
return hqic(self.llf, self.nobs_effective, self.df_model) | (float) Hannan-Quinn Information Criterion | hqic | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
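The criteria above delegate to helpers in `statsmodels.tools.eval_measures`; a quick sketch of the underlying formulas with made-up values.

```python
import numpy as np
from statsmodels.tools.eval_measures import aic, aicc, bic, hqic

llf, nobs, df = -120.0, 100, 4
assert np.isclose(aic(llf, nobs, df), -2 * llf + 2 * df)
assert np.isclose(bic(llf, nobs, df), -2 * llf + np.log(nobs) * df)
assert np.isclose(hqic(llf, nobs, df), -2 * llf + 2 * np.log(np.log(nobs)) * df)
print(aicc(llf, nobs, df))  # AIC with a small-sample correction
```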
def llf(self):
"""
(float) The value of the log-likelihood function evaluated at `params`.
"""
raise NotImplementedError | (float) The value of the log-likelihood function evaluated at `params`. | llf | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def mae(self):
"""
(float) Mean absolute error
"""
return np.mean(np.abs(self.resid)) | (float) Mean absolute error | mae | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def mse(self):
"""
(float) Mean squared error
"""
return self.sse / self.nobs | (float) Mean squared error | mse | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def pvalues(self):
"""
(array) The p-values associated with the z-statistics of the
coefficients. Note that the coefficients are assumed to have a Normal
distribution.
"""
pvalues = np.zeros_like(self.zvalues) * np.nan
mask = np.ones_like(pvalues, dtype=bool)
mask[self._free_params_index] = True
mask &= ~np.isnan(self.zvalues)
pvalues[mask] = norm.sf(np.abs(self.zvalues[mask])) * 2
return pvalues | (array) The p-values associated with the z-statistics of the
coefficients. Note that the coefficients are assumed to have a Normal
distribution. | pvalues | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def sse(self):
"""
(float) Sum of squared errors
"""
return np.sum(self.resid ** 2) | (float) Sum of squared errors | sse | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def zvalues(self):
"""
(array) The z-statistics for the coefficients.
"""
return self.params / self.bse | (array) The z-statistics for the coefficients. | zvalues | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def _get_prediction_start_index(self, anchor):
"""Returns a valid numeric start index for predictions/simulations"""
if anchor is None or anchor == "start":
iloc = 0
elif anchor == "end":
iloc = self.nobs
else:
iloc, _, _ = self.model._get_index_loc(anchor)
if isinstance(iloc, slice):
iloc = iloc.start
iloc += 1 # anchor is one before start of prediction/simulation
if iloc < 0:
iloc = self.nobs + iloc
if iloc > self.nobs:
raise ValueError("Cannot anchor simulation outside of the sample.")
return iloc | Returns a valid numeric start index for predictions/simulations | _get_prediction_start_index | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def cov_params_approx(self):
"""
(array) The variance / covariance matrix. Computed using the numerical
Hessian approximated by complex step or finite differences methods.
"""
return self._cov_params_approx(
self._cov_approx_complex_step, self._cov_approx_centered
) | (array) The variance / covariance matrix. Computed using the numerical
Hessian approximated by complex step or finite differences methods. | cov_params_approx | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def test_serial_correlation(self, method, lags=None):
"""
Ljung-Box test for no serial correlation of standardized residuals
Null hypothesis is no serial correlation.
Parameters
----------
method : {'ljungbox', 'boxpierce', None}
The statistical test for serial correlation. If None, an attempt is
made to select an appropriate test.
lags : None, int or array_like
If lags is an integer then this is taken to be the largest lag
that is included, the test result is reported for all smaller lag
lengths.
If lags is a list or array, then all lags are included up to the
largest lag in the list, however only the tests for the lags in the
list are reported.
If lags is None, then the default maxlag is min(10, nobs//5) for
non-seasonal time series and min(2*m, nobs//5) for seasonal time
series.
Returns
-------
output : ndarray
An array with `(test_statistic, pvalue)` for each endogenous
variable and each lag. The array is then sized
`(k_endog, 2, lags)`. If the method is called as
`ljungbox = res.test_serial_correlation()`, then `ljungbox[i]`
holds the results of the Ljung-Box test (as would be returned by
`statsmodels.stats.diagnostic.acorr_ljungbox`) for the `i` th
endogenous variable.
See Also
--------
statsmodels.stats.diagnostic.acorr_ljungbox
Ljung-Box test for serial correlation.
Notes
-----
For statespace models: let `d` = max(loglikelihood_burn, nobs_diffuse);
this test is calculated ignoring the first `d` residuals.
Output is nan for any endogenous variable which has missing values.
"""
if method is None:
method = 'ljungbox'
if self.standardized_forecasts_error is None:
raise ValueError('Cannot compute test statistic when standardized'
' forecast errors have not been computed.')
if method == 'ljungbox' or method == 'boxpierce':
from statsmodels.stats.diagnostic import acorr_ljungbox
if hasattr(self, "loglikelihood_burn"):
d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)
# This differs from self.nobs_effective because here we want to
# exclude exact diffuse periods, whereas self.nobs_effective
# only excludes explicitly burned (usually approximate diffuse)
# periods.
nobs_effective = self.nobs - d
else:
nobs_effective = self.nobs_effective
output = []
# Default lags for acorr_ljungbox is 40, but may not always have
# that many observations
if lags is None:
seasonal_periods = getattr(self.model, "seasonal_periods", 0)
if seasonal_periods:
lags = min(2 * seasonal_periods, nobs_effective // 5)
else:
lags = min(10, nobs_effective // 5)
cols = [2, 3] if method == 'boxpierce' else [0, 1]
for i in range(self.model.k_endog):
if hasattr(self, "filter_results"):
x = self.filter_results.standardized_forecasts_error[i][d:]
else:
x = self.standardized_forecasts_error
results = acorr_ljungbox(
x, lags=lags, boxpierce=(method == 'boxpierce')
)
output.append(np.asarray(results)[:, cols].T)
output = np.c_[output]
else:
raise NotImplementedError('Invalid serial correlation test'
' method.')
return output | Ljung-Box test for no serial correlation of standardized residuals
Null hypothesis is no serial correlation.
Parameters
----------
method : {'ljungbox', 'boxpierce', None}
The statistical test for serial correlation. If None, an attempt is
made to select an appropriate test.
lags : None, int or array_like
If lags is an integer then this is taken to be the largest lag
that is included, the test result is reported for all smaller lag
lengths.
If lags is a list or array, then all lags are included up to the
largest lag in the list, however only the tests for the lags in the
list are reported.
If lags is None, then the default maxlag is min(10, nobs//5) for
non-seasonal time series and min(2*m, nobs//5) for seasonal time
series.
Returns
-------
output : ndarray
An array with `(test_statistic, pvalue)` for each endogenous
variable and each lag. The array is then sized
`(k_endog, 2, lags)`. If the method is called as
`ljungbox = res.test_serial_correlation()`, then `ljungbox[i]`
holds the results of the Ljung-Box test (as would be returned by
`statsmodels.stats.diagnostic.acorr_ljungbox`) for the `i` th
endogenous variable.
See Also
--------
statsmodels.stats.diagnostic.acorr_ljungbox
Ljung-Box test for serial correlation.
Notes
-----
For statespace models: let `d` = max(loglikelihood_burn, nobs_diffuse);
this test is calculated ignoring the first `d` residuals.
Output is nan for any endogenous variable which has missing values. | test_serial_correlation | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
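Direct use of the underlying test on simulated white-noise residuals; recent statsmodels returns a DataFrame with `lb_stat`/`lb_pvalue` columns (plus `bp_*` when `boxpierce=True`).

```python
import numpy as np
from statsmodels.stats.diagnostic import acorr_ljungbox

resid = np.random.default_rng(1).standard_normal(200)
table = acorr_ljungbox(resid, lags=10, boxpierce=True)
print(table[["lb_stat", "lb_pvalue", "bp_stat", "bp_pvalue"]].tail(1))
```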
def test_normality(self, method):
"""
Test for normality of standardized residuals.
Null hypothesis is normality.
Parameters
----------
method : {'jarquebera', None}
The statistical test for normality. Must be 'jarquebera' for
Jarque-Bera normality test. If None, an attempt is made to select
an appropriate test.
See Also
--------
statsmodels.stats.stattools.jarque_bera
The Jarque-Bera test of normality.
Notes
-----
For statespace models: let `d` = max(loglikelihood_burn, nobs_diffuse);
this test is calculated ignoring the first `d` residuals.
In the case of missing data, the maintained hypothesis is that the
data are missing completely at random. This test is then run on the
standardized residuals excluding those corresponding to missing
observations.
"""
if method is None:
method = 'jarquebera'
if self.standardized_forecasts_error is None:
raise ValueError('Cannot compute test statistic when standardized'
' forecast errors have not been computed.')
if method == 'jarquebera':
from statsmodels.stats.stattools import jarque_bera
if hasattr(self, "loglikelihood_burn"):
d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)
else:
d = 0
output = []
for i in range(self.model.k_endog):
if hasattr(self, "fiter_results"):
resid = self.filter_results.standardized_forecasts_error[
i, d:
]
else:
resid = self.standardized_forecasts_error
mask = ~np.isnan(resid)
output.append(jarque_bera(resid[mask]))
else:
raise NotImplementedError('Invalid normality test method.')
return np.array(output) | Test for normality of standardized residuals.
Null hypothesis is normality.
Parameters
----------
method : {'jarquebera', None}
The statistical test for normality. Must be 'jarquebera' for
Jarque-Bera normality test. If None, an attempt is made to select
an appropriate test.
See Also
--------
statsmodels.stats.stattools.jarque_bera
The Jarque-Bera test of normality.
Notes
-----
For statespace models: let `d` = max(loglikelihood_burn, nobs_diffuse);
this test is calculated ignoring the first `d` residuals.
In the case of missing data, the maintained hypothesis is that the
data are missing completely at random. This test is then run on the
standardized residuals excluding those corresponding to missing
observations. | test_normality | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
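Direct use of the underlying test: `jarque_bera` returns the statistic, its p-value, and the sample skewness and kurtosis (simulated data here).

```python
import numpy as np
from statsmodels.stats.stattools import jarque_bera

resid = np.random.default_rng(2).standard_normal(500)
jb, jb_pv, skew, kurtosis = jarque_bera(resid)
print(f"JB={jb:.2f}, p={jb_pv:.3f}, skew={skew:.3f}, kurtosis={kurtosis:.3f}")
```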
def summary(
self,
alpha=0.05,
start=None,
title=None,
model_name=None,
display_params=True,
):
"""
Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals. Default is 0.05.
start : int, optional
Integer of the start observation. Default is 0.
model_name : str
The name of the model used. Default is to use model class name.
Returns
-------
summary : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
from statsmodels.iolib.summary import Summary
# Model specification results
model = self.model
if title is None:
title = "Statespace Model Results"
if start is None:
start = 0
if self.model._index_dates:
ix = self.model._index
d = ix[start]
sample = ["%02d-%02d-%02d" % (d.month, d.day, d.year)]
d = ix[-1]
sample += ["- " + "%02d-%02d-%02d" % (d.month, d.day, d.year)]
else:
sample = [str(start), " - " + str(self.nobs)]
# Standardize the model name as a list of str
if model_name is None:
model_name = model.__class__.__name__
# Diagnostic tests results
try:
het = self.test_heteroskedasticity(method="breakvar")
except Exception: # FIXME: catch something specific
het = np.array([[np.nan] * 2])
try:
lb = self.test_serial_correlation(method="ljungbox")
except Exception: # FIXME: catch something specific
lb = np.array([[np.nan] * 2]).reshape(1, 2, 1)
try:
jb = self.test_normality(method="jarquebera")
except Exception: # FIXME: catch something specific
jb = np.array([[np.nan] * 4])
# Create the tables
if not isinstance(model_name, list):
model_name = [model_name]
top_left = [("Dep. Variable:", None)]
top_left.append(("Model:", [model_name[0]]))
for i in range(1, len(model_name)):
top_left.append(("", ["+ " + model_name[i]]))
top_left += [
("Date:", None),
("Time:", None),
("Sample:", [sample[0]]),
("", [sample[1]]),
]
top_right = [
("No. Observations:", [self.nobs]),
("Log Likelihood", ["%#5.3f" % self.llf]),
]
if hasattr(self, "rsquared"):
top_right.append(("R-squared:", ["%#8.3f" % self.rsquared]))
top_right += [
("AIC", ["%#5.3f" % self.aic]),
("BIC", ["%#5.3f" % self.bic]),
("HQIC", ["%#5.3f" % self.hqic]),
]
if hasattr(self, "filter_results"):
if (
self.filter_results is not None
and self.filter_results.filter_concentrated
):
top_right.append(("Scale", ["%#5.3f" % self.scale]))
else:
top_right.append(("Scale", ["%#5.3f" % self.scale]))
if hasattr(self, "cov_type"):
top_left.append(("Covariance Type:", [self.cov_type]))
format_str = lambda array: [ # noqa:E731
", ".join([f"{i:.2f}" for i in array])
]
diagn_left = [
("Ljung-Box (Q):", format_str(lb[:, 0, -1])),
("Prob(Q):", format_str(lb[:, 1, -1])),
("Heteroskedasticity (H):", format_str(het[:, 0])),
("Prob(H) (two-sided):", format_str(het[:, 1])),
]
diagn_right = [
("Jarque-Bera (JB):", format_str(jb[:, 0])),
("Prob(JB):", format_str(jb[:, 1])),
("Skew:", format_str(jb[:, 2])),
("Kurtosis:", format_str(jb[:, 3])),
]
summary = Summary()
summary.add_table_2cols(
self, gleft=top_left, gright=top_right, title=title
)
if len(self.params) > 0 and display_params:
summary.add_table_params(
self, alpha=alpha, xname=self.param_names, use_t=False
)
summary.add_table_2cols(
self, gleft=diagn_left, gright=diagn_right, title=""
)
# Add warnings/notes, added to text format only
etext = []
if hasattr(self, "cov_type") and "description" in self.cov_kwds:
etext.append(self.cov_kwds["description"])
if self._rank < (len(self.params) - len(self.fixed_params)):
cov_params = self.cov_params()
if len(self.fixed_params) > 0:
mask = np.ix_(self._free_params_index, self._free_params_index)
cov_params = cov_params[mask]
etext.append(
"Covariance matrix is singular or near-singular,"
" with condition number %6.3g. Standard errors may be"
" unstable." % _safe_cond(cov_params)
)
if etext:
etext = [
f"[{i + 1}] {text}" for i, text in enumerate(etext)
]
etext.insert(0, "Warnings:")
summary.add_extra_txt(etext)
return summary | Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals. Default is 0.05.
start : int, optional
Integer of the start observation. Default is 0.
model_name : str
The name of the model used. Default is to use model class name.
Returns
-------
summary : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary | summary | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/base.py | BSD-3-Clause |
def set_initialization_method(
self,
initialization_method,
initial_level=None,
initial_trend=None,
initial_seasonal=None,
):
"""
Sets a new initialization method for the state space model.
Parameters
----------
initialization_method : str, optional
Method for initialization of the state space model. One of:
* 'estimated' (default)
* 'heuristic'
* 'known'
If 'known' initialization is used, then `initial_level` must be
passed, as well as `initial_trend` and `initial_seasonal` if
applicable.
'heuristic' uses a heuristic based on the data to estimate initial
level, trend, and seasonal state. 'estimated' uses the same
heuristic as initial guesses, but then estimates the initial states
as part of the fitting process. Default is 'estimated'.
initial_level : float, optional
The initial level component. Only used if initialization is
'known'.
initial_trend : float, optional
The initial trend component. Only used if initialization is
'known'.
initial_seasonal : array_like, optional
The initial seasonal component. An array of length
`seasonal_periods`. Only used if initialization is 'known'.
"""
self.initialization_method = string_like(
initialization_method,
"initialization_method",
options=("estimated", "known", "heuristic"),
)
if self.initialization_method == "known":
if initial_level is None:
raise ValueError(
"`initial_level` argument must be provided"
' when initialization method is set to "known".'
)
if self.has_trend and initial_trend is None:
raise ValueError(
"`initial_trend` argument must be provided"
" for models with a trend component when"
' initialization method is set to "known".'
)
if self.has_seasonal and initial_seasonal is None:
raise ValueError(
"`initial_seasonal` argument must be provided"
" for models with a seasonal component when"
' initialization method is set to "known".'
)
elif self.initialization_method == "heuristic":
(
initial_level,
initial_trend,
initial_seasonal,
) = _initialization_heuristic(
self.endog,
trend=self.trend,
seasonal=self.seasonal,
seasonal_periods=self.seasonal_periods,
)
elif self.initialization_method == "estimated":
if self.nobs < 10 + 2 * (self.seasonal_periods // 2):
(
initial_level,
initial_trend,
initial_seasonal,
) = _initialization_simple(
self.endog,
trend=self.trend,
seasonal=self.seasonal,
seasonal_periods=self.seasonal_periods,
)
else:
(
initial_level,
initial_trend,
initial_seasonal,
) = _initialization_heuristic(
self.endog,
trend=self.trend,
seasonal=self.seasonal,
seasonal_periods=self.seasonal_periods,
)
if not self.has_trend:
initial_trend = 0
if not self.has_seasonal:
initial_seasonal = 0
self.initial_level = initial_level
self.initial_trend = initial_trend
self.initial_seasonal = initial_seasonal
# we also have to reset the params index dictionaries
self._internal_params_index = OrderedDict(
zip(self._internal_param_names, np.arange(self._k_params_internal))
)
self._params_index = OrderedDict(
zip(self.param_names, np.arange(self.k_params))
) | Sets a new initialization method for the state space model.
Parameters
----------
initialization_method : str, optional
Method for initialization of the state space model. One of:
* 'estimated' (default)
* 'heuristic'
* 'known'
If 'known' initialization is used, then `initial_level` must be
passed, as well as `initial_trend` and `initial_seasonal` if
applicable.
'heuristic' uses a heuristic based on the data to estimate initial
level, trend, and seasonal state. 'estimated' uses the same
heuristic as initial guesses, but then estimates the initial states
as part of the fitting process. Default is 'estimated'.
initial_level : float, optional
The initial level component. Only used if initialization is
'known'.
initial_trend : float, optional
The initial trend component. Only used if initialization is
'known'.
initial_seasonal : array_like, optional
The initial seasonal component. An array of length
`seasonal_periods`. Only used if initialization is 'known'. | set_initialization_method | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
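A hedged usage sketch: switching a model to 'known' initialization after construction. The synthetic monthly series and the chosen initial values are assumptions for illustration.

```python
import numpy as np
import pandas as pd
from statsmodels.tsa.exponential_smoothing.ets import ETSModel

idx = pd.date_range("2020-01-01", periods=48, freq="MS")
y = pd.Series(
    10 + 0.1 * np.arange(48) + np.random.default_rng(3).normal(0, 0.5, 48),
    index=idx,
)

model = ETSModel(y, error="add", trend="add")
model.set_initialization_method("known", initial_level=10.0, initial_trend=0.1)
res = model.fit(disp=False)
print(res.params)  # only the smoothing parameters are estimated now
```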
def set_bounds(self, bounds):
"""
Set bounds for parameter estimation.
Parameters
----------
bounds : dict or None, optional
A dictionary with parameter names as keys and the respective bounds
intervals as values (lists/tuples/arrays).
The available parameter names are in ``self.param_names``.
The default option is ``None``, in which case the traditional
(nonlinear) bounds as described in [1]_ are used.
References
----------
.. [1] Hyndman, R.J., & Athanasopoulos, G. (2019) *Forecasting:
principles and practice*, 3rd edition, OTexts: Melbourne,
Australia. OTexts.com/fpp3. Accessed on April 19th 2020.
"""
if bounds is None:
self.bounds = {}
else:
if not isinstance(bounds, (dict, OrderedDict)):
raise ValueError("bounds must be a dictionary")
for key in bounds:
if key not in self.param_names:
raise ValueError(
f"Invalid key: {key} in bounds dictionary"
)
bounds[key] = array_like(
bounds[key], f"bounds[{key}]", shape=(2,)
)
self.bounds = bounds | Set bounds for parameter estimation.
Parameters
----------
bounds : dict or None, optional
A dictionary with parameter names as keys and the respective bounds
intervals as values (lists/tuples/arrays).
The available parameter names are in ``self.param_names``.
The default option is ``None``, in which case the traditional
(nonlinear) bounds as described in [1]_ are used.
References
----------
.. [1] Hyndman, R.J., & Athanasopoulos, G. (2019) *Forecasting:
principles and practice*, 3rd edition, OTexts: Melbourne,
Australia. OTexts.com/fpp3. Accessed on April 19th 2020. | set_bounds | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
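A matching sketch for custom bounds; keys must be entries of `model.param_names` (here `smoothing_level`), and the optimizer keeps the estimate inside the interval.

```python
import numpy as np
import pandas as pd
from statsmodels.tsa.exponential_smoothing.ets import ETSModel

y = pd.Series(
    10 + np.random.default_rng(4).normal(0, 0.5, 48),
    index=pd.date_range("2020-01-01", periods=48, freq="MS"),
)

model = ETSModel(y, error="add")
model.set_bounds({"smoothing_level": [0.1, 0.4]})
res = model.fit(disp=False)
print(res.params)  # smoothing_level lies within [0.1, 0.4]
```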
def prepare_data(data):
"""
Prepare data for use in the state space representation
"""
endog = np.require(data.orig_endog, requirements="WC")
if endog.ndim != 1:
raise ValueError("endog must be 1-dimensional")
if endog.dtype != np.double:
endog = np.require(
data.orig_endog, requirements="WC", dtype=float
)
return endog, None | Prepare data for use in the state space representation | prepare_data | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
def _internal_params(self, params):
"""
Converts a parameter array passed from outside to the internally used
full parameter array.
"""
# internal params that are not needed are all set to zero, except phi,
# which is one
internal = np.zeros(self._k_params_internal, dtype=params.dtype)
for i, name in enumerate(self.param_names):
internal_idx = self._internal_params_index[name]
internal[internal_idx] = params[i]
if not self.damped_trend:
internal[3] = 1 # phi is 4th parameter
if self.initialization_method != "estimated":
internal[4] = self.initial_level
internal[5] = self.initial_trend
if np.isscalar(self.initial_seasonal):
internal[6:] = self.initial_seasonal
else:
# See GH 7893
internal[6:] = self.initial_seasonal[::-1]
return internal | Converts a parameter array passed from outside to the internally used
full parameter array. | _internal_params | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
def _model_params(self, internal):
"""
Converts internal parameters to model parameters
"""
params = np.empty(self.k_params)
for i, name in enumerate(self.param_names):
internal_idx = self._internal_params_index[name]
params[i] = internal[internal_idx]
return params | Converts internal parameters to model parameters | _model_params | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
def _get_internal_states(self, states, params):
"""
Converts a state matrix/dataframe to the (nobs, 2+m) matrix used
internally
"""
internal_params = self._internal_params(params)
if isinstance(states, (pd.Series, pd.DataFrame)):
states = states.values
internal_states = np.zeros((self.nobs, 2 + self.seasonal_periods))
internal_states[:, 0] = states[:, 0]
if self.has_trend:
internal_states[:, 1] = states[:, 1]
if self.has_seasonal:
for j in range(self.seasonal_periods):
internal_states[j:, 2 + j] = states[
0 : self.nobs - j, self._seasonal_index
]
internal_states[0:j, 2 + j] = internal_params[6 : 6 + j][::-1]
return internal_states | Converts a state matrix/dataframe to the (nobs, 2+m) matrix used
internally | _get_internal_states | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
def _start_params(self):
"""
Default start params in the format of external parameters.
This should not be called directly, but by calling
``self.start_params``.
"""
params = []
for p in self._smoothing_param_names:
if p in self.param_names:
params.append(self._default_start_params[p])
if self.initialization_method == "estimated":
lvl_idx = len(params)
params += [self.initial_level]
if self.has_trend:
params += [self.initial_trend]
if self.has_seasonal:
# we have to adapt the seasonal values a bit to make sure the
# problem is well posed (see implementation notes above)
initial_seasonal = self.initial_seasonal
if self.seasonal == "mul":
params[lvl_idx] *= initial_seasonal[-1]
initial_seasonal /= initial_seasonal[-1]
else:
params[lvl_idx] += initial_seasonal[-1]
initial_seasonal -= initial_seasonal[-1]
params += initial_seasonal.tolist()
return np.array(params) | Default start params in the format of external parameters.
This should not be called directly, but by calling
``self.start_params``. | _start_params | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
def _convert_and_bound_start_params(self, params):
"""
This converts start params to internal params, sets internal-only
parameters as bounded, sets bounds for fixed parameters, and then makes
sure that all start parameters are within the specified bounds.
"""
internal_params = self._internal_params(params)
# set bounds for missing and fixed
for p in self._internal_param_names:
idx = self._internal_params_index[p]
if p not in self.param_names:
# any missing parameters are set to the value they got from the
# call to _internal_params
self.bounds[p] = [internal_params[idx]] * 2
elif self._has_fixed_params and p in self._fixed_params:
self.bounds[p] = [self._fixed_params[p]] * 2
# make sure everything is within bounds
if p in self.bounds:
internal_params[idx] = np.clip(
internal_params[idx]
+ 1e-3, # try not to start on boundary
*self.bounds[p],
)
return internal_params | This converts start params to internal params, sets internal-only
parameters as bounded, sets bounds for fixed parameters, and then makes
sure that all start parameters are within the specified bounds. | _convert_and_bound_start_params | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
def _loglike_internal(
self,
params,
yhat,
xhat,
is_fixed=None,
fixed_values=None,
use_beta_star=False,
use_gamma_star=False,
):
"""
Log-likelihood function to be called from fit to avoid reallocation of
memory.
Parameters
----------
params : np.ndarray of np.float
Model parameters: (alpha, beta, gamma, phi, l[-1],
b[-1], s[-1], ..., s[-m]). If there are no fixed values this must
be in the format of internal parameters. Otherwise the fixed values
are skipped.
yhat : np.ndarray
Array of size (n,) where fitted values will be written to.
xhat : np.ndarray
Array of size (n, _k_states_internal) where fitted states will be
written to.
is_fixed : np.ndarray or None
Boolean array indicating values which are fixed during fitting.
This must have the full length of internal parameters.
fixed_values : np.ndarray or None
Array of fixed values (arbitrary values for non-fixed parameters)
This must have the full length of internal parameters.
use_beta_star : boolean
Whether to internally use beta_star as parameter
use_gamma_star : boolean
Whether to internally use gamma_star as parameter
"""
if np.iscomplexobj(params):
data = np.asarray(self.endog, dtype=complex)
else:
data = self.endog
if is_fixed is None:
is_fixed = np.zeros(self._k_params_internal, dtype=np.int64)
fixed_values = np.empty(
self._k_params_internal, dtype=params.dtype
)
else:
is_fixed = np.ascontiguousarray(is_fixed, dtype=np.int64)
self._smoothing_func(
params,
data,
yhat,
xhat,
is_fixed,
fixed_values,
use_beta_star,
use_gamma_star,
)
res = self._residuals(yhat, data=data)
logL = -self.nobs / 2 * (np.log(2 * np.pi * np.mean(res ** 2)) + 1)
if self.error == "mul":
# In some cases, yhat can become negative or zero, so that a
# multiplicative model is no longer well-defined. Zero values
# are replaced with 10^-32 (a very small number). For more
# information on the derivation of the log-likelihood for the
# multiplicative error models see:
# https://openforecast.org/adam/ADAMETSEstimationLikelihood.html
yhat[yhat == 0] = 1e-32
logL -= np.sum(np.log(np.abs(yhat)))
return logL | Log-likelihood function to be called from fit to avoid reallocation of
memory.
Parameters
----------
params : np.ndarray of np.float
Model parameters: (alpha, beta, gamma, phi, l[-1],
b[-1], s[-1], ..., s[-m]). If there are no fixed values this must
be in the format of internal parameters. Otherwise the fixed values
are skipped.
yhat : np.ndarray
Array of size (n,) where fitted values will be written to.
xhat : np.ndarray
Array of size (n, _k_states_internal) where fitted states will be
written to.
is_fixed : np.ndarray or None
Boolean array indicating values which are fixed during fitting.
This must have the full length of internal parameters.
fixed_values : np.ndarray or None
Array of fixed values (arbitrary values for non-fixed parameters)
This must have the full length of internal parameters.
use_beta_star : boolean
Whether to internally use beta_star as parameter
use_gamma_star : boolean
Whether to internally use gamma_star as parameter | _loglike_internal | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
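A note on the likelihood above: the expression ``-nobs / 2 * (np.log(2 * np.pi * np.mean(res ** 2)) + 1)`` is the Gaussian log-likelihood concentrated over the error variance. A minimal standalone sketch (illustrative residuals, plain numpy, not part of the class) verifying the identity:

import numpy as np

res = np.random.default_rng(0).normal(size=100)  # stand-in residuals
nobs = res.shape[0]
sigma2 = np.mean(res ** 2)  # ML estimate of the error variance
concentrated = -nobs / 2 * (np.log(2 * np.pi * sigma2) + 1)
full = -nobs / 2 * np.log(2 * np.pi * sigma2) - np.sum(res ** 2) / (2 * sigma2)
assert np.allclose(concentrated, full)  # concentrating out sigma2 gives -nobs/2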
def _residuals(self, yhat, data=None):
"""Calculates residuals of a prediction"""
if data is None:
data = self.endog
if self.error == "mul":
return (data - yhat) / yhat
else:
return data - yhat | Calculates residuals of a prediction | _residuals | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
def _smooth(self, params):
"""
Exponential smoothing with given parameters
Parameters
----------
params : array_like
Model parameters
Returns
-------
yhat : pd.Series or np.ndarray
Predicted values from exponential smoothing. If original data was a
``pd.Series``, returns a ``pd.Series``, else a ``np.ndarray``.
xhat : pd.DataFrame or np.ndarray
Internal states of exponential smoothing. If original data was a
``pd.Series``, returns a ``pd.DataFrame``, else a ``np.ndarray``.
"""
internal_params = self._internal_params(params)
yhat = np.zeros(self.nobs)
xhat = np.zeros((self.nobs, self._k_states_internal))
is_fixed = np.zeros(self._k_params_internal, dtype=np.int64)
fixed_values = np.empty(self._k_params_internal, dtype=params.dtype)
self._smoothing_func(
internal_params, self.endog, yhat, xhat, is_fixed, fixed_values
)
# remove states that are only internal
states = self._get_states(xhat)
if self.use_pandas:
_, _, _, index = self._get_prediction_index(0, self.nobs - 1)
yhat = pd.Series(yhat, index=index)
statenames = ["level"]
if self.has_trend:
statenames += ["trend"]
if self.has_seasonal:
statenames += ["seasonal"]
states = pd.DataFrame(states, index=index, columns=statenames)
return yhat, states | Exponential smoothing with given parameters
Parameters
----------
params : array_like
Model parameters
Returns
-------
yhat : pd.Series or np.ndarray
Predicted values from exponential smoothing. If original data was a
``pd.Series``, returns a ``pd.Series``, else a ``np.ndarray``.
xhat : pd.DataFrame or np.ndarray
Internal states of exponential smoothing. If original data was a
``pd.Series``, returns a ``pd.DataFrame``, else a ``np.ndarray``. | _smooth | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
def smooth(self, params, return_raw=False):
"""
Exponential smoothing with given parameters
Parameters
----------
params : array_like
Model parameters
return_raw : bool, optional
Whether to return only the state space results or the full results
object. Default is ``False``.
Returns
-------
result : ETSResultsWrapper or tuple
If ``return_raw=False``, returns a ETSResultsWrapper
object. Otherwise a tuple of arrays or pandas objects, depending on
the format of the endog data.
"""
params = np.asarray(params)
results = self._smooth(params)
return self._wrap_results(params, results, return_raw) | Exponential smoothing with given parameters
Parameters
----------
params : array_like
Model parameters
return_raw : bool, optional
Whether to return only the state space results or the full results
object. Default is ``False``.
Returns
-------
result : ETSResultsWrapper or tuple
If ``return_raw=False``, returns a ETSResultsWrapper
object. Otherwise a tuple of arrays or pandas objects, depending on
the format of the endog data. | smooth | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
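A hedged usage sketch for ``smooth`` (assumes the public ``ETSModel`` class from this module; the series and parameter values are illustrative):

import numpy as np
import pandas as pd
from statsmodels.tsa.exponential_smoothing.ets import ETSModel

y = pd.Series(
    10 + np.random.default_rng(0).normal(size=40).cumsum(),
    index=pd.date_range("2000-01-01", periods=40, freq="QS"),
)
model = ETSModel(y, error="add", trend="add")
results = model.smooth(model.start_params)  # ETSResultsWrapper
print(results.fittedvalues.head())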
def llf(self):
"""
log-likelihood function evaluated at the fitted params
"""
return self._llf | log-likelihood function evaluated at the fitted params | llf | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
def _get_prediction_params(self, start_idx):
"""
Returns internal parameter representation of smoothing parameters and
"initial" states for prediction/simulation, that is the states just
before the first prediction/simulation step.
"""
internal_params = self.model._internal_params(self.params)
if start_idx == 0:
return internal_params
else:
internal_states = self.model._get_internal_states(
self.states, self.params
)
start_state = np.empty(6 + self.seasonal_periods)
start_state[0:4] = internal_params[0:4]
start_state[4:] = internal_states[start_idx - 1, :]
return start_state | Returns internal parameter representation of smoothing parameters and
"initial" states for prediction/simulation, that is the states just
before the first prediction/simulation step. | _get_prediction_params | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
def _relative_forecast_variance(self, steps):
"""
References
----------
.. [1] Hyndman, R.J., & Athanasopoulos, G. (2019) *Forecasting:
principles and practice*, 3rd edition, OTexts: Melbourne,
Australia. OTexts.com/fpp3. Accessed on April 19th 2020.
"""
h = steps
alpha = self.smoothing_level
if self.has_trend:
beta = self.smoothing_trend
if self.has_seasonal:
gamma = self.smoothing_seasonal
m = self.seasonal_periods
k = np.asarray((h - 1) / m, dtype=int)
if self.damped_trend:
phi = self.damping_trend
model = self.model.short_name
if model == "ANN":
return 1 + alpha ** 2 * (h - 1)
elif model == "AAN":
return 1 + (h - 1) * (
alpha ** 2 + alpha * beta * h + beta ** 2 * h / 6 * (2 * h - 1)
)
elif model == "AAdN":
return (
1
+ alpha ** 2 * (h - 1)
+ (
(beta * phi * h)
/ ((1 - phi) ** 2)
* (2 * alpha * (1 - phi) + beta * phi)
)
- (
(beta * phi * (1 - phi ** h))
/ ((1 - phi) ** 2 * (1 - phi ** 2))
* (
2 * alpha * (1 - phi ** 2)
+ beta * phi * (1 + 2 * phi - phi ** h)
)
)
)
elif model == "ANA":
return 1 + alpha ** 2 * (h - 1) + gamma * k * (2 * alpha + gamma)
elif model == "AAA":
return (
1
+ (h - 1)
* (
alpha ** 2
+ alpha * beta * h
+ (beta ** 2) / 6 * h * (2 * h - 1)
)
+ gamma * k * (2 * alpha + gamma + beta * m * (k + 1))
)
elif model == "AAdA":
return (
1
+ alpha ** 2 * (h - 1)
+ gamma * k * (2 * alpha + gamma)
+ (beta * phi * h)
/ ((1 - phi) ** 2)
* (2 * alpha * (1 - phi) + beta * phi)
- (
(beta * phi * (1 - phi ** h))
/ ((1 - phi) ** 2 * (1 - phi ** 2))
* (
2 * alpha * (1 - phi ** 2)
+ beta * phi * (1 + 2 * phi - phi ** h)
)
)
+ (
(2 * beta * gamma * phi)
/ ((1 - phi) * (1 - phi ** m))
* (k * (1 - phi ** m) - phi ** m * (1 - phi ** (m * k)))
)
)
else:
raise NotImplementedError | References
----------
.. [1] Hyndman, R.J., & Athanasopoulos, G. (2019) *Forecasting:
principles and practice*, 3rd edition, OTexts: Melbourne,
Australia. OTexts.com/fpp3. Accessed on April 19th 2020. | _relative_forecast_variance | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
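For intuition, evaluating the ANN and AAN expressions above with hypothetical smoothing values shows how the relative variance grows with the horizon (the numbers are illustrative, not from a fitted model):

import numpy as np

h = np.arange(1, 9)
alpha, beta = 0.3, 0.1
rel_var_ann = 1 + alpha ** 2 * (h - 1)
rel_var_aan = 1 + (h - 1) * (
    alpha ** 2 + alpha * beta * h + beta ** 2 * h / 6 * (2 * h - 1)
)
# forecast variance = scale * relative variance, so intervals widen with h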
def simulate(
self,
nsimulations,
anchor=None,
repetitions=1,
random_errors=None,
random_state=None,
):
r"""
Random simulations using the state space formulation.
Parameters
----------
nsimulations : int
The number of simulation steps.
anchor : int, str, or datetime, optional
First period for simulation. The simulation will be conditional on
all existing datapoints prior to the `anchor`. Type depends on the
index of the given `endog` in the model. Two special cases are the
strings 'start' and 'end'. `start` refers to beginning the
simulation at the first period of the sample (i.e. using the
initial values as simulation anchor), and `end` refers to
beginning the simulation at the first period after the sample.
Integer values can run from 0 to `nobs`, or can be negative to
apply negative indexing. Finally, if a date/time index was provided
to the model, then this argument can be a date string to parse or a
datetime type. Default is 'start'.
Note: `anchor` corresponds to the observation right before the
`start` observation in the `predict` method.
repetitions : int, optional
Number of simulated paths to generate. Default is 1 simulated path.
random_errors : optional
Specifies how the random errors should be obtained. Can be one of
the following:
* ``None``: Random normally distributed values with variance
estimated from the fit errors drawn from numpy's standard
RNG (can be seeded with the `random_state` argument). This is the
default option.
* A distribution function from ``scipy.stats``, e.g.
``scipy.stats.norm``: Fits the distribution function to the fit
errors and draws from the fitted distribution.
Note the difference between ``scipy.stats.norm`` and
``scipy.stats.norm()``, the latter one is a frozen distribution
function.
* A frozen distribution function from ``scipy.stats``, e.g.
``scipy.stats.norm(scale=2)``: Draws from the frozen distribution
function.
* A ``np.ndarray`` with shape (`nsimulations`, `repetitions`): Uses
the given values as random errors.
* ``"bootstrap"``: Samples the random errors from the fit errors.
random_state : int or np.random.RandomState, optional
A seed for the random number generator or a
``np.random.RandomState`` object. Only used if `random_errors` is
``None``. Default is ``None``.
Returns
-------
sim : pd.Series, pd.DataFrame or np.ndarray
An ``np.ndarray``, ``pd.Series``, or ``pd.DataFrame`` of simulated
values.
If the original data was a ``pd.Series`` or ``pd.DataFrame``, `sim`
will be a ``pd.Series`` if `repetitions` is 1, and a
``pd.DataFrame`` of shape (`nsimulations`, `repetitions`) else.
Otherwise, if `repetitions` is 1, a ``np.ndarray`` of shape
(`nsimulations`,) is returned, and if `repetitions` is not 1 a
``np.ndarray`` of shape (`nsimulations`, `repetitions`) is
returned.
"""
r"""
Implementation notes
--------------------
The simulation is based on the state space model of the Holt-Winter's
methods. The state space model assumes that the true value at time
:math:`t` is randomly distributed around the prediction value.
If using the additive error model, this means:
.. math::
y_t &= \hat{y}_{t|t-1} + e_t\\
e_t &\sim \mathcal{N}(0, \sigma^2)
Using the multiplicative error model:
.. math::
y_t &= \hat{y}_{t|t-1} \cdot (1 + e_t)\\
e_t &\sim \mathcal{N}(0, \sigma^2)
Inserting these equations into the smoothing equation formulation leads
to the state space equations. The notation used here follows
[1]_.
Additionally,
.. math::
B_t = b_{t-1} \circ_d \phi\\
L_t = l_{t-1} \circ_b B_t\\
S_t = s_{t-m}\\
Y_t = L_t \circ_s S_t,
where :math:`\circ_d` is the operation linking trend and damping
parameter (multiplication if the trend is additive, power if the trend
is multiplicative), :math:`\circ_b` is the operation linking level and
trend (addition if the trend is additive, multiplication if the trend
is multiplicative), and :math:`\circ_s` is the operation linking
seasonality to the rest.
The state space equations can then be formulated as
.. math::
y_t = Y_t + \eta \cdot e_t\\
l_t = L_t + \alpha \cdot (M_e \cdot L_t + \kappa_l) \cdot e_t\\
b_t = B_t + \beta \cdot (M_e \cdot B_t+\kappa_b) \cdot e_t\\
s_t = S_t + \gamma \cdot (M_e \cdot S_t + \kappa_s) \cdot e_t\\
with
.. math::
\eta &= \begin{cases}
Y_t\quad\text{if error is multiplicative}\\
1\quad\text{else}
\end{cases}\\
M_e &= \begin{cases}
1\quad\text{if error is multiplicative}\\
0\quad\text{else}
\end{cases}\\
and, when using the additive error model,
.. math::
\kappa_l &= \begin{cases}
\frac{1}{S_t}\quad
\text{if seasonality is multiplicative}\\
1\quad\text{else}
\end{cases}\\
\kappa_b &= \begin{cases}
\frac{\kappa_l}{l_{t-1}}\quad
\text{if trend is multiplicative}\\
\kappa_l\quad\text{else}
\end{cases}\\
\kappa_s &= \begin{cases}
\frac{1}{L_t}\quad
\text{if seasonality is multiplicative}\\
1\quad\text{else}
\end{cases}
When using the multiplicative error model
.. math::
\kappa_l &= \begin{cases}
0\quad
\text{if seasonality is multiplicative}\\
S_t\quad\text{else}
\end{cases}\\
\kappa_b &= \begin{cases}
\frac{\kappa_l}{l_{t-1}}\quad
\text{if trend is multiplicative}\\
\kappa_l + l_{t-1}\quad\text{else}
\end{cases}\\
\kappa_s &= \begin{cases}
0\quad\text{if seasonality is multiplicative}\\
L_t\quad\text{else}
\end{cases}
References
----------
.. [1] Hyndman, R.J., & Athanasopoulos, G. (2018) *Forecasting:
principles and practice*, 2nd edition, OTexts: Melbourne,
Australia. OTexts.com/fpp2. Accessed on February 28th 2020.
"""
# Get the starting location
start_idx = self._get_prediction_start_index(anchor)
# set initial values and obtain parameters
start_params = self._get_prediction_params(start_idx)
x = np.zeros((nsimulations, self.model._k_states_internal))
# is fixed and fixed values are dummy arguments
is_fixed = np.zeros(len(start_params), dtype=np.int64)
fixed_values = np.zeros_like(start_params)
(
alpha,
beta_star,
gamma_star,
phi,
m,
_,
) = smooth._initialize_ets_smooth(
start_params, x, is_fixed, fixed_values
)
beta = alpha * beta_star
gamma = (1 - alpha) * gamma_star
# make x a 3 dimensional matrix: first dimension is nsimulations
# (number of steps), next is number of states, innermost is repetitions
nstates = x.shape[1]
x = np.tile(np.reshape(x, (nsimulations, nstates, 1)), repetitions)
y = np.empty((nsimulations, repetitions))
# get random error eps
sigma = np.sqrt(self.scale)
if isinstance(random_errors, np.ndarray):
if random_errors.shape != (nsimulations, repetitions):
raise ValueError(
"If random is an ndarray, it must have shape "
"(nsimulations, repetitions)!"
)
eps = random_errors
elif random_errors == "bootstrap":
eps = np.random.choice(
self.resid, size=(nsimulations, repetitions), replace=True
)
elif random_errors is None:
if random_state is None:
eps = np.random.randn(nsimulations, repetitions) * sigma
elif isinstance(random_state, int):
rng = np.random.RandomState(random_state)
eps = rng.randn(nsimulations, repetitions) * sigma
elif isinstance(random_state, np.random.RandomState):
eps = random_state.randn(nsimulations, repetitions) * sigma
else:
raise ValueError(
"Argument random_state must be None, an integer, "
"or an instance of np.random.RandomState"
)
elif isinstance(random_errors, (rv_continuous, rv_discrete)):
params = random_errors.fit(self.resid)
eps = random_errors.rvs(*params, size=(nsimulations, repetitions))
elif isinstance(random_errors, rv_frozen):
eps = random_errors.rvs(size=(nsimulations, repetitions))
else:
raise ValueError("Argument random_errors has unexpected value!")
# get model settings
mul_seasonal = self.seasonal == "mul"
mul_trend = self.trend == "mul"
mul_error = self.error == "mul"
# define trend, damping and seasonality operations
if mul_trend:
op_b = np.multiply
op_d = np.power
else:
op_b = np.add
op_d = np.multiply
if mul_seasonal:
op_s = np.multiply
else:
op_s = np.add
# x translation:
# - x[t, 0, :] is level[t]
# - x[t, 1, :] is trend[t]
# - x[t, 2, :] is seasonal[t]
# - x[t, 3, :] is seasonal[t-1]
# - x[t, 2+j, :] is seasonal[t-j]
# - similarly: x[t-1, 2+m-1, :] is seasonal[t-m]
for t in range(nsimulations):
B = op_d(x[t - 1, 1, :], phi)
L = op_b(x[t - 1, 0, :], B)
S = x[t - 1, 2 + m - 1, :]
Y = op_s(L, S)
if self.error == "add":
eta = 1
kappa_l = 1 / S if mul_seasonal else 1
kappa_b = kappa_l / x[t - 1, 0, :] if mul_trend else kappa_l
kappa_s = 1 / L if mul_seasonal else 1
else:
eta = Y
kappa_l = 0 if mul_seasonal else S
kappa_b = (
kappa_l / x[t - 1, 0, :]
if mul_trend
else kappa_l + x[t - 1, 0, :]
)
kappa_s = 0 if mul_seasonal else L
y[t, :] = Y + eta * eps[t, :]
x[t, 0, :] = L + alpha * (mul_error * L + kappa_l) * eps[t, :]
x[t, 1, :] = B + beta * (mul_error * B + kappa_b) * eps[t, :]
x[t, 2, :] = S + gamma * (mul_error * S + kappa_s) * eps[t, :]
# update seasonals by shifting previous seasonal right
x[t, 3:, :] = x[t - 1, 2:-1, :]
# Wrap data / squeeze where appropriate
if repetitions > 1:
names = ["simulation.%d" % num for num in range(repetitions)]
else:
names = "simulation"
return self.model._wrap_data(
y, start_idx, start_idx + nsimulations - 1, names=names
) | r | simulate | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
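Usage sketch for ``simulate``, continuing the earlier ``ETSModel`` sketch (the horizon, repetition count, and quantile levels are illustrative):

results = model.fit(disp=False)  # `model` from the ETSModel sketch above
sim = results.simulate(8, anchor="end", repetitions=1000, random_state=0)
# crude 95% band from the simulated paths (a pandas DataFrame when the
# model was built from a pd.Series and repetitions > 1)
lower, upper = sim.quantile(0.025, axis=1), sim.quantile(0.975, axis=1)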
def forecast(self, steps=1):
"""
Out-of-sample forecasts
Parameters
----------
steps : int, str, or datetime, optional
If an integer, the number of steps to forecast from the end of the
sample. Can also be a date string to parse or a datetime type.
However, if the dates index does not have a fixed frequency, steps
must be an integer. Default is 1.
Returns
-------
forecast : ndarray
Array of out of sample forecasts. A (steps x k_endog) array.
"""
return self._forecast(steps, "end") | Out-of-sample forecasts
Parameters
----------
steps : int, str, or datetime, optional
If an integer, the number of steps to forecast from the end of the
sample. Can also be a date string to parse or a datetime type.
However, if the dates index does not have a fixed frequency, steps
must be an integer. Default is 1.
Returns
-------
forecast : ndarray
Array of out of sample forecasts. A (steps x k_endog) array. | forecast | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
def _forecast(self, steps, anchor):
"""
Dynamic prediction/forecasting
"""
# forecast is the same as simulation without errors
return self.simulate(
steps, anchor=anchor, random_errors=np.zeros((steps, 1))
) | Dynamic prediction/forecasting | _forecast | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
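``_forecast`` relies on the identity that a point forecast is a simulation with all errors set to zero; a quick consistency check under the same assumptions as the earlier sketches (requires the ``results`` object from above):

import numpy as np

fcast = results.forecast(steps=4)
sim0 = results.simulate(4, anchor="end", random_errors=np.zeros((4, 1)))
assert np.allclose(np.asarray(fcast), np.asarray(sim0))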
def predict(self, start=None, end=None, dynamic=False, index=None):
"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
dynamic : bool, int, str, or datetime, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Can also be an absolute date string to parse or a
datetime type (these are not interpreted as offsets).
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
index : pd.Index, optional
Optionally an index to associate the predicted results to. If None,
an attempt is made to create an index for the predicted results
from the model's index or model's row labels.
Returns
-------
forecast : array_like or pd.Series.
Array of in-sample predictions and / or out-of-sample
forecasts. An (npredict,) array. If original data was a pd.Series
or DataFrame, a pd.Series is returned.
"""
(
start,
end,
start_smooth,
end_smooth,
anchor_dynamic,
_,
end_dynamic,
nsmooth,
ndynamic,
index,
) = self._handle_prediction_index(start, dynamic, end, index)
y = np.empty(nsmooth + ndynamic)
# In sample nondynamic prediction: smoothing
if nsmooth > 0:
y[0:nsmooth] = self.fittedvalues[start_smooth : end_smooth + 1]
# Out of sample/dynamic prediction: forecast
if ndynamic > 0:
y[nsmooth:] = self._forecast(ndynamic, anchor_dynamic)
# when we are doing out of sample only prediction, start > end + 1, and
# we only want to output beginning at start
if start > end + 1:
ndiscard = start - (end + 1)
y = y[ndiscard:]
# Wrap data / squeeze where appropriate
return self.model._wrap_data(y, start, end_dynamic) | In-sample prediction and out-of-sample forecasting
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
dynamic : bool, int, str, or datetime, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Can also be an absolute date string to parse or a
datetime type (these are not interpreted as offsets).
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
index : pd.Index, optional
Optionally an index to associate the predicted results to. If None,
an attempt is made to create an index for the predicted results
from the model's index or model's row labels.
Returns
-------
forecast : array_like or pd.Series.
Array of in-sample predictions and / or out-of-sample
forecasts. An (npredict,) array. If original data was a pd.Series
or DataFrame, a pd.Series is returned. | predict | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
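Usage sketch for ``predict`` (same hypothetical ``results`` as in the earlier sketches): in-sample smoothing for the first half of the sample, dynamic forecasts from the midpoint on.

mid = results.nobs // 2
pred = results.predict(start=0, end=results.nobs - 1, dynamic=mid)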
def get_prediction(
self,
start=None,
end=None,
dynamic=False,
index=None,
method=None,
simulate_repetitions=1000,
**simulate_kwargs,
):
"""
Calculates mean prediction and prediction intervals.
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
dynamic : bool, int, str, or datetime, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Can also be an absolute date string to parse or a
datetime type (these are not interpreted as offsets).
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
index : pd.Index, optional
Optionally an index to associate the predicted results to. If None,
an attempt is made to create an index for the predicted results
from the model's index or model's row labels.
method : str or None, optional
Method to use for calculating prediction intervals. 'exact'
(default, if available) or 'simulated'.
simulate_repetitions : int, optional
Number of simulation repetitions for calculating prediction
intervals when ``method='simulated'``. Default is 1000.
**simulate_kwargs :
Additional arguments passed to the ``simulate`` method.
Returns
-------
PredictionResults
Predicted mean values and prediction intervals
"""
return PredictionResultsWrapper(
PredictionResults(
self,
start,
end,
dynamic,
index,
method,
simulate_repetitions,
**simulate_kwargs,
)
) | Calculates mean prediction and prediction intervals.
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
parse or a datetime type. Default is the the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
dynamic : bool, int, str, or datetime, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Can also be an absolute date string to parse or a
datetime type (these are not interpreted as offsets).
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
index : pd.Index, optional
Optionally an index to associate the predicted results to. If None,
an attempt is made to create an index for the predicted results
from the model's index or model's row labels.
method : str or None, optional
Method to use for calculating prediction intervals. 'exact'
(default, if available) or 'simulated'.
simulate_repetitions : int, optional
Number of simulation repetitions for calculating prediction
intervals when ``method='simulated'``. Default is 1000.
**simulate_kwargs :
Additional arguments passed to the ``simulate`` method.
Returns
-------
PredictionResults
Predicted mean values and prediction intervals | get_prediction | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
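Usage sketch for ``get_prediction`` (hypothetical ``results`` as above; ``method="simulated"`` is only needed when no exact variance formula is implemented for the model class):

pred = results.get_prediction(start=results.nobs, end=results.nobs + 7)
mean = pred.predicted_mean
intervals = pred.pred_int(alpha=0.05)  # lower/upper 95% bounds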
def summary(self, alpha=0.05, start=None):
"""
Summarize the fitted model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals. Default is 0.05.
start : int, optional
Integer of the start observation. Default is 0.
Returns
-------
summary : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
model_name = f"ETS({self.short_name})"
summary = super().summary(
alpha=alpha,
start=start,
title="ETS Results",
model_name=model_name,
)
if self.model.initialization_method != "estimated":
params = np.array(self.initial_state)
if params.ndim > 1:
params = params[0]
names = self.model.initial_state_names
param_header = [
"initialization method: %s" % self.model.initialization_method
]
params_stubs = names
params_data = [
[forg(params[i], prec=4)] for i in range(len(params))
]
initial_state_table = SimpleTable(
params_data, param_header, params_stubs, txt_fmt=fmt_params
)
summary.tables.insert(-1, initial_state_table)
return summary | Summarize the fitted model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals. Default is 0.05.
start : int, optional
Integer of the start observation. Default is 0.
Returns
-------
summary : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary | summary | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
def var_pred_mean(self):
"""The variance of the predicted mean"""
return self.forecast_variance | The variance of the predicted mean | var_pred_mean | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
def pred_int(self, alpha=0.05):
"""
Calculates prediction intervals by performing multiple simulations.
Parameters
----------
alpha : float, optional
The significance level for the prediction interval. Default is
0.05, that is, a 95% prediction interval.
"""
if self.method == "simulated":
simulated_upper_pi = np.quantile(
self.simulation_results, 1 - alpha / 2, axis=1
)
simulated_lower_pi = np.quantile(
self.simulation_results, alpha / 2, axis=1
)
pred_int = np.vstack((simulated_lower_pi, simulated_upper_pi)).T
else:
q = norm.ppf(1 - alpha / 2)
half_interval_size = q * np.sqrt(self.forecast_variance)
pred_int = np.vstack(
(
self.predicted_mean - half_interval_size,
self.predicted_mean + half_interval_size,
)
).T
if self.use_pandas:
pred_int = pd.DataFrame(pred_int, index=self.row_labels)
names = [
f"lower PI (alpha={alpha:f})",
f"upper PI (alpha={alpha:f})",
]
pred_int.columns = names
return pred_int | Calculates prediction intervals by performing multiple simulations.
Parameters
----------
alpha : float, optional
The significance level for the prediction interval. Default is
0.05, that is, a 95% prediction interval. | pred_int | python | statsmodels/statsmodels | statsmodels/tsa/exponential_smoothing/ets.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/exponential_smoothing/ets.py | BSD-3-Clause |
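The 'exact' branch above is plain Gaussian interval arithmetic; a standalone rendering with made-up mean and variance arrays:

import numpy as np
from scipy.stats import norm

predicted_mean = np.array([10.0, 10.5, 11.0])   # illustrative values
forecast_variance = np.array([1.0, 1.4, 1.9])   # illustrative values
q = norm.ppf(1 - 0.05 / 2)
half = q * np.sqrt(forecast_variance)
interval = np.column_stack((predicted_mean - half, predicted_mean + half))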
def nloglikeobs(self, params):
"""
Log-likelihood of the t-distribution regression model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
The negative log-likelihood of the model evaluated at `params`,
one value per observation.
Notes
-----
.. math:: \\ln L_{i}=\\ln\\Gamma\\left(\\frac{\\nu+1}{2}\\right)-\\ln\\Gamma\\left(\\frac{\\nu}{2}\\right)-\\frac{1}{2}\\ln\\left(\\nu\\pi\\right)-\\frac{\\nu+1}{2}\\ln\\left(1+\\frac{z_{i}^{2}}{\\nu}\\right)-\\ln\\sigma,\\quad z_{i}=\\frac{y_{i}-x_{i}^{\\prime}\\beta}{\\sigma}
"""
#print len(params),
store_params.append(params)
if self.fixed_params is not None:
#print 'using fixed'
params = self.expandparams(params)
beta = params[:-2]
df = params[-2]
scale = params[-1]
loc = np.dot(self.exog, beta)
endog = self.endog
x = (endog - loc)/scale
#next part is stats.t._logpdf
lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)
lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)
lPx -= np_log(scale) # correction for scale
return -lPx | Log-likelihood of the t-distribution regression model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
The negative log-likelihood of the model evaluated at `params`,
one value per observation.
Notes
-----
.. math:: \\ln L_{i}=\\ln\\Gamma\\left(\\frac{\\nu+1}{2}\\right)-\\ln\\Gamma\\left(\\frac{\\nu}{2}\\right)-\\frac{1}{2}\\ln\\left(\\nu\\pi\\right)-\\frac{\\nu+1}{2}\\ln\\left(1+\\frac{z_{i}^{2}}{\\nu}\\right)-\\ln\\sigma,\\quad z_{i}=\\frac{y_{i}-x_{i}^{\\prime}\\beta}{\\sigma} | nloglikeobs | python | statsmodels/statsmodels | statsmodels/examples/ex_generic_mle_tdist.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/ex_generic_mle_tdist.py | BSD-3-Clause |
def fit_ks(self):
'''Fit a Pareto distribution by nested optimization.
Originally published on Stack Overflow. This version does not trim
lower values during the KS optimization.
'''
rvs = self.endog
rvsmin = rvs.min()
fixdf = np.nan * np.ones(3)
self.fixed_params = fixdf
self.fixed_paramsmask = np.isnan(fixdf)
def pareto_ks(loc, rvs):
#start_scale = rvs.min() - loc # not used yet
#est = self.fit_fr(rvs, 1., frozen=[np.nan, loc, np.nan])
self.fixed_params[1] = loc
est = self.fit(start_params=self.start_params[self.fixed_paramsmask]).params
#est = self.fit(start_params=self.start_params, method='nm').params
args = (est[0], loc, est[1])
return stats.kstest(rvs,'pareto',args)[0]
locest = optimize.fmin(pareto_ks, rvsmin - 1.5, (rvs,))
est = stats.pareto.fit_fr(rvs, 0., frozen=[np.nan, locest, np.nan])
args = (est[0], locest[0], est[1])
return args | Fit a Pareto distribution by nested optimization.
Originally published on Stack Overflow. This version does not trim
lower values during the KS optimization. | fit_ks | python | statsmodels/statsmodels | statsmodels/examples/ex_generic_mle_tdist.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/ex_generic_mle_tdist.py | BSD-3-Clause |
def fit_ks1_trim(self):
'''Fit a Pareto distribution by nested optimization.
Originally published on Stack Overflow.
'''
self.nobs = self.endog.shape[0]
rvs = np.sort(self.endog)
def pareto_ks(loc, rvs):
#start_scale = rvs.min() - loc # not used yet
est = stats.pareto.fit_fr(rvs, frozen=[np.nan, loc, np.nan])
args = (est[0], loc, est[1])
return stats.kstest(rvs,'pareto',args)[0]
#locest = optimize.fmin(pareto_ks, rvsmin*0.7, (rvs,))
maxind = min(np.floor(self.nobs*0.95).astype(int), self.nobs-10)
res = []
for trimidx in range(self.nobs//2, maxind):
loc = rvs[trimidx]
res.append([trimidx, pareto_ks(loc-1e-10, rvs[trimidx:])])
res = np.array(res)
bestidx = res[np.argmin(res[:,1]),0].astype(int)
print(bestidx)
locest = rvs[bestidx]
est = stats.pareto.fit_fr(rvs[bestidx:], 1., frozen=[np.nan, locest, np.nan])
args = (est[0], locest, est[1])
return args | Fit a Pareto distribution by nested optimization.
Originally published on Stack Overflow. | fit_ks1_trim | python | statsmodels/statsmodels | statsmodels/examples/ex_generic_mle_tdist.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/ex_generic_mle_tdist.py | BSD-3-Clause |
def fit_ks1(self):
'''Fit a Pareto distribution by nested optimization.
Originally published on Stack Overflow.
'''
rvs = self.endog
rvsmin = rvs.min()
def pareto_ks(loc, rvs):
#start_scale = rvs.min() - loc # not used yet
est = stats.pareto.fit_fr(rvs, 1., frozen=[np.nan, loc, np.nan])
args = (est[0], loc, est[1])
return stats.kstest(rvs,'pareto',args)[0]
#locest = optimize.fmin(pareto_ks, rvsmin*0.7, (rvs,))
locest = optimize.fmin(pareto_ks, rvsmin - 1.5, (rvs,))
est = stats.pareto.fit_fr(rvs, 1., frozen=[np.nan, locest, np.nan])
args = (est[0], locest[0], est[1])
return args | Fit a Pareto distribution by nested optimization.
Originally published on Stack Overflow. | fit_ks1 | python | statsmodels/statsmodels | statsmodels/examples/ex_generic_mle_tdist.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/ex_generic_mle_tdist.py | BSD-3-Clause |
def probitloglike(params, endog, exog):
"""
Log likelihood for the probit
"""
q = 2*endog - 1
X = exog
return np.add.reduce(stats.norm.logcdf(q*np.dot(X,params))) | Log likelihood for the probit | probitloglike | python | statsmodels/statsmodels | statsmodels/examples/ex_generic_mle.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/ex_generic_mle.py | BSD-3-Clause |
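Minimal call sketch, assuming ``probitloglike`` from above is in scope (synthetic data, illustrative parameter values):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
exog = np.column_stack((np.ones(100), rng.normal(size=100)))
true = np.array([0.5, -1.0])
endog = (exog @ true + rng.normal(size=100) > 0).astype(float)
print(probitloglike(true, endog, exog))  # scalar log-likelihood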
def nloglikeobs(self, params):
"""
Log-likelihood of the t-distribution regression model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
The negative log-likelihood of the model evaluated at `params`,
one value per observation.
Notes
-----
.. math:: \\ln L_{i}=\\ln\\Gamma\\left(\\frac{\\nu+1}{2}\\right)-\\ln\\Gamma\\left(\\frac{\\nu}{2}\\right)-\\frac{1}{2}\\ln\\left(\\nu\\pi\\right)-\\frac{\\nu+1}{2}\\ln\\left(1+\\frac{z_{i}^{2}}{\\nu}\\right)-\\ln\\sigma,\\quad z_{i}=\\frac{y_{i}-x_{i}^{\\prime}\\beta}{\\sigma}
"""
#print len(params),
beta = params[:-2]
df = params[-2]
scale = params[-1]
loc = np.dot(self.exog, beta)
endog = self.endog
x = (endog - loc)/scale
#next part is stats.t._logpdf
lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)
lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)
lPx -= np_log(scale) # correction for scale
return -lPx | Log-likelihood of the t-distribution regression model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
The negative log-likelihood of the model evaluated at `params`,
one value per observation.
Notes
-----
.. math:: \\ln L_{i}=\\ln\\Gamma\\left(\\frac{\\nu+1}{2}\\right)-\\ln\\Gamma\\left(\\frac{\\nu}{2}\\right)-\\frac{1}{2}\\ln\\left(\\nu\\pi\\right)-\\frac{\\nu+1}{2}\\ln\\left(1+\\frac{z_{i}^{2}}{\\nu}\\right)-\\ln\\sigma,\\quad z_{i}=\\frac{y_{i}-x_{i}^{\\prime}\\beta}{\\sigma} | nloglikeobs | python | statsmodels/statsmodels | statsmodels/examples/ex_generic_mle_t.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/ex_generic_mle_t.py | BSD-3-Clause |
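The hand-coded density in both ``nloglikeobs`` variants is the log-pdf of a shifted, scaled t distribution; a spot check against ``scipy.stats.t`` with illustrative values:

import numpy as np
from scipy import stats
from scipy.special import gammaln

endog = np.array([1.0, 2.0, 3.0])
loc, df, scale = 1.5, 5.0, 2.0
x = (endog - loc) / scale
lPx = gammaln((df + 1) / 2) - gammaln(df / 2.)
lPx -= 0.5 * np.log(df * np.pi) + (df + 1) / 2. * np.log(1 + x ** 2 / df)
lPx -= np.log(scale)  # correction for scale, as in the snippet above
assert np.allclose(lPx, stats.t.logpdf(endog, df, loc=loc, scale=scale))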
def main():
"""
Provides a CLI for the demo.
"""
usage = "usage: %prog [options] mode"
usage += '\n'+docstr
parser = OptionParser(usage=usage)
# base_alpha
parser.add_option("-a", "--base_alpha",
help="Size of regularization param (the param actully used will "\
"automatically scale with data size in this demo) "\
"[default: %default]",
dest='base_alpha', action='store', type='float', default=0.01)
# num_samples
parser.add_option("-N", "--num_samples",
help="Number of data points to generate for fit "\
"[default: %default]",
dest='N', action='store', type='int', default=500)
# get_l1_slsqp_results
parser.add_option("--get_l1_slsqp_results",
help="Do an l1 fit using slsqp. [default: %default]", \
action="store_true",dest='get_l1_slsqp_results', default=False)
# get_l1_cvxopt_results
parser.add_option("--get_l1_cvxopt_results",
help="Do an l1 fit using cvxopt. [default: %default]", \
action="store_true",dest='get_l1_cvxopt_results', default=False)
# num_nonconst_covariates
parser.add_option("--num_nonconst_covariates",
help="Number of covariates that are not constant "\
"(a constant will be prepended) [default: %default]",
dest='num_nonconst_covariates', action='store',
type='int', default=10)
# noise_level
parser.add_option("--noise_level",
help="Level of the noise relative to signal [default: %default]",
dest='noise_level', action='store', type='float',
default=0.2)
# cor_length
parser.add_option("--cor_length",
help="Correlation length of the (Gaussian) independent variables"\
"[default: %default]",
dest='cor_length', action='store', type='float',
default=2)
# num_zero_params
parser.add_option("--num_zero_params",
help="Number of parameters equal to zero for every target in "\
"logistic regression examples. [default: %default]",
dest='num_zero_params', action='store', type='int',
default=8)
# num_targets
parser.add_option("-J", "--num_targets",
help="Number of choices for the endogenous response in "\
"multinomial logit example [default: %default]",
dest='num_targets', action='store', type='int', default=3)
# print_summaries
parser.add_option("-s", "--print_summaries",
help="Print the full fit summary. [default: %default]", \
action="store_true",dest='print_summaries', default=False)
# save_arrays
parser.add_option("--save_arrays",
help="Save exog/endog/true_params to disk for future use. "\
"[default: %default]",
action="store_true",dest='save_arrays', default=False)
# load_old_arrays
parser.add_option("--load_old_arrays",
help="Load exog/endog/true_params arrays from disk. "\
"[default: %default]",
action="store_true",dest='load_old_arrays', default=False)
(options, args) = parser.parse_args()
assert len(args) == 1
mode = args[0].lower()
run_demo(mode, **options.__dict__) | Provides a CLI for the demo. | main | python | statsmodels/statsmodels | statsmodels/examples/l1_demo/demo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/l1_demo/demo.py | BSD-3-Clause |
def run_demo(mode, base_alpha=0.01, N=500, get_l1_slsqp_results=False,
get_l1_cvxopt_results=False, num_nonconst_covariates=10,
noise_level=0.2, cor_length=2, num_zero_params=8, num_targets=3,
print_summaries=False, save_arrays=False, load_old_arrays=False):
"""
Run the demo and print results.
Parameters
----------
mode : str
either 'logit', 'mnlogit', or 'probit'
base_alpha : float
Size of regularization param (the param actually used will
automatically scale with data size in this demo)
N : int
Number of data points to generate for fit
get_l1_slsqp_results : bool,
Do an l1 fit using slsqp.
get_l1_cvxopt_results : bool
Do an l1 fit using cvxopt
num_nonconst_covariates : int
Number of covariates that are not constant
(a constant will be prepended)
noise_level : float (non-negative)
Level of the noise relative to signal
cor_length : float (non-negative)
Correlation length of the (Gaussian) independent variables
num_zero_params : int
Number of parameters equal to zero for every target in logistic
regression examples.
num_targets : int
Number of choices for the endogenous response in multinomial logit
example
print_summaries : bool
print the full fit summary.
save_arrays : bool
Save exog/endog/true_params to disk for future use.
load_old_arrays : bool
Load exog/endog/true_params arrays from disk.
"""
if mode != 'mnlogit':
print("Setting num_targets to 2 since mode != 'mnlogit'")
num_targets = 2
models = {
'logit': sm.Logit, 'mnlogit': sm.MNLogit, 'probit': sm.Probit}
endog_funcs = {
'logit': get_logit_endog, 'mnlogit': get_logit_endog,
'probit': get_probit_endog}
# The regularization parameter
# Here we scale it with N for simplicity. In practice, you should
# use cross validation to pick alpha
alpha = base_alpha * N * sp.ones((num_nonconst_covariates+1, num_targets-1))
alpha[0,:] = 0 # Do not regularize the intercept
#### Make the data and model
exog = get_exog(N, num_nonconst_covariates, cor_length)
exog = sm.add_constant(exog)
true_params = sp.rand(num_nonconst_covariates+1, num_targets-1)
if num_zero_params:
true_params[-num_zero_params:, :] = 0
endog = endog_funcs[mode](true_params, exog, noise_level)
endog, exog, true_params = save_andor_load_arrays(
endog, exog, true_params, save_arrays, load_old_arrays)
model = models[mode](endog, exog)
#### Get the results and print
results = run_solvers(model, true_params, alpha,
get_l1_slsqp_results, get_l1_cvxopt_results, print_summaries)
summary_str = get_summary_str(results, true_params, get_l1_slsqp_results,
get_l1_cvxopt_results, print_summaries)
print(summary_str) | Run the demo and print results.
Parameters
----------
mode : str
either 'logit', 'mnlogit', or 'probit'
base_alpha : float
Size of regularization param (the param actually used will
automatically scale with data size in this demo)
N : int
Number of data points to generate for fit
get_l1_slsqp_results : bool,
Do an l1 fit using slsqp.
get_l1_cvxopt_results : bool
Do an l1 fit using cvxopt
num_nonconst_covariates : int
Number of covariates that are not constant
(a constant will be prepended)
noise_level : float (non-negative)
Level of the noise relative to signal
cor_length : float (non-negative)
Correlation length of the (Gaussian) independent variables
num_zero_params : int
Number of parameters equal to zero for every target in logistic
regression examples.
num_targets : int
Number of choices for the endogenous response in multinomial logit
example
print_summaries : bool
print the full fit summary.
save_arrays : bool
Save exog/endog/true_params to disk for future use.
load_old_arrays : bool
Load exog/endog/true_params arrays from disk. | run_demo | python | statsmodels/statsmodels | statsmodels/examples/l1_demo/demo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/l1_demo/demo.py | BSD-3-Clause |
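Programmatic invocation equivalent to the CLI defaults (assumes ``run_demo`` from this demo module is importable; cross-validating ``base_alpha`` is left to the user, as the comment in the source notes):

run_demo("logit", base_alpha=0.01, N=500, get_l1_slsqp_results=True)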
def run_solvers(model, true_params, alpha, get_l1_slsqp_results,
get_l1_cvxopt_results, print_summaries):
"""
Runs the solvers using the specified settings and returns a result string.
Works the same for any l1 penalized likelihood model.
"""
results = {}
#### Train the models
# Get ML results
results['results_ML'] = model.fit(method='newton')
# Get l1 results
start_params = results['results_ML'].params.ravel(order='F')
if get_l1_slsqp_results:
results['results_l1_slsqp'] = model.fit_regularized(
method='l1', alpha=alpha, maxiter=1000,
start_params=start_params, retall=True)
if get_l1_cvxopt_results:
results['results_l1_cvxopt_cp'] = model.fit_regularized(
method='l1_cvxopt_cp', alpha=alpha, maxiter=50,
start_params=start_params, retall=True, feastol=1e-5)
return results | Runs the solvers using the specified settings and returns a result string.
Works the same for any l1 penalized likelihood model. | run_solvers | python | statsmodels/statsmodels | statsmodels/examples/l1_demo/demo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/l1_demo/demo.py | BSD-3-Clause |
def get_summary_str(results, true_params, get_l1_slsqp_results,
get_l1_cvxopt_results, print_summaries):
"""
Gets a string summarizing the results.
"""
#### Extract specific results
results_ML = results['results_ML']
RMSE_ML = get_RMSE(results_ML, true_params)
if get_l1_slsqp_results:
results_l1_slsqp = results['results_l1_slsqp']
if get_l1_cvxopt_results:
results_l1_cvxopt_cp = results['results_l1_cvxopt_cp']
#### Format summaries
# Short summary
print_str = '\n\n=========== Short Error Summary ============'
print_str += '\n\n The maximum likelihood fit RMS error = %.4f' % RMSE_ML
if get_l1_slsqp_results:
RMSE_l1_slsqp = get_RMSE(results_l1_slsqp, true_params)
print_str += '\n The l1_slsqp fit RMS error = %.4f' % RMSE_l1_slsqp
if get_l1_cvxopt_results:
RMSE_l1_cvxopt_cp = get_RMSE(results_l1_cvxopt_cp, true_params)
print_str += '\n The l1_cvxopt_cp fit RMS error = %.4f' % RMSE_l1_cvxopt_cp
# Parameters
print_str += '\n\n\n============== Parameters ================='
print_str += "\n\nTrue parameters: \n%s" % true_params
# Full summary
if print_summaries:
print_str += '\n' + results_ML.summary().as_text()
if get_l1_slsqp_results:
print_str += '\n' + results_l1_slsqp.summary().as_text()
if get_l1_cvxopt_results:
print_str += '\n' + results_l1_cvxopt_cp.summary().as_text()
else:
print_str += '\n\nThe maximum likelihood params are \n%s' % results_ML.params
if get_l1_slsqp_results:
print_str += '\n\nThe l1_slsqp params are \n%s' % results_l1_slsqp.params
if get_l1_cvxopt_results:
print_str += '\n\nThe l1_cvxopt_cp params are \n%s' % \
results_l1_cvxopt_cp.params
# Return
return print_str | Gets a string summarizing the results. | get_summary_str | python | statsmodels/statsmodels | statsmodels/examples/l1_demo/demo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/l1_demo/demo.py | BSD-3-Clause |
def get_RMSE(results, true_params):
"""
Gets the (normalized) root mean square error.
"""
diff = results.params.reshape(true_params.shape) - true_params
raw_RMSE = sp.sqrt(((diff)**2).sum())
param_norm = sp.sqrt((true_params**2).sum())
return raw_RMSE / param_norm | Gets the (normalized) root mean square error. | get_RMSE | python | statsmodels/statsmodels | statsmodels/examples/l1_demo/demo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/l1_demo/demo.py | BSD-3-Clause |
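A small numeric illustration of the normalized RMSE (hypothetical estimates; the stand-in object carries only the ``params`` attribute that ``get_RMSE`` reads):

import numpy as np
from types import SimpleNamespace

true_params = np.array([[1.0], [0.0], [2.0]])
results = SimpleNamespace(params=np.array([[1.1], [0.1], [1.9]]))
diff = results.params.reshape(true_params.shape) - true_params
print(np.sqrt((diff ** 2).sum()) / np.sqrt((true_params ** 2).sum()))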
def get_logit_endog(true_params, exog, noise_level):
"""
Gets an endogenous response that is consistent with the true_params,
perturbed by noise at noise_level.
"""
N = exog.shape[0]
### Create the probability of entering the different classes,
### given exog and true_params
Xdotparams = sp.dot(exog, true_params)
eXB = sp.column_stack((sp.ones(len(Xdotparams)), sp.exp(Xdotparams)))
class_probabilities = eXB / eXB.sum(1)[:, None]
### Create the endog
cdf = class_probabilities.cumsum(axis=1)
endog = sp.zeros(N)
for i in range(N):
endog[i] = sp.searchsorted(cdf[i, :], sp.rand())
return endog | Gets an endogenous response that is consistent with the true_params,
perturbed by noise at noise_level. | get_logit_endog | python | statsmodels/statsmodels | statsmodels/examples/l1_demo/demo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/l1_demo/demo.py | BSD-3-Clause |
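The sampling step above is an inverse-CDF draw from each row's class probabilities; a vectorized numpy equivalent of the loop (illustrative probabilities):

import numpy as np

rng = np.random.default_rng(0)
class_probabilities = np.array([[0.2, 0.5, 0.3], [0.6, 0.3, 0.1]])
cdf = class_probabilities.cumsum(axis=1)
u = rng.random(cdf.shape[0])
endog = (u[:, None] > cdf).sum(axis=1)  # matches searchsorted row by row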
def get_probit_endog(true_params, exog, noise_level):
"""
Gets an endogenous response that is consistent with the true_params,
perturbed by noise at noise_level.
"""
N = exog.shape[0]
### Create the probability of entering the different classes,
### given exog and true_params
Xdotparams = sp.dot(exog, true_params)
### Create the endog
cdf = stats.norm._cdf(-Xdotparams)
endog = sp.zeros(N)
for i in range(N):
endog[i] = sp.searchsorted(cdf[i, :], sp.rand())
return endog | Gets an endogenous response that is consistent with the true_params,
perturbed by noise at noise_level. | get_probit_endog | python | statsmodels/statsmodels | statsmodels/examples/l1_demo/demo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/l1_demo/demo.py | BSD-3-Clause |
def get_exog(N, num_nonconst_covariates, cor_length):
"""
Returns an exog array with correlations determined by cor_length.
The covariance matrix of exog will have (asymptotically, as
:math:'N\\to\\inf')
.. math:: Cov[i,j] = \\exp(-|i-j| / cor_length)
Higher cor_length makes the problem more ill-posed, and easier to screw
up with noise.
BEWARE: With very long correlation lengths, you often get a singular KKT
matrix (during the l1_cvxopt_cp fit)
"""
## Create the noiseless exog
uncorrelated_exog = sp.randn(N, num_nonconst_covariates)
if cor_length == 0:
exog = uncorrelated_exog
else:
cov_matrix = sp.zeros((num_nonconst_covariates, num_nonconst_covariates))
j = sp.arange(num_nonconst_covariates)
for i in range(num_nonconst_covariates):
cov_matrix[i,:] = sp.exp(-sp.fabs(i-j) / cor_length)
chol = linalg.cholesky(cov_matrix) # cov_matrix = sp.dot(chol.T, chol)
exog = sp.dot(uncorrelated_exog, chol)
## Return
return exog | Returns an exog array with correlations determined by cor_length.
The covariance matrix of exog will have (asymptotically, as
:math:'N\\to\\inf')
.. math:: Cov[i,j] = \\exp(-|i-j| / cor_length)
Higher cor_length makes the problem more ill-posed, and easier to screw
up with noise.
BEWARE: With very long correlation lengths, you often get a singular KKT
matrix (during the l1_cvxopt_cp fit) | get_exog | python | statsmodels/statsmodels | statsmodels/examples/l1_demo/demo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/l1_demo/demo.py | BSD-3-Clause |
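An empirical check of the stated covariance structure, using modern numpy in place of the deprecated ``scipy`` array aliases in the snippet (the sample size is chosen large so the sample covariance is close to its limit):

import numpy as np

N, k, cor_length = 200000, 5, 2.0
j = np.arange(k)
cov = np.exp(-np.abs(j[:, None] - j[None, :]) / cor_length)
chol = np.linalg.cholesky(cov).T          # upper factor: cov = chol.T @ chol
exog = np.random.default_rng(0).standard_normal((N, k)) @ chol
print(np.round(np.cov(exog, rowvar=False), 2))  # approximately `cov`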
def flip(self):
'''reverse polynomial coefficients
'''
return LagPolynomial(self.coef[::-1]) | reverse polynomial coefficients | flip | python | statsmodels/statsmodels | statsmodels/examples/tsa/lagpolynomial.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/tsa/lagpolynomial.py | BSD-3-Clause |