def mae(self):
    """
    (float) Mean absolute error
    """
    return np.mean(np.abs(self.resid))

def mse(self):
    """
    (float) Mean squared error
    """
    return self.sse / self.nobs

def pvalues(self):
    """
    (array) The p-values associated with the z-statistics of the
    coefficients. Note that the coefficients are assumed to have a Normal
    distribution.
    """
    pvalues = np.zeros_like(self.zvalues) * np.nan
    # Only compute p-values for free (non-fixed) parameters that have
    # finite z-statistics; everything else stays NaN. The mask must start
    # all-False, otherwise the free-params assignment below is a no-op.
    mask = np.zeros_like(pvalues, dtype=bool)
    mask[self._free_params_index] = True
    mask &= ~np.isnan(self.zvalues)
    pvalues[mask] = norm.sf(np.abs(self.zvalues[mask])) * 2
    return pvalues

def resid(self):
    """
    (array) The model residuals. An (nobs x k_endog) array.
    """
    # This is a (k_endog x nobs) array; do not want to squeeze in case of
    # the corner case where nobs = 1 (mostly a concern in the predict or
    # forecast functions, but here also to maintain consistency)
    resid = self.forecasts_error
    if resid is None:
        pass
    elif resid.shape[0] == 1:
        resid = resid[0, :]
    else:
        resid = resid.T
    return resid

def sse(self):
    """
    (float) Sum of squared errors
    """
    return np.sum(self.resid**2)

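# ----------------------------------------------------------------------
# Illustrative sketch (not part of the statsmodels source): how the
# `mae`, `mse`, and `sse` accessors above relate on a toy residual
# vector. All values here are made up for demonstration.
import numpy as np

resid = np.array([0.5, -1.0, 0.25, -0.25])  # hypothetical residuals
nobs = resid.shape[0]

sse = np.sum(resid**2)          # sum of squared errors -> 1.375
mse = sse / nobs                # mean squared error    -> 0.34375
mae = np.mean(np.abs(resid))    # mean absolute error   -> 0.5
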
def zvalues(self):
    """
    (array) The z-statistics for the coefficients.
    """
    return self.params / self.bse

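# ----------------------------------------------------------------------
# Illustrative sketch (not from the statsmodels source): the z-statistic
# and two-sided normal p-value computations used by `zvalues` and
# `pvalues` above, on hypothetical estimates and standard errors.
import numpy as np
from scipy.stats import norm

params = np.array([0.9756, 0.0889])     # hypothetical coefficients
bse = np.array([0.10, 0.05])            # hypothetical standard errors

zvalues = params / bse                  # [9.756, 1.778]
pvalues = norm.sf(np.abs(zvalues)) * 2  # two-sided: [~0.0, ~0.075]
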
def test_normality(self, method):
    """
    Test for normality of standardized residuals.

    Null hypothesis is normality.

    Parameters
    ----------
    method : {'jarquebera', None}
        The statistical test for normality. Must be 'jarquebera' for the
        Jarque-Bera normality test. If None, an attempt is made to
        select an appropriate test.

    See Also
    --------
    statsmodels.stats.stattools.jarque_bera
        The Jarque-Bera test of normality.

    Notes
    -----
    Let `d` = max(loglikelihood_burn, nobs_diffuse); this test is
    calculated ignoring the first `d` residuals.

    In the case of missing data, the maintained hypothesis is that the
    data are missing completely at random. This test is then run on the
    standardized residuals excluding those corresponding to missing
    observations.
    """
    if method is None:
        method = 'jarquebera'

    if self.standardized_forecasts_error is None:
        raise ValueError('Cannot compute test statistic when standardized'
                         ' forecast errors have not been computed.')

    if method == 'jarquebera':
        from statsmodels.stats.stattools import jarque_bera
        d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)
        output = []
        for i in range(self.model.k_endog):
            resid = self.filter_results.standardized_forecasts_error[i, d:]
            mask = ~np.isnan(resid)
            output.append(jarque_bera(resid[mask]))
    else:
        raise NotImplementedError('Invalid normality test method.')

    return np.array(output)

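# ----------------------------------------------------------------------
# Hedged sketch of the per-variable Jarque-Bera computation performed in
# `test_normality` above, using simulated residuals with a few NaNs in
# place of `standardized_forecasts_error`.
import numpy as np
from statsmodels.stats.stattools import jarque_bera

rng = np.random.default_rng(0)
resid = rng.standard_normal(200)
resid[::25] = np.nan               # pretend some observations are missing

mask = ~np.isnan(resid)
jb_stat, jb_pvalue, skew, kurtosis = jarque_bera(resid[mask])
# A large jb_pvalue indicates no evidence against the normality null.
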
def test_serial_correlation(self, method, df_adjust=False, lags=None):
    """
    Ljung-Box test for no serial correlation of standardized residuals

    Null hypothesis is no serial correlation.

    Parameters
    ----------
    method : {'ljungbox', 'boxpierce', None}
        The statistical test for serial correlation. If None, an attempt
        is made to select an appropriate test.
    lags : None, int or array_like
        If lags is an integer, then this is taken to be the largest lag
        that is included, and the test result is reported for all
        smaller lag lengths. If lags is a list or array, then all lags
        are included up to the largest lag in the list; however, only
        the tests for the lags in the list are reported. If lags is
        None, then the default maxlag is min(10, nobs // 5) for
        non-seasonal models and min(2*m, nobs // 5) for seasonal time
        series, where m is the seasonal period.
    df_adjust : bool, optional
        If True, the degrees of freedom consumed by the model are
        subtracted from the degrees of freedom used in the test so that
        the adjusted dof for the statistics are lags - model_df. In an
        ARMA model, this value is usually p+q, where p is the AR order
        and q is the MA order. When using df_adjust, it is not possible
        to use tests based on fewer than model_df lags.

    Returns
    -------
    output : ndarray
        An array with `(test_statistic, pvalue)` for each endogenous
        variable and each lag. The array is then sized
        `(k_endog, 2, lags)`. If the method is called as
        `ljungbox = res.test_serial_correlation()`, then `ljungbox[i]`
        holds the results of the Ljung-Box test (as would be returned by
        `statsmodels.stats.diagnostic.acorr_ljungbox`) for the `i` th
        endogenous variable.

    See Also
    --------
    statsmodels.stats.diagnostic.acorr_ljungbox
        Ljung-Box test for serial correlation.

    Notes
    -----
    Let `d` = max(loglikelihood_burn, nobs_diffuse); this test is
    calculated ignoring the first `d` residuals.

    Output is nan for any endogenous variable which has missing values.
    """
    if method is None:
        method = 'ljungbox'

    if self.standardized_forecasts_error is None:
        raise ValueError('Cannot compute test statistic when standardized'
                         ' forecast errors have not been computed.')

    if method == 'ljungbox' or method == 'boxpierce':
        from statsmodels.stats.diagnostic import acorr_ljungbox
        d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)
        # This differs from self.nobs_effective because here we want to
        # exclude exact diffuse periods, whereas self.nobs_effective only
        # excludes explicitly burned (usually approximate diffuse)
        # periods.
        nobs_effective = self.nobs - d
        output = []

        # Default lags for acorr_ljungbox is 40, but we may not always
        # have that many observations
        if lags is None:
            seasonal_periods = getattr(self.model, "seasonal_periods", 0)
            if seasonal_periods:
                lags = min(2 * seasonal_periods, nobs_effective // 5)
            else:
                lags = min(10, nobs_effective // 5)

        model_df = 0
        if df_adjust:
            model_df = max(0, self.df_model - self.k_diffuse_states - 1)
        cols = [2, 3] if method == 'boxpierce' else [0, 1]
        for i in range(self.model.k_endog):
            results = acorr_ljungbox(
                self.filter_results.standardized_forecasts_error[i][d:],
                lags=lags, boxpierce=(method == 'boxpierce'),
                model_df=model_df)
            output.append(np.asarray(results)[:, cols].T)

        output = np.c_[output]
    else:
        raise NotImplementedError('Invalid serial correlation test'
                                  ' method.')
    return output

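# ----------------------------------------------------------------------
# Hedged sketch of the per-variable Ljung-Box call made in
# `test_serial_correlation` above, with simulated residuals standing in
# for `standardized_forecasts_error`.
import numpy as np
from statsmodels.stats.diagnostic import acorr_ljungbox

rng = np.random.default_rng(0)
resid = rng.standard_normal(200)

# boxpierce=True adds Box-Pierce columns alongside the Ljung-Box ones;
# the result is a DataFrame with lb_stat, lb_pvalue, bp_stat, bp_pvalue.
results = acorr_ljungbox(resid, lags=10, boxpierce=True, model_df=0)
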
def impulse_responses(self, steps=1, impulse=0, orthogonalized=False,
                      cumulative=False, **kwargs):
    """
    Impulse response function

    Parameters
    ----------
    steps : int, optional
        The number of steps for which impulse responses are calculated.
        Default is 1. Note that for time-invariant models, the initial
        impulse is not counted as a step, so if `steps=1`, the output
        will have 2 entries.
    impulse : int, str or array_like
        If an integer, the state innovation to pulse; must be between 0
        and `k_posdef-1`. If a str, it indicates which column of df
        the unit (1) impulse is given.
        Alternatively, a custom impulse vector may be provided; it must
        be shaped `k_posdef x 1`.
    orthogonalized : bool, optional
        Whether or not to perform impulse using orthogonalized
        innovations. Note that this will also affect custom `impulse`
        vectors. Default is False.
    cumulative : bool, optional
        Whether or not to return cumulative impulse responses. Default
        is False.
    anchor : int, str, or datetime, optional
        Time point within the sample for the state innovation impulse.
        Type depends on the index of the given `endog` in the model. Two
        special cases are the strings 'start' and 'end', which refer to
        setting the impulse at the first and last points of the sample,
        respectively. Integer values can run from 0 to `nobs - 1`, or
        can be negative to apply negative indexing. Finally, if a
        date/time index was provided to the model, then this argument
        can be a date string to parse or a datetime type. Default is
        'start'.
    exog : array_like, optional
        New observations of exogenous regressors, if applicable.
    **kwargs
        If the model has time-varying design or transition matrices and
        the combination of `anchor` and `steps` implies creating impulse
        responses for the out-of-sample period, then these matrices must
        have updated values provided for the out-of-sample steps. For
        example, if `design` is a time-varying component, `nobs` is 10,
        `anchor=1`, and `steps` is 15, a (`k_endog` x `k_states` x 7)
        matrix must be provided with the new design matrix values.

    Returns
    -------
    impulse_responses : ndarray
        Responses for each endogenous variable due to the impulse
        given by the `impulse` argument. For a time-invariant model, the
        impulse responses are given for `steps + 1` elements (this gives
        the "initial impulse" followed by `steps` responses for the
        important cases of VAR and SARIMAX models), while for
        time-varying models the impulse responses are only given for
        `steps` elements (to avoid having to unexpectedly provide
        updated time-varying matrices).

    See Also
    --------
    simulate
        Simulate a time series according to the given state space model,
        optionally with specified series for the innovations.

    Notes
    -----
    Intercepts in the measurement and state equation are ignored when
    calculating impulse responses.
    """
    scale = self.scale if self.filter_results.filter_concentrated else None
    with self.model.ssm.fixed_scale(scale):
        irfs = self.model.impulse_responses(self.params, steps, impulse,
                                            orthogonalized, cumulative,
                                            **kwargs)
        # These are wrapped automatically, so just return the array
        if isinstance(irfs, (pd.Series, pd.DataFrame)):
            irfs = irfs.values
    return irfs

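# ----------------------------------------------------------------------
# Hedged usage sketch: impulse responses from a fitted SARIMAX model.
# For an AR(1), the IRF should decay geometrically at roughly the
# estimated autoregressive coefficient. Data are simulated here.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
y = np.zeros(200)
for t in range(1, 200):
    y[t] = 0.8 * y[t - 1] + rng.standard_normal()

res = sm.tsa.SARIMAX(y, order=(1, 0, 0)).fit(disp=False)
irfs = res.impulse_responses(steps=5)  # 6 entries: initial impulse + 5 steps
# irfs is approximately [1, phi, phi**2, ...] with phi ~= 0.8
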
def news(self, comparison, impact_date=None, impacted_variable=None,
         start=None, end=None, periods=None, exog=None,
         comparison_type=None, revisions_details_start=False,
         state_index=None, return_raw=False, tolerance=1e-10, **kwargs):
    """
    Compute impacts from updated data (news and revisions)

    Parameters
    ----------
    comparison : array_like or MLEResults
        An updated dataset with updated and/or revised data from which
        the news can be computed, or an updated or previous results
        object to use in computing the news.
    impact_date : int, str, or datetime, optional
        A single specific period of impacts from news and revisions to
        compute. Can also be a date string to parse or a datetime type.
        This argument cannot be used in combination with `start`, `end`,
        or `periods`. Default is the first out-of-sample observation.
    impacted_variable : str, list, array, or slice, optional
        Observation variable label or slice of labels specifying that
        only specific impacted variables should be shown in the News
        output. The impacted variable(s) describe the variables that
        were *affected* by the news. If you do not know the labels for
        the variables, check the `endog_names` attribute of the model
        instance.
    start : int, str, or datetime, optional
        The first period of impacts from news and revisions to compute.
        Can also be a date string to parse or a datetime type. Default
        is the first out-of-sample observation.
    end : int, str, or datetime, optional
        The last period of impacts from news and revisions to compute.
        Can also be a date string to parse or a datetime type. Default
        is the first out-of-sample observation.
    periods : int, optional
        The number of periods of impacts from news and revisions to
        compute.
    exog : array_like, optional
        Array of exogenous regressors for the out-of-sample period, if
        applicable.
    comparison_type : {None, 'previous', 'updated'}
        This denotes whether the `comparison` argument represents a
        *previous* results object or dataset or an *updated* results
        object or dataset. If not specified, then an attempt is made to
        determine the comparison type.
    revisions_details_start : bool, int, str, or datetime, optional
        The period at which to begin computing the detailed impacts of
        data revisions. Any revisions prior to this period will have
        their impacts grouped together. If a negative integer, it is
        interpreted as an offset from the end of the dataset. If set to
        True, detailed impacts are computed for all revisions, while if
        set to False, all revisions are grouped together. Default is
        False. Note that for large models, setting this to be near the
        beginning of the sample can cause this function to be slow.
    state_index : array_like, optional
        An optional index specifying a subset of states to use when
        constructing the impacts of revisions and news. For example, if
        `state_index=[0, 1]` is passed, then only the impacts to the
        observed variables arising from the impacts to the first two
        states will be returned. Default is to use all states.
    return_raw : bool, optional
        Whether or not to return only the specific output or a full
        results object. Default is to return a full results object.
    tolerance : float, optional
        The numerical threshold for determining zero impact. Default is
        that any impact less than 1e-10 is assumed to be zero.

    Returns
    -------
    NewsResults
        Impacts of data revisions and news on estimates

    References
    ----------
    .. [1] Bańbura, Marta, and Michele Modugno.
           "Maximum likelihood estimation of factor models on datasets
           with arbitrary pattern of missing data."
           Journal of Applied Econometrics 29, no. 1 (2014): 133-160.
    .. [2] Bańbura, Marta, Domenico Giannone, and Lucrezia Reichlin.
           "Nowcasting."
           The Oxford Handbook of Economic Forecasting. July 8, 2011.
    .. [3] Bańbura, Marta, Domenico Giannone, Michele Modugno, and
           Lucrezia Reichlin.
           "Now-casting and the real-time data flow."
           In Handbook of Economic Forecasting, vol. 2, pp. 195-237.
           Elsevier, 2013.
    """
    # Validate input
    if self.smoother_results is None:
        raise ValueError('Cannot compute news without Kalman smoother'
                         ' results.')

    if state_index is not None:
        state_index = np.sort(np.array(state_index, dtype=int))
        if state_index[0] < 0:
            raise ValueError('Cannot include negative indexes in'
                             ' `state_index`.')
        if state_index[-1] >= self.model.k_states:
            raise ValueError(f'Given state index {state_index[-1]} is too'
                             ' large for the number of states in the'
                             f' model ({self.model.k_states}).')

    if not isinstance(revisions_details_start, (int, bool)):
        revisions_details_start, _, _, _ = (
            self.model._get_prediction_index(
                revisions_details_start, revisions_details_start))

    # Get the previous and updated results objects from `self` and
    # `comparison`:
    previous, updated, comparison_dataset = self._get_previous_updated(
        comparison, exog=exog, comparison_type=comparison_type, **kwargs)

    # Handle start, end, periods
    start, end, prediction_index = get_impact_dates(
        previous_model=previous.model, updated_model=updated.model,
        impact_date=impact_date, start=start, end=end, periods=periods)

    # News results will always use Pandas, so if the model's data was not
    # from Pandas, we'll create an index, as if the model's data had been
    # given a default Pandas index.
    if prediction_index is None:
        prediction_index = pd.RangeIndex(start=start, stop=end + 1)

    # For time-varying models, try to create an appended `updated` model
    # with NaN values. Do not extend the model if this was already done
    # above (i.e. the case that `comparison` was a new dataset), because
    # in that case `exog` and `kwargs` should have been set with the
    # input `comparison` dataset in mind, and so would be useless here.
    # Ultimately, we've already extended `updated` as far as we can. So
    # raise an exception in that case with a useful message. However, we
    # still want to try to accommodate extending the model here if it is
    # possible.
    # Note that we do not need to extend time-invariant models, because
    # `KalmanSmoother.news` can itself handle any impact dates for
    # time-invariant models.
    time_varying = not (previous.filter_results.time_invariant or
                        updated.filter_results.time_invariant)
    if time_varying and end >= updated.nobs:
        # If the given `comparison` was a dataset and either `exog` or
        # `kwargs` was set, then we assume that we cannot create an
        # updated time-varying model (because then we can't tell if
        # `kwargs` and `exog` arguments are meant to apply to the
        # `comparison` dataset or to this extension)
        if comparison_dataset and (exog is not None or len(kwargs) > 0):
            if comparison is updated:
                raise ValueError('If providing an updated dataset as the'
                                 ' `comparison` with a time-varying'
                                 ' model, then the `end` period cannot be'
                                 ' beyond the end of that updated'
                                 ' dataset.')
            else:
                raise ValueError('If providing a previous dataset as the'
                                 ' `comparison` with a time-varying'
                                 ' model, then the `end` period cannot be'
                                 ' beyond the end of the (updated)'
                                 ' results object.')

        # Try to extend `updated`
        updated_orig = updated
        # TODO: `append` should fix this k_endog=1 issue for us
        # TODO: is the + 1 necessary?
        if self.model.k_endog > 1:
            extra = np.zeros((end - updated.nobs + 1,
                              self.model.k_endog)) * np.nan
        else:
            extra = np.zeros((end - updated.nobs + 1,)) * np.nan
        updated = updated_orig.append(extra, exog=exog, **kwargs)

    # Compute the news
    news_results = updated._news_previous_results(
        previous, start, end + 1, periods,
        revisions_details_start=revisions_details_start,
        state_index=state_index)

    if not return_raw:
        news_results = NewsResults(
            news_results, self, updated, previous, impacted_variable,
            tolerance, row_labels=prediction_index)
    return news_results

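# ----------------------------------------------------------------------
# Hedged usage sketch of `news`: compare a results object fitted on one
# data vintage against an "updated" vintage that revises the last
# observation and appends a new one. Dates and values are illustrative.
import numpy as np
import pandas as pd
import statsmodels.api as sm

index = pd.period_range(start='2000-01', periods=40, freq='M')
y_prev = pd.Series(np.random.default_rng(0).standard_normal(40),
                   index=index)
res_prev = sm.tsa.SARIMAX(y_prev, order=(1, 0, 0)).fit(disp=False)

y_updated = y_prev.copy()
y_updated.iloc[-1] += 0.5              # a data revision
new_obs = pd.Series([0.3], index=pd.period_range(start='2003-05',
                                                 periods=1, freq='M'))
y_updated = pd.concat([y_updated, new_obs])  # a newly released value

news = res_prev.news(y_updated, comparison_type='updated')
print(news.summary())
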
def append(self, endog, exog=None, refit=False, fit_kwargs=None,
           copy_initialization=False, **kwargs):
    """
    Recreate the results object with new data appended to the original data

    Creates a new result object applied to a dataset that is created by
    appending new data to the end of the model's original data. The new
    results can then be used for analysis or forecasting.

    Parameters
    ----------
    endog : array_like
        New observations from the modeled time-series process.
    exog : array_like, optional
        New observations of exogenous regressors, if applicable.
    refit : bool, optional
        Whether to re-fit the parameters, based on the combined dataset.
        Default is False (so parameters from the current results object
        are used to create the new results object).
    copy_initialization : bool, optional
        Whether or not to copy the initialization from the current
        results set to the new model. Default is False.
    fit_kwargs : dict, optional
        Keyword arguments to pass to `fit` (if `refit=True`) or
        `filter` / `smooth`.
    **kwargs
        Keyword arguments may be used to modify model specification
        arguments when creating the new model object.

    Returns
    -------
    results
        Updated Results object, that includes results from both the
        original dataset and the new dataset.

    Notes
    -----
    The `endog` and `exog` arguments to this method must be formatted in
    the same way (e.g. Pandas Series versus Numpy array) as were the
    `endog` and `exog` arrays passed to the original model.

    The `endog` argument to this method should consist of new
    observations that occurred directly after the last element of the
    model's original `endog` array. For any other kind of dataset, see
    the `apply` method.

    This method will apply filtering to all of the original data as well
    as to the new data. To apply filtering only to the new data (which
    can be much faster if the original dataset is large), see the
    `extend` method.

    See Also
    --------
    statsmodels.tsa.statespace.mlemodel.MLEResults.extend
    statsmodels.tsa.statespace.mlemodel.MLEResults.apply

    Examples
    --------
    >>> index = pd.period_range(start='2000', periods=2, freq='Y')
    >>> original_observations = pd.Series([1.2, 1.5], index=index)
    >>> mod = sm.tsa.SARIMAX(original_observations)
    >>> res = mod.fit()
    >>> print(res.params)
    ar.L1     0.9756
    sigma2    0.0889
    dtype: float64
    >>> print(res.fittedvalues)
    2000    0.0000
    2001    1.1707
    Freq: A-DEC, dtype: float64
    >>> print(res.forecast(1))
    2002    1.4634
    Freq: A-DEC, dtype: float64

    >>> new_index = pd.period_range(start='2002', periods=1, freq='Y')
    >>> new_observations = pd.Series([0.9], index=new_index)
    >>> updated_res = res.append(new_observations)
    >>> print(updated_res.params)
    ar.L1     0.9756
    sigma2    0.0889
    dtype: float64
    >>> print(updated_res.fittedvalues)
    2000    0.0000
    2001    1.1707
    2002    1.4634
    Freq: A-DEC, dtype: float64
    >>> print(updated_res.forecast(1))
    2003    0.878
    Freq: A-DEC, dtype: float64
    """
    start = self.nobs
    end = self.nobs + len(endog) - 1
    _, _, _, append_ix = self.model._get_prediction_index(start, end)

    # Check the index of the new data
    if isinstance(self.model.data, PandasData):
        _check_index(append_ix, endog, '`endog`')

    # Concatenate the new data to original data
    new_endog = concat([self.model.data.orig_endog, endog], axis=0,
                       allow_mix=True)

    # Handle `exog`
    if exog is not None:
        _, exog = prepare_exog(exog)
        _check_index(append_ix, exog, '`exog`')

        new_exog = concat([self.model.data.orig_exog, exog], axis=0,
                          allow_mix=True)
    else:
        new_exog = None

    # Create a continuous index for the combined data
    if isinstance(self.model.data, PandasData):
        start = 0
        end = len(new_endog) - 1
        _, _, _, new_index = self.model._get_prediction_index(start, end)

        # Standardize `endog` to have the right index and columns
        columns = self.model.endog_names
        if not isinstance(columns, list):
            columns = [columns]
        new_endog = pd.DataFrame(new_endog, index=new_index,
                                 columns=columns)

        # Standardize `exog` to have the right index
        if new_exog is not None:
            new_exog = pd.DataFrame(new_exog, index=new_index,
                                    columns=self.model.exog_names)

    if copy_initialization:
        init = Initialization.from_results(self.filter_results)
        kwargs.setdefault('initialization', init)

    mod = self.model.clone(new_endog, exog=new_exog, **kwargs)
    res = self._apply(mod, refit=refit, fit_kwargs=fit_kwargs)

    return res

def extend(self, endog, exog=None, fit_kwargs=None, **kwargs):
    """
    Recreate the results object for new data that extends the original data

    Creates a new result object applied to a new dataset that is assumed
    to follow directly from the end of the model's original data. The
    new results can then be used for analysis or forecasting.

    Parameters
    ----------
    endog : array_like
        New observations from the modeled time-series process.
    exog : array_like, optional
        New observations of exogenous regressors, if applicable.
    fit_kwargs : dict, optional
        Keyword arguments to pass to `filter` or `smooth`.
    **kwargs
        Keyword arguments may be used to modify model specification
        arguments when creating the new model object.

    Returns
    -------
    results
        Updated Results object, that includes results only for the new
        dataset.

    See Also
    --------
    statsmodels.tsa.statespace.mlemodel.MLEResults.append
    statsmodels.tsa.statespace.mlemodel.MLEResults.apply

    Notes
    -----
    The `endog` argument to this method should consist of new
    observations that occurred directly after the last element of the
    model's original `endog` array. For any other kind of dataset, see
    the `apply` method.

    This method will apply filtering only to the new data provided by
    the `endog` argument, which can be much faster than re-filtering the
    entire dataset. However, the returned results object will only have
    results for the new data. To retrieve results for both the new data
    and the original data, see the `append` method.

    Examples
    --------
    >>> index = pd.period_range(start='2000', periods=2, freq='Y')
    >>> original_observations = pd.Series([1.2, 1.5], index=index)
    >>> mod = sm.tsa.SARIMAX(original_observations)
    >>> res = mod.fit()
    >>> print(res.params)
    ar.L1     0.9756
    sigma2    0.0889
    dtype: float64
    >>> print(res.fittedvalues)
    2000    0.0000
    2001    1.1707
    Freq: A-DEC, dtype: float64
    >>> print(res.forecast(1))
    2002    1.4634
    Freq: A-DEC, dtype: float64

    >>> new_index = pd.period_range(start='2002', periods=1, freq='Y')
    >>> new_observations = pd.Series([0.9], index=new_index)
    >>> updated_res = res.extend(new_observations)
    >>> print(updated_res.params)
    ar.L1     0.9756
    sigma2    0.0889
    dtype: float64
    >>> print(updated_res.fittedvalues)
    2002    1.4634
    Freq: A-DEC, dtype: float64
    >>> print(updated_res.forecast(1))
    2003    0.878
    Freq: A-DEC, dtype: float64
    """
    start = self.nobs
    end = self.nobs + len(endog) - 1
    _, _, _, extend_ix = self.model._get_prediction_index(start, end)

    if isinstance(self.model.data, PandasData):
        _check_index(extend_ix, endog, '`endog`')

        # Standardize `endog` to have the right index and columns
        columns = self.model.endog_names
        if not isinstance(columns, list):
            columns = [columns]
        endog = pd.DataFrame(endog, index=extend_ix, columns=columns)

    # Extend the current fit result to additional data
    mod = self.model.clone(endog, exog=exog, **kwargs)
    mod.ssm.initialization = Initialization(
        mod.k_states, 'known', constant=self.predicted_state[..., -1],
        stationary_cov=self.predicted_state_cov[..., -1])
    res = self._apply(mod, refit=False, fit_kwargs=fit_kwargs)

    return res

def apply(self, endog, exog=None, refit=False, fit_kwargs=None,
          copy_initialization=False, **kwargs):
    """
    Apply the fitted parameters to new data unrelated to the original data

    Creates a new result object using the current fitted parameters,
    applied to a completely new dataset that is assumed to be unrelated
    to the model's original data. The new results can then be used for
    analysis or forecasting.

    Parameters
    ----------
    endog : array_like
        New observations from the modeled time-series process.
    exog : array_like, optional
        New observations of exogenous regressors, if applicable.
    refit : bool, optional
        Whether to re-fit the parameters, using the new dataset.
        Default is False (so parameters from the current results object
        are used to create the new results object).
    copy_initialization : bool, optional
        Whether or not to copy the initialization from the current
        results set to the new model. Default is False.
    fit_kwargs : dict, optional
        Keyword arguments to pass to `fit` (if `refit=True`) or
        `filter` / `smooth`.
    **kwargs
        Keyword arguments may be used to modify model specification
        arguments when creating the new model object.

    Returns
    -------
    results
        Updated Results object, that includes results only for the new
        dataset.

    See Also
    --------
    statsmodels.tsa.statespace.mlemodel.MLEResults.append
    statsmodels.tsa.statespace.mlemodel.MLEResults.extend

    Notes
    -----
    The `endog` argument to this method should consist of new
    observations that are not necessarily related to the original
    model's `endog` dataset. For observations that continue the original
    dataset by following directly after its last element, see the
    `append` and `extend` methods.

    Examples
    --------
    >>> index = pd.period_range(start='2000', periods=2, freq='Y')
    >>> original_observations = pd.Series([1.2, 1.5], index=index)
    >>> mod = sm.tsa.SARIMAX(original_observations)
    >>> res = mod.fit()
    >>> print(res.params)
    ar.L1     0.9756
    sigma2    0.0889
    dtype: float64
    >>> print(res.fittedvalues)
    2000    0.0000
    2001    1.1707
    Freq: A-DEC, dtype: float64
    >>> print(res.forecast(1))
    2002    1.4634
    Freq: A-DEC, dtype: float64

    >>> new_index = pd.period_range(start='1980', periods=3, freq='Y')
    >>> new_observations = pd.Series([1.4, 0.3, 1.2], index=new_index)
    >>> new_res = res.apply(new_observations)
    >>> print(new_res.params)
    ar.L1     0.9756
    sigma2    0.0889
    dtype: float64
    >>> print(new_res.fittedvalues)
    1980    1.1707
    1981    1.3659
    1982    0.2927
    Freq: A-DEC, dtype: float64
    >>> print(new_res.forecast(1))
    1983    1.1707
    Freq: A-DEC, dtype: float64
    """
    mod = self.model.clone(endog, exog=exog, **kwargs)

    if copy_initialization:
        init = Initialization.from_results(self.filter_results)
        mod.ssm.initialization = init

    res = self._apply(mod, refit=refit, fit_kwargs=fit_kwargs)

    return res

def plot_diagnostics(self, variable=0, lags=10, fig=None, figsize=None,
                     truncate_endog_names=24, auto_ylims=False,
                     bartlett_confint=False, acf_kwargs=None):
    """
    Diagnostic plots for standardized residuals of one endogenous variable

    Parameters
    ----------
    variable : int, optional
        Index of the endogenous variable for which the diagnostic plots
        should be created. Default is 0.
    lags : int, optional
        Number of lags to include in the correlogram. Default is 10.
    fig : Figure, optional
        If given, subplots are created in this figure instead of in a
        new figure. Note that the 2x2 grid will be created in the
        provided figure using `fig.add_subplot()`.
    figsize : tuple, optional
        If a figure is created, this argument allows specifying a size.
        The tuple is (width, height).
    truncate_endog_names : int, optional
        The number of characters to show for the endogenous variable's
        name in the plot title. Default is 24.
    auto_ylims : bool, optional
        If True, adjusts automatically the y-axis limits to ACF values.
    bartlett_confint : bool, default False
        Confidence intervals for ACF values are generally placed at 2
        standard errors around r_k. The formula used for standard error
        depends upon the situation. If the autocorrelations are being
        used to test for randomness of residuals as part of the ARIMA
        routine, the standard errors are determined assuming the
        residuals are white noise. The approximate formula for any lag
        is that the standard error of each r_k is 1/sqrt(N). See section
        9.4 of [1] for more details on the 1/sqrt(N) result. For more
        elementary discussion, see section 5.3.2 in [2].
        For the ACF of raw data, the standard error at a lag k is found
        as if the right model was an MA(k-1). This allows the possible
        interpretation that if all autocorrelations past a certain lag
        are within the limits, the model might be an MA of order defined
        by the last significant autocorrelation. In this case, a moving
        average model is assumed for the data and the standard errors
        for the confidence intervals should be generated using
        Bartlett's formula. For more details on the Bartlett formula
        result, see section 7.2 in [1].
    acf_kwargs : dict, optional
        Optional dictionary of keyword arguments that are directly
        passed on to the correlogram Matplotlib plot produced by
        plot_acf().

    Returns
    -------
    Figure
        Figure instance with diagnostic plots

    See Also
    --------
    statsmodels.graphics.gofplots.qqplot
    statsmodels.graphics.tsaplots.plot_acf

    Notes
    -----
    Produces a 2x2 plot grid with the following plots (ordered left to
    right, top to bottom):

    1. Standardized residuals over time
    2. Histogram plus estimated density of standardized residuals, along
       with a Normal(0,1) density plotted for reference.
    3. Normal Q-Q plot, with Normal reference line.
    4. Correlogram

    References
    ----------
    [1] Brockwell and Davis, 1987. Time Series Theory and Methods
    [2] Brockwell and Davis, 2010. Introduction to Time Series and
    Forecasting, 2nd edition.
    """
    from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
    _import_mpl()
    fig = create_mpl_fig(fig, figsize)
    # Eliminate residuals associated with burned or diffuse likelihoods
    d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)

    # If given a variable name, find the index
    if isinstance(variable, str):
        variable = self.model.endog_names.index(variable)

    # Get residuals
    if hasattr(self.data, 'dates') and self.data.dates is not None:
        ix = self.data.dates[d:]
    else:
        ix = np.arange(self.nobs - d)
    resid = pd.Series(
        self.filter_results.standardized_forecasts_error[variable, d:],
        index=ix)

    if resid.shape[0] < max(d, lags):
        raise ValueError(
            "Length of endogenous variable must be larger than the"
            " number of lags used in the model and the number of"
            " observations burned in the log-likelihood calculation."
        )

    # Top-left: residuals vs time
    ax = fig.add_subplot(221)
    resid.dropna().plot(ax=ax)
    ax.hlines(0, ix[0], ix[-1], alpha=0.5)
    ax.set_xlim(ix[0], ix[-1])
    name = self.model.endog_names[variable]
    if len(name) > truncate_endog_names:
        name = name[:truncate_endog_names - 3] + '...'
    ax.set_title(f'Standardized residual for "{name}"')

    # Top-right: histogram, Gaussian kernel density, Normal density
    # Can only do histogram and Gaussian kernel density on the non-null
    # elements
    resid_nonmissing = resid.dropna()
    ax = fig.add_subplot(222)

    ax.hist(resid_nonmissing, density=True, label='Hist',
            edgecolor='#FFFFFF')

    from scipy.stats import gaussian_kde, norm
    kde = gaussian_kde(resid_nonmissing)
    xlim = (-1.96 * 2, 1.96 * 2)
    x = np.linspace(xlim[0], xlim[1])
    ax.plot(x, kde(x), label='KDE')
    ax.plot(x, norm.pdf(x), label='N(0,1)')
    ax.set_xlim(xlim)
    ax.legend()
    ax.set_title('Histogram plus estimated density')

    # Bottom-left: QQ plot
    ax = fig.add_subplot(223)
    from statsmodels.graphics.gofplots import qqplot
    qqplot(resid_nonmissing, line='s', ax=ax)
    ax.set_title('Normal Q-Q')

    # Bottom-right: Correlogram
    ax = fig.add_subplot(224)
    from statsmodels.graphics.tsaplots import plot_acf

    if acf_kwargs is None:
        acf_kwargs = {}
    plot_acf(resid, ax=ax, lags=lags, auto_ylims=auto_ylims,
             bartlett_confint=bartlett_confint, **acf_kwargs)
    ax.set_title('Correlogram')

    return fig

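# ----------------------------------------------------------------------
# Hedged usage sketch: the 2x2 diagnostic grid for a fitted state space
# model, on simulated data.
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
y = rng.standard_normal(200).cumsum()

res = sm.tsa.SARIMAX(y, order=(1, 1, 0)).fit(disp=False)
fig = res.plot_diagnostics(lags=10, figsize=(10, 8))
plt.show()
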
def summary(self, alpha=.05, start=None, title=None, model_name=None,
display_params=True, display_diagnostics=True,
truncate_endog_names=None, display_max_endog=None,
extra_top_left=None, extra_top_right=None):
"""
Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals. Default is 0.05.
start : int, optional
Integer of the start observation. Default is 0.
model_name : str
The name of the model used. Default is to use model class name.
Returns
-------
summary : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
from statsmodels.iolib.summary import Summary
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.tableformatting import fmt_params
# Model specification results
model = self.model
if title is None:
title = 'Statespace Model Results'
if start is None:
start = 0
if self.model._index_dates:
ix = self.model._index
d = ix[start]
sample = ['%02d-%02d-%02d' % (d.month, d.day, d.year)]
d = ix[-1]
sample += ['- ' + '%02d-%02d-%02d' % (d.month, d.day, d.year)]
else:
sample = [str(start), ' - ' + str(self.nobs)]
# Standardize the model name as a list of str
if model_name is None:
model_name = model.__class__.__name__
# Truncate endog names
if truncate_endog_names is None:
truncate_endog_names = False if self.model.k_endog == 1 else 24
endog_names = self.model.endog_names
if not isinstance(endog_names, list):
endog_names = [endog_names]
endog_names = [str(name) for name in endog_names]
if truncate_endog_names is not False:
n = truncate_endog_names
endog_names = [name if len(name) <= n else name[:n] + '...'
for name in endog_names]
# Shorten the endog name list if applicable
if display_max_endog is None:
display_max_endog = np.inf
yname = None
if self.model.k_endog > display_max_endog:
k = self.model.k_endog - 1
yname = '"' + endog_names[0] + f'", and {k} more'
# Create the tables
if not isinstance(model_name, list):
model_name = [model_name]
top_left = [('Dep. Variable:', None)]
top_left.append(('Model:', [model_name[0]]))
for i in range(1, len(model_name)):
top_left.append(('', ['+ ' + model_name[i]]))
top_left += [
('Date:', None),
('Time:', None),
('Sample:', [sample[0]]),
('', [sample[1]])
]
top_right = [
('No. Observations:', [self.nobs]),
('Log Likelihood', ["%#5.3f" % self.llf]),
]
if hasattr(self, 'rsquared'):
top_right.append(('R-squared:', ["%#8.3f" % self.rsquared]))
top_right += [
('AIC', ["%#5.3f" % self.aic]),
('BIC', ["%#5.3f" % self.bic]),
('HQIC', ["%#5.3f" % self.hqic])]
if (self.filter_results is not None and
self.filter_results.filter_concentrated):
top_right.append(('Scale', ["%#5.3f" % self.scale]))
if hasattr(self, 'cov_type'):
cov_type = self.cov_type
if cov_type == 'none':
cov_type = 'Not computed'
top_left.append(('Covariance Type:', [cov_type]))
if extra_top_left is not None:
top_left += extra_top_left
if extra_top_right is not None:
top_right += extra_top_right
summary = Summary()
summary.add_table_2cols(self, gleft=top_left, gright=top_right,
title=title, yname=yname)
table_ix = 1
if len(self.params) > 0 and display_params:
summary.add_table_params(self, alpha=alpha,
xname=self.param_names, use_t=False)
table_ix += 1
# Diagnostic tests results
if display_diagnostics:
try:
het = self.test_heteroskedasticity(method='breakvar')
except Exception: # FIXME: catch something specific
het = np.zeros((self.model.k_endog, 2)) * np.nan
try:
lb = self.test_serial_correlation(method='ljungbox', lags=[1])
except Exception: # FIXME: catch something specific
lb = np.zeros((self.model.k_endog, 2, 1)) * np.nan
try:
jb = self.test_normality(method='jarquebera')
except Exception: # FIXME: catch something specific
jb = np.zeros((self.model.k_endog, 4)) * np.nan
if self.model.k_endog <= display_max_endog:
def format_str(array):
return [', '.join([f'{i:.2f}' for i in array])]
diagn_left = [
('Ljung-Box (L1) (Q):', format_str(lb[:, 0, -1])),
('Prob(Q):', format_str(lb[:, 1, -1])),
('Heteroskedasticity (H):', format_str(het[:, 0])),
('Prob(H) (two-sided):', format_str(het[:, 1]))]
diagn_right = [('Jarque-Bera (JB):', format_str(jb[:, 0])),
('Prob(JB):', format_str(jb[:, 1])),
('Skew:', format_str(jb[:, 2])),
('Kurtosis:', format_str(jb[:, 3]))
]
summary.add_table_2cols(self, gleft=diagn_left,
gright=diagn_right, title="")
else:
columns = ['LjungBox\n(L1) (Q)', 'Prob(Q)',
'Het.(H)', 'Prob(H)',
'Jarque\nBera(JB)', 'Prob(JB)', 'Skew', 'Kurtosis']
data = pd.DataFrame(
np.c_[lb[:, :2, -1], het[:, :2], jb[:, :4]],
index=endog_names, columns=columns)
try:
data = data.map(
lambda num: '' if pd.isnull(num) else '%.2f' % num
)
except AttributeError:
data = data.applymap(
lambda num: '' if pd.isnull(num) else '%.2f' % num
)
data.index.name = 'Residual of\nDep. variable'
data = data.reset_index()
params_data = data.values
params_header = data.columns.tolist()
params_stubs = None
title = 'Residual diagnostics:'
table = SimpleTable(
params_data, params_header, params_stubs,
txt_fmt=fmt_params, title=title)
summary.tables.insert(table_ix, table)
# Add warnings/notes, added to text format only
etext = []
if hasattr(self, 'cov_type') and 'description' in self.cov_kwds:
etext.append(self.cov_kwds['description'])
if self._rank < (len(self.params) - len(self.fixed_params)):
cov_params = self.cov_params()
if len(self.fixed_params) > 0:
mask = np.ix_(self._free_params_index, self._free_params_index)
cov_params = cov_params[mask]
etext.append("Covariance matrix is singular or near-singular,"
" with condition number %6.3g. Standard errors may be"
" unstable." % _safe_cond(cov_params))
if etext:
etext = [f"[{i + 1}] {text}"
for i, text in enumerate(etext)]
etext.insert(0, "Warnings:")
summary.add_extra_txt(etext)
return summary | Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals. Default is 0.05.
start : int, optional
Integer of the start observation. Default is 0.
title : str, optional
The title used for the summary table. Default is
'Statespace Model Results'.
model_name : str, optional
The name of the model used. Default is to use model class name.
display_params : bool, optional
Whether to include the table of estimated parameters. Default is True.
display_diagnostics : bool, optional
Whether to include the table of residual diagnostic tests. Default is
True.
truncate_endog_names : int, optional
Number of characters after which endogenous variable names are
truncated. Default is no truncation for univariate models and 24
characters otherwise.
display_max_endog : int, optional
Maximum number of endogenous variables for which diagnostics are shown
in the compact two-column layout; above this, a separate per-variable
table is used. Default is no maximum.
extra_top_left : list of (str, list) tuples, optional
Additional rows to append to the top-left table of the summary.
extra_top_right : list of (str, list) tuples, optional
Additional rows to append to the top-right table of the summary.
Returns
-------
summary : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary | summary | python | statsmodels/statsmodels | statsmodels/tsa/statespace/mlemodel.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/statespace/mlemodel.py | BSD-3-Clause |
def __direct_sum(square_matrices):
"""Compute the matrix direct sum of an iterable of square numpy 2-d arrays
"""
new_shape = np.sum([m.shape for m in square_matrices], axis=0)
new_array = np.zeros(new_shape)
offset = 0
for m in square_matrices:
rows, cols = m.shape
assert rows == cols
new_array[offset:offset + rows, offset:offset + rows] = m
offset += rows
return new_array | Compute the matrix direct sum of an iterable of square numpy 2-d arrays | __direct_sum | python | statsmodels/statsmodels | statsmodels/tsa/statespace/tests/test_structural.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/statespace/tests/test_structural.py | BSD-3-Clause |
def test_nan():
"""
This is a very slow test to check that the distribution of simulated states
(from the posterior) is correct in the presence of NaN values. Here, it
checks the marginal distribution of the drawn states against the values
computed from the smoother and prints the result.
With the fixed simulation smoother, it prints:
True values:
[1. 0.66666667 0.66666667 1. ]
[0. 0.95238095 0.95238095 0. ]
Simulated values:
[1. 0.66699187 0.66456719 1. ]
[0. 0.953608 0.953198 0. ]
Previously, it would have printed:
True values:
[1. 0.66666667 0.66666667 1. ]
[0. 0.95238095 0.95238095 0. ]
Simulated values:
[1. 0.66666667 0.66666667 1. ]
[0. 0. 0. 0.]
"""
# Intentionally disabled: the check below draws 1,000,000 simulated
# paths and is far too slow to run in the regular test suite.
return
mod = sarimax.SARIMAX([1, np.nan, np.nan, 1], order=(1, 0, 0), trend='c')
res = mod.smooth([0, 0.5, 1.0])
rs = np.random.RandomState(1234)
sim = mod.simulation_smoother(random_state=rs)
n = 1000000
out = np.zeros((n, mod.nobs))
for i in range(n):
sim.simulate()
out[i] = sim.simulated_state
print('True values:')
print(res.smoothed_state[0])
print(res.smoothed_state_cov[0, 0])
print()
print('Simulated values:')
print(np.mean(out, axis=0))
print(np.var(out, axis=0).round(6)) | This is a very slow test to check that the distribution of simulated states
(from the posterior) is correct in the presence of NaN values. Here, it
checks the marginal distribution of the drawn states against the values
computed from the smoother and prints the result.
With the fixed simulation smoother, it prints:
True values:
[1. 0.66666667 0.66666667 1. ]
[0. 0.95238095 0.95238095 0. ]
Simulated values:
[1. 0.66699187 0.66456719 1. ]
[0. 0.953608 0.953198 0. ]
Previously, it would have printed:
True values:
[1. 0.66666667 0.66666667 1. ]
[0. 0.95238095 0.95238095 0. ]
Simulated values:
[1. 0.66666667 0.66666667 1. ]
[0. 0. 0. 0.] | test_nan | python | statsmodels/statsmodels | statsmodels/tsa/statespace/tests/test_simulation_smoothing.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/statespace/tests/test_simulation_smoothing.py | BSD-3-Clause |
def date_parser(timestr, parserinfo=None, **kwargs):
"""
Uses dateutil.parser.parse, but also handles monthly dates of the form
1999m4, 1999:m4, 1999:mIV, 1999mIV and the same for quarterly data
with q instead of m. It is not case sensitive. The default for annual
data is the end of the year, which also differs from dateutil.
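Examples
--------
A couple of illustrative parses (hypothetical input values):
>>> date_parser('1999q3')
datetime.datetime(1999, 9, 30, 0, 0)
>>> date_parser('2001:m2')
datetime.datetime(2001, 2, 28, 0, 0)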
"""
flags = re.IGNORECASE | re.VERBOSE
if re.search(_q_pattern, timestr, flags):
y, q = timestr.replace(":", "").lower().split('q')
month, day = _quarter_to_day[q.upper()]
year = int(y)
elif re.search(_m_pattern, timestr, flags):
y, m = timestr.replace(":", "").lower().split('m')
month, day = _month_to_day[m.upper()]
year = int(y)
if _is_leap(y) and month == 2:
day += 1
elif re.search(_y_pattern, timestr, flags):
month, day = 12, 31
year = int(timestr)
else:
return to_datetime(timestr, **kwargs)
return datetime.datetime(year, month, day) | Uses dateutil.parser.parse, but also handles monthly dates of the form
1999m4, 1999:m4, 1999:mIV, 1999mIV and the same for quarterly data
with q instead of m. It is not case sensitive. The default for annual
data is the end of the year, which also differs from dateutil. | date_parser | python | statsmodels/statsmodels | statsmodels/tsa/base/datetools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/datetools.py | BSD-3-Clause |
def date_range_str(start, end=None, length=None):
"""
Returns a list of abbreviated date strings.
Parameters
----------
start : str
The first abbreviated date, for instance, '1965q1' or '1965m1'
end : str, optional
The last abbreviated date if length is None.
length : int, optional
The length of the returned array if end is None.
Returns
-------
date_range : list
List of strings
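Examples
--------
An illustrative quarterly range (hypothetical values):
>>> date_range_str('1960q3', '1961q2')
['1960q3', '1960q4', '1961q1', '1961q2']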
"""
flags = re.IGNORECASE | re.VERBOSE
start = start.lower()
if re.search(_m_pattern, start, flags):
annual_freq = 12
split = 'm'
elif re.search(_q_pattern, start, flags):
annual_freq = 4
split = 'q'
elif re.search(_y_pattern, start, flags):
annual_freq = 1
start += 'a1' # hack
if end:
end += 'a1'
split = 'a'
else:
raise ValueError("Date %s not understood" % start)
yr1, offset1 = lmap(int, start.replace(":", "").split(split))
if end is not None:
end = end.lower()
yr2, offset2 = lmap(int, end.replace(":", "").split(split))
else: # length > 0
if not length:
raise ValueError("length must be provided if end is None")
yr2 = yr1 + length // annual_freq
offset2 = length % annual_freq + (offset1 - 1)
years = [str(yr) for yr in np.repeat(lrange(yr1 + 1, yr2), annual_freq)]
# tack on first year
years = [(str(yr1))] * (annual_freq + 1 - offset1) + years
# tack on last year
years = years + [(str(yr2))] * offset2
if split != 'a':
offset = np.tile(np.arange(1, annual_freq + 1), yr2 - yr1 - 1).astype("S2")
offset = np.r_[np.arange(offset1, annual_freq + 1).astype('S2'), offset]
offset = np.r_[offset, np.arange(1, offset2 + 1).astype('S2')]
date_arr_range = [''.join([i, split, asstr(j)])
for i, j in zip(years, offset)]
else:
date_arr_range = years
return date_arr_range | Returns a list of abbreviated date strings.
Parameters
----------
start : str
The first abbreviated date, for instance, '1965q1' or '1965m1'
end : str, optional
The last abbreviated date if length is None.
length : int, optional
The length of the returned array if end is None.
Returns
-------
date_range : list
List of strings | date_range_str | python | statsmodels/statsmodels | statsmodels/tsa/base/datetools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/datetools.py | BSD-3-Clause |
def dates_from_str(dates):
"""
Turns a sequence of date strings and returns a list of datetime.
Parameters
----------
dates : array_like
A sequence of abbreviated dates as string. For instance,
'1996m1' or '1996Q1'. The datetime dates are at the end of the
period.
Returns
-------
date_list : ndarray
A list of datetime types.
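Examples
--------
An illustrative conversion to period-end dates (hypothetical values):
>>> dates_from_str(['1996q1', '1996q2'])
[datetime.datetime(1996, 3, 31, 0, 0), datetime.datetime(1996, 6, 30, 0, 0)]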
"""
return lmap(date_parser, dates) | Turns a sequence of date strings and returns a list of datetime.
Parameters
----------
dates : array_like
A sequence of abbreviated dates as string. For instance,
'1996m1' or '1996Q1'. The datetime dates are at the end of the
period.
Returns
-------
date_list : ndarray
A list of datetime types. | dates_from_str | python | statsmodels/statsmodels | statsmodels/tsa/base/datetools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/datetools.py | BSD-3-Clause |
def dates_from_range(start, end=None, length=None):
"""
Turns a sequence of date strings and returns a list of datetime.
Parameters
----------
start : str
The first abbreviated date, for instance, '1965q1' or '1965m1'
end : str, optional
The last abbreviated date if length is None.
length : int, optional
The length of the returned array if end is None.
Examples
--------
>>> import statsmodels.api as sm
>>> nobs = 50
>>> dates = sm.tsa.datetools.dates_from_range('1960m1', length=nobs)
Returns
-------
date_list : ndarray
A list of datetime types.
"""
dates = date_range_str(start, end, length)
return dates_from_str(dates) | Turns a sequence of date strings and returns a list of datetime.
Parameters
----------
start : str
The first abbreviated date, for instance, '1965q1' or '1965m1'
end : str, optional
The last abbreviated date if length is None.
length : int, optional
The length of the returned array if end is None.
Examples
--------
>>> import statsmodels.api as sm
>>> nobs = 50
>>> dates = sm.tsa.datetools.dates_from_range('1960m1', length=nobs)
Returns
-------
date_list : ndarray
A list of datetime types. | dates_from_range | python | statsmodels/statsmodels | statsmodels/tsa/base/datetools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/datetools.py | BSD-3-Clause |
def row_labels(self):
"""The row labels used in pandas-types."""
return self._row_labels | The row labels used in pandas-types. | row_labels | python | statsmodels/statsmodels | statsmodels/tsa/base/prediction.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/prediction.py | BSD-3-Clause |
def predicted_mean(self):
"""The predicted mean"""
return self._wrap_pandas(self._predicted_mean, "predicted_mean") | The predicted mean | predicted_mean | python | statsmodels/statsmodels | statsmodels/tsa/base/prediction.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/prediction.py | BSD-3-Clause |
def var_pred_mean(self):
"""The variance of the predicted mean"""
if self._var_pred_mean.ndim > 2:
return self._var_pred_mean
return self._wrap_pandas(self._var_pred_mean, "var_pred_mean") | The variance of the predicted mean | var_pred_mean | python | statsmodels/statsmodels | statsmodels/tsa/base/prediction.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/prediction.py | BSD-3-Clause |
def se_mean(self):
"""The standard deviation of the predicted mean"""
ndim = self._var_pred_mean.ndim
if ndim == 1:
values = np.sqrt(self._var_pred_mean)
elif ndim == 3:
values = np.sqrt(self._var_pred_mean.T.diagonal())
else:
raise NotImplementedError("var_pred_mean must be 1- or 3-dimensional")
return self._wrap_pandas(values, "mean_se") | The standard deviation of the predicted mean | se_mean | python | statsmodels/statsmodels | statsmodels/tsa/base/prediction.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/prediction.py | BSD-3-Clause |
def tvalues(self):
"""The ratio of the predicted mean to its standard deviation"""
val = self.predicted_mean / self.se_mean
if isinstance(val, pd.Series):
val.name = "tvalues"
return val | The ratio of the predicted mean to its standard deviation | tvalues | python | statsmodels/statsmodels | statsmodels/tsa/base/prediction.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/prediction.py | BSD-3-Clause |
def t_test(self, value=0, alternative="two-sided"):
"""
z- or t-test for hypothesis that mean is equal to value
Parameters
----------
value : array_like
value under the null hypothesis
alternative : str
'two-sided', 'larger', 'smaller'
Returns
-------
stat : ndarray
test statistic
pvalue : ndarray
p-value of the hypothesis test, the distribution is given by
the attribute of the instance, specified in `__init__`. Default
if not specified is the normal distribution.
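Examples
--------
A minimal sketch, assuming the enclosing ``PredictionResults`` class can
be constructed directly from a predicted mean and its variance
(hypothetical values):
>>> import numpy as np
>>> pred = PredictionResults(np.array([1.0]), np.array([4.0]))
>>> stat, pvalue = pred.t_test(value=0)
>>> stat
array([0.5])
>>> np.round(pvalue, 4)
array([0.6171])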
"""
# assumes symmetric distribution
stat = (self.predicted_mean - value) / self.se_mean
if alternative in ["two-sided", "2-sided", "2s"]:
pvalue = self.dist.sf(np.abs(stat), *self.dist_args) * 2
elif alternative in ["larger", "l"]:
pvalue = self.dist.sf(stat, *self.dist_args)
elif alternative in ["smaller", "s"]:
pvalue = self.dist.cdf(stat, *self.dist_args)
else:
raise ValueError("invalid alternative")
return stat, pvalue | z- or t-test for hypothesis that mean is equal to value
Parameters
----------
value : array_like
value under the null hypothesis
alternative : str
'two-sided', 'larger', 'smaller'
Returns
-------
stat : ndarray
test statistic
pvalue : ndarray
p-value of the hypothesis test, the distribution is given by
the attribute of the instance, specified in `__init__`. Default
if not specified is the normal distribution. | t_test | python | statsmodels/statsmodels | statsmodels/tsa/base/prediction.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/prediction.py | BSD-3-Clause |
def conf_int(self, alpha=0.05):
"""
Confidence interval construction for the predicted mean.
This is currently only available for t and z tests.
Parameters
----------
alpha : float, optional
The significance level for the prediction interval.
The default `alpha` = .05 returns a 95% confidence interval.
Returns
-------
pi : {ndarray, DataFrame}
The array has the lower and the upper limit of the prediction
interval in the columns.
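Examples
--------
A minimal sketch, assuming the same ``PredictionResults`` construction
as in ``t_test`` above (hypothetical values):
>>> import numpy as np
>>> pred = PredictionResults(np.array([1.0]), np.array([4.0]))
>>> np.round(pred.conf_int(alpha=0.05), 3)
array([[-2.92,  4.92]])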
"""
se = self.se_mean
q = self.dist.ppf(1 - alpha / 2.0, *self.dist_args)
lower = self.predicted_mean - q * se
upper = self.predicted_mean + q * se
ci = np.column_stack((lower, upper))
if self._use_pandas:
return self._wrap_pandas(ci, columns=["lower", "upper"])
return ci | Confidence interval construction for the predicted mean.
This is currently only available for t and z tests.
Parameters
----------
alpha : float, optional
The significance level for the prediction interval.
The default `alpha` = .05 returns a 95% confidence interval.
Returns
-------
pi : {ndarray, DataFrame}
The array has the lower and the upper limit of the prediction
interval in the columns. | conf_int | python | statsmodels/statsmodels | statsmodels/tsa/base/prediction.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/prediction.py | BSD-3-Clause |
def summary_frame(self, alpha=0.05):
"""
Summary frame of mean, variance and confidence interval.
Returns
-------
DataFrame
DataFrame containing four columns:
* mean
* mean_se
* mean_ci_lower
* mean_ci_upper
Notes
-----
The default `alpha` = 0.05 produces a confidence interval with 95%
coverage.
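Examples
--------
A minimal sketch, assuming the same ``PredictionResults`` construction
as in ``t_test`` above (hypothetical values):
>>> import numpy as np
>>> pred = PredictionResults(np.array([1.0]), np.array([4.0]))
>>> list(pred.summary_frame().columns)
['mean', 'mean_se', 'mean_ci_lower', 'mean_ci_upper']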
"""
ci_mean = np.asarray(self.conf_int(alpha=alpha))
lower, upper = ci_mean[:, 0], ci_mean[:, 1]
to_include = {
"mean": self.predicted_mean,
"mean_se": self.se_mean,
"mean_ci_lower": lower,
"mean_ci_upper": upper,
}
return pd.DataFrame(to_include) | Summary frame of mean, variance and confidence interval.
Returns
-------
DataFrame
DataFrame containing four columns:
* mean
* mean_se
* mean_ci_lower
* mean_ci_upper
Notes
-----
The default `alpha` = 0.05 produces a confidence interval with 95%
coverage.
def get_index_loc(key, index):
"""
Get the location of a specific key in an index
Parameters
----------
key : label
The key for which to find the location if the underlying index is
a DateIndex or a location if the underlying index is a RangeIndex
or an Index with an integer dtype.
index : pd.Index
The index to search.
Returns
-------
loc : int
The location of the key
index : pd.Index
The index including the key; this is a copy of the original index
unless the index had to be expanded to accommodate `key`.
index_was_expanded : bool
Whether or not the index was expanded to accommodate `key`.
Notes
-----
If `key` is past the end of the given index, and the index is either
an Index with an integral dtype or a date index, this function extends
the index up to and including key, and then returns the location in the
new index.
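Examples
--------
An illustrative sketch, extending a ``RangeIndex`` past its end
(hypothetical values):
>>> import pandas as pd
>>> loc, index, expanded = get_index_loc(5, pd.RangeIndex(4))
>>> loc, len(index), expanded
(5, 6, True)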
"""
base_index = index
index = base_index
date_index = isinstance(base_index, (PeriodIndex, DatetimeIndex))
int_index = is_int_index(base_index)
range_index = isinstance(base_index, RangeIndex)
index_class = type(base_index)
nobs = len(index)
# Special handling for RangeIndex
if range_index and isinstance(key, (int, np.integer)):
# Negative indices (that lie in the Index)
if key < 0 and -key <= nobs:
key = nobs + key
# Out-of-sample (note that we include key itself in the new index)
elif key > nobs - 1:
# See gh5835. Remove the except after pandas 0.25 required.
try:
base_index_start = base_index.start
base_index_step = base_index.step
except AttributeError:
base_index_start = base_index._start
base_index_step = base_index._step
stop = base_index_start + (key + 1) * base_index_step
index = RangeIndex(
start=base_index_start, stop=stop, step=base_index_step
)
# Special handling for NumericIndex
if (
not range_index
and int_index
and not date_index
and isinstance(key, (int, np.integer))
):
# Negative indices (that lie in the Index)
if key < 0 and -key <= nobs:
key = nobs + key
# Out-of-sample (note that we include key itself in the new index)
elif key > base_index[-1]:
index = Index(np.arange(base_index[0], int(key + 1)))
# Special handling for date indexes
if date_index:
# Use index type to choose creation function
if index_class is DatetimeIndex:
index_fn = date_range
else:
index_fn = period_range
# Integer key (i.e. already given a location)
if isinstance(key, (int, np.integer)):
# Negative indices (that lie in the Index)
if key < 0 and -key < nobs:
key = index[nobs + key]
# Out-of-sample (note that we include key itself in the new
# index)
elif key > len(base_index) - 1:
index = index_fn(
start=base_index[0],
periods=int(key + 1),
freq=base_index.freq,
)
key = index[-1]
else:
key = index[key]
# Other key types (i.e. string date or some datetime-like object)
else:
# Convert the key to the appropriate date-like object
if index_class is PeriodIndex:
date_key = Period(key, freq=base_index.freq)
else:
date_key = Timestamp(key)
# Out-of-sample
if date_key > base_index[-1]:
# First create an index that may not always include `key`
index = index_fn(
start=base_index[0], end=date_key, freq=base_index.freq
)
# Now make sure we include `key`
if not index[-1] == date_key:
index = index_fn(
start=base_index[0],
periods=len(index) + 1,
freq=base_index.freq,
)
# To avoid possible inconsistencies with `get_loc` below,
# set the key directly equal to the last index location
key = index[-1]
# Get the location
if date_index:
# (note that get_loc will throw a KeyError if key is invalid)
loc = index.get_loc(key)
elif int_index or range_index:
# For NumericIndex and RangeIndex, key is assumed to be the location
# and not an index value (this assumption is required to support
# RangeIndex)
try:
index[key]
# We want to raise a KeyError in this case, to keep the exception
# consistent across index types.
# - Attempting to index with an out-of-bound location (e.g.
# index[10] on an index of length 9) will raise an IndexError
# (as of Pandas 0.22)
# - Attempting to index with a type that cannot be cast to integer
# (e.g. a non-numeric string) will raise a ValueError if the
# index is RangeIndex (otherwise will raise an IndexError)
# (as of Pandas 0.22)
except (IndexError, ValueError) as e:
raise KeyError(str(e))
loc = key
else:
loc = index.get_loc(key)
# Check if we now have a modified index
index_was_expanded = index is not base_index
# Return the index through the end of the loc / slice
if isinstance(loc, slice):
end = loc.stop - 1
else:
end = loc
return loc, index[: end + 1], index_was_expanded | Get the location of a specific key in an index
Parameters
----------
key : label
The key for which to find the location if the underlying index is
a DateIndex or a location if the underlying index is a RangeIndex
or an Index with an integer dtype.
index : pd.Index
The index to search.
Returns
-------
loc : int
The location of the key
index : pd.Index
The index including the key; this is a copy of the original index
unless the index had to be expanded to accommodate `key`.
index_was_expanded : bool
Whether or not the index was expanded to accommodate `key`.
Notes
-----
If `key` is past the end of the given index, and the index is either
an Index with an integral dtype or a date index, this function extends
the index up to and including key, and then returns the location in the
new index. | get_index_loc | python | statsmodels/statsmodels | statsmodels/tsa/base/tsa_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/tsa_model.py | BSD-3-Clause |
def get_index_label_loc(key, index, row_labels):
"""
Get the location of a specific key in an index or model row labels
Parameters
----------
key : label
The key for which to find the location if the underlying index is
a DateIndex or is only being used as row labels, or a location if
the underlying index is a RangeIndex or a NumericIndex.
index : pd.Index
The index to search.
row_labels : pd.Index
Row labels to search if key not found in index
Returns
-------
loc : int
The location of the key
index : pd.Index
The index including the key; this is a copy of the original index
unless the index had to be expanded to accommodate `key`.
index_was_expanded : bool
Whether or not the index was expanded to accommodate `key`.
Notes
-----
This function expands on `get_index_loc` by first trying the given
base index (or the model's index if the base index was not given) and
then falling back to try again with the model row labels as the base
index.
"""
try:
loc, index, index_was_expanded = get_index_loc(key, index)
except KeyError as e:
try:
if not isinstance(key, (int, np.integer)):
loc = row_labels.get_loc(key)
else:
raise
# Require scalar
# Pandas may return a slice if there are multiple matching
# locations that are monotonic increasing (otherwise it may
# return an array of integer locations, see below).
if isinstance(loc, slice):
loc = loc.start
if isinstance(loc, np.ndarray):
# Pandas may return a mask (boolean array), for e.g.:
# pd.Index(list('abcb')).get_loc('b')
if loc.dtype == bool:
# Return the first True value
# (we know there is at least one True value if we're
# here because otherwise the get_loc call would have
# raised an exception)
loc = np.argmax(loc)
# Finally, Pandas may return an integer array of
# locations that match the given value, for e.g.
# pd.DatetimeIndex(['2001-02', '2001-01']).get_loc('2001')
# (this appears to be slightly undocumented behavior, since
# only int, slice, and mask are mentioned in docs for
# pandas.Index.get_loc as of 0.23.4)
else:
loc = loc[0]
if not isinstance(loc, numbers.Integral):
raise
index = row_labels[: loc + 1]
index_was_expanded = False
except Exception:
raise e
return loc, index, index_was_expanded | Get the location of a specific key in an index or model row labels
Parameters
----------
key : label
The key for which to find the location if the underlying index is
a DateIndex or is only being used as row labels, or a location if
the underlying index is a RangeIndex or a NumericIndex.
index : pd.Index
The index to search.
row_labels : pd.Index
Row labels to search if key not found in index
Returns
-------
loc : int
The location of the key
index : pd.Index
The index including the key; this is a copy of the original index
unless the index had to be expanded to accommodate `key`.
index_was_expanded : bool
Whether or not the index was expanded to accommodate `key`.
Notes
-----
This function expands on `get_index_loc` by first trying the given
base index (or the model's index if the base index was not given) and
then falling back to try again with the model row labels as the base
index. | get_index_label_loc | python | statsmodels/statsmodels | statsmodels/tsa/base/tsa_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/tsa_model.py | BSD-3-Clause |
def get_prediction_index(
start,
end,
nobs,
base_index,
index=None,
silent=False,
index_none=False,
index_generated=None,
data=None,
) -> tuple[int, int, int, Index | None]:
"""
Get the location of a specific key in an index or model row labels
Parameters
----------
start : label
The key at which to start prediction. Depending on the underlying
model's index, may be an integer, a date (string, datetime object,
pd.Timestamp, or pd.Period object), or some other object in the
model's row labels.
end : label
The key at which to end prediction (note that this key will be
*included* in prediction). Depending on the underlying
model's index, may be an integer, a date (string, datetime object,
pd.Timestamp, or pd.Period object), or some other object in the
model's row labels.
nobs : int
The number of observations in the sample.
base_index : pd.Index
The model's underlying index, against which `start` and `end` are
resolved.
index : pd.Index, optional
Optionally an index to associate the predicted results to. If None,
an attempt is made to create an index for the predicted results
from the model's index or model's row labels.
silent : bool, optional
Argument to silence warnings.
Returns
-------
start : int
The index / observation location at which to begin prediction.
end : int
The index / observation location at which to end in-sample
prediction. The maximum value for this is nobs-1.
out_of_sample : int
The number of observations to forecast after the end of the sample.
prediction_index : pd.Index or None
The index associated with the prediction results. This index covers
the range [start, end + out_of_sample]. If the model has no given
index and no given row labels (i.e. endog/exog is not Pandas), then
this will be None.
Notes
-----
The arguments `start` and `end` behave differently, depending on if
they are integer or not. If either is an integer, then it is assumed
to refer to a *location* in the index, not to an index value. On the
other hand, if it is a date string or some other type of object, then
it is assumed to refer to an index *value*. In all cases, the returned
`start` and `end` values refer to index *locations* (so in the former
case, the given location is validated and returned whereas in the
latter case a location is found that corresponds to the given index
value).
This difference in behavior is necessary to support `RangeIndex`. This
is because integers for a RangeIndex could refer either to index values
or to index locations in an ambiguous way (while for `NumericIndex`,
since we have required them to be full indexes, there is no ambiguity).
"""
# Convert index keys (start, end) to index locations and get associated
# indexes.
try:
start, _, start_oos = get_index_label_loc(
start, base_index, data.row_labels
)
except KeyError:
raise KeyError(
"The `start` argument could not be matched to a"
" location related to the index of the data."
)
if end is None:
end = max(start, len(base_index) - 1)
try:
end, end_index, end_oos = get_index_label_loc(
end, base_index, data.row_labels
)
except KeyError:
raise KeyError(
"The `end` argument could not be matched to a"
" location related to the index of the data."
)
# Handle slices (if the given index keys cover more than one date)
if isinstance(start, slice):
start = start.start
if isinstance(end, slice):
end = end.stop - 1
# Get the actual index for the prediction
prediction_index = end_index[start:]
# Validate prediction options
if end < start:
raise ValueError("Prediction must have `end` after `start`.")
# Handle custom prediction index
# First, if we were given an index, check that it's the right size and
# use it if so
if index is not None:
if not len(prediction_index) == len(index):
raise ValueError(
"Invalid `index` provided in prediction."
" Must have length consistent with `start`"
" and `end` arguments."
)
# But if we weren't given Pandas input, this index will not be
# used because the data will not be wrapped; in that case, issue
# a warning
if not isinstance(data, PandasData) and not silent:
warnings.warn(
"Because the model data (`endog`, `exog`) were"
" not given as Pandas objects, the prediction"
" output will be Numpy arrays, and the given"
" `index` argument will only be used"
" internally.",
ValueWarning,
stacklevel=2,
)
prediction_index = Index(index)
# Now, if we *do not* have a supported index, but we were given some
# kind of index...
elif index_generated and not index_none:
# If we are in sample, and have row labels, use them
if data.row_labels is not None and not (start_oos or end_oos):
prediction_index = data.row_labels[start : end + 1]
# Otherwise, warn the user that they will get a NumericIndex
else:
warnings.warn(
"No supported index is available. In the next"
" version, calling this method in a model"
" without a supported index will result in an"
" exception.",
FutureWarning,
stacklevel=2,
)
elif index_none:
prediction_index = None
# For backwards compatibility, set `predict_*` values
if prediction_index is not None:
data.predict_start = prediction_index[0]
data.predict_end = prediction_index[-1]
data.predict_dates = prediction_index
else:
data.predict_start = None
data.predict_end = None
data.predict_dates = None
# Compute out-of-sample observations
out_of_sample = max(end - (nobs - 1), 0)
end -= out_of_sample
return start, end, out_of_sample, prediction_index | Get the location of a specific key in an index or model row labels
Parameters
----------
start : label
The key at which to start prediction. Depending on the underlying
model's index, may be an integer, a date (string, datetime object,
pd.Timestamp, or pd.Period object), or some other object in the
model's row labels.
end : label
The key at which to end prediction (note that this key will be
*included* in prediction). Depending on the underlying
model's index, may be an integer, a date (string, datetime object,
pd.Timestamp, or pd.Period object), or some other object in the
model's row labels.
nobs : int
The number of observations in the sample.
base_index : pd.Index
The model's underlying index, against which `start` and `end` are
resolved.
index : pd.Index, optional
Optionally an index to associate the predicted results to. If None,
an attempt is made to create an index for the predicted results
from the model's index or model's row labels.
silent : bool, optional
Argument to silence warnings.
Returns
-------
start : int
The index / observation location at which to begin prediction.
end : int
The index / observation location at which to end in-sample
prediction. The maximum value for this is nobs-1.
out_of_sample : int
The number of observations to forecast after the end of the sample.
prediction_index : pd.Index or None
The index associated with the prediction results. This index covers
the range [start, end + out_of_sample]. If the model has no given
index and no given row labels (i.e. endog/exog is not Pandas), then
this will be None.
Notes
-----
The arguments `start` and `end` behave differently, depending on if
they are integer or not. If either is an integer, then it is assumed
to refer to a *location* in the index, not to an index value. On the
other hand, if it is a date string or some other type of object, then
it is assumed to refer to an index *value*. In all cases, the returned
`start` and `end` values refer to index *locations* (so in the former
case, the given location is validated and returned whereas in the
latter case a location is found that corresponds to the given index
value).
This difference in behavior is necessary to support `RangeIndex`. This
is because integers for a RangeIndex could refer either to index values
or to index locations in an ambiguous way (while for `NumericIndex`,
since we have required them to be full indexes, there is no ambiguity). | get_prediction_index | python | statsmodels/statsmodels | statsmodels/tsa/base/tsa_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/tsa_model.py | BSD-3-Clause |
def _init_dates(self, dates=None, freq=None):
"""
Initialize dates
Parameters
----------
dates : array_like, optional
An array like object containing dates.
freq : str, tuple, datetime.timedelta, DateOffset or None, optional
A frequency specification for either `dates` or the row labels from
the endog / exog data.
Notes
-----
Creates `self._index` and related attributes. `self._index` is always
a Pandas index, and it is always NumericIndex, DatetimeIndex, or
PeriodIndex.
If Pandas objects, endog / exog may have any type of index. If it is
a NumericIndex with values 0, 1, ..., nobs-1 or if it is (coercible to)
a DatetimeIndex or PeriodIndex *with an associated frequency*, then it
is called a "supported" index. Otherwise it is called an "unsupported"
index.
Supported indexes are standardized (i.e. a list of date strings is
converted to a DatetimeIndex) and the result is put in `self._index`.
Unsupported indexes are ignored, and a supported NumericIndex is
generated and put in `self._index`. Warnings are issued in this case
to alert the user if the returned index from some operation (e.g.
forecasting) is different from the original data's index. However,
whenever possible (e.g. purely in-sample prediction), the original
index is returned.
The benefit of supported indexes is that they allow *forecasting*, i.e.
it is possible to extend them in a reasonable way. Thus every model
must have an underlying supported index, even if it is just a generated
NumericIndex.
"""
# Get our index from `dates` if available, otherwise from whatever
# Pandas index we might have retrieved from endog, exog
if dates is not None:
index = dates
else:
index = self.data.row_labels
# Sanity check that we do not have a `freq` without an index
if index is None and freq is not None:
raise ValueError("Frequency provided without associated index.")
# If an index is available, see if it is a date-based index or if it
# can be coerced to one. (If it cannot we'll fall back, below, to an
# internal, 0, 1, ... nobs-1 integer index for modeling purposes)
inferred_freq = False
if index is not None:
# Try to coerce to date-based index
if not isinstance(index, (DatetimeIndex, PeriodIndex)):
try:
# Only try to coerce non-numeric index types (string,
# list of date-times, etc.)
# Note that np.asarray(Float64Index([...])) yields an
# object dtype array in earlier versions of Pandas (and so
# will not have is_numeric_dtype == True), so explicitly
# check for it here. But note also that in very early
# Pandas (~0.12), Float64Index does not exist (and so the
# statsmodels compat makes it an empty tuple, so in that
# case also check if the first element is a float.
_index = np.asarray(index)
if (
is_numeric_dtype(_index)
or is_float_index(index)
or (isinstance(_index[0], float))
):
raise ValueError("Numeric index given")
# If a non-index Pandas series was given, only keep its
# values (because we must have a pd.Index type, below, and
# pd.to_datetime will return a Series when passed
# non-list-like objects)
if isinstance(index, Series):
index = index.values
# All coercion is done via pd.to_datetime
# Note: date coercion via pd.to_datetime does not handle
# string versions of PeriodIndex objects most of the time.
_index = to_datetime(index)
# Older versions of Pandas can sometimes fail here and
# return a numpy array - check to make sure it's an index
if not isinstance(_index, Index):
raise ValueError("Could not coerce to date index")
index = _index
except Exception:
# Only want to actually raise an exception if `dates` was
# provided but cannot be coerced. If we got the index from
# the row_labels, we'll just ignore it and use the integer
# index below
if dates is not None:
raise ValueError(
"Non-date index index provided to"
" `dates` argument."
)
# Now, if we were given, or coerced, a date-based index, make sure
# it has an associated frequency
if isinstance(index, (DatetimeIndex, PeriodIndex)):
# If no frequency, try to get an inferred frequency
if freq is None and index.freq is None:
freq = index.inferred_freq
# If we got an inferred frequency, alert the user
if freq is not None:
inferred_freq = True
if freq is not None:
warnings.warn(
"No frequency information was provided, so inferred "
f"frequency {freq} will be used.",
ValueWarning,
stacklevel=2,
)
# Convert the passed freq to a pandas offset object
if freq is not None:
freq = to_offset(freq)
# Now, if no frequency information is available from the index
# itself or from the `freq` argument, raise an exception
if freq is None and index.freq is None:
# But again, only want to raise the exception if `dates`
# was provided.
if dates is not None:
raise ValueError(
"No frequency information was"
" provided with date index and no"
" frequency could be inferred."
)
# However, if the index itself has no frequency information but
# the `freq` argument is available (or was inferred), construct
# a new index with an associated frequency
elif freq is not None and index.freq is None:
resampled_index = date_range(
start=index[0], end=index[-1], freq=freq
)
if not inferred_freq and not (resampled_index == index).all():
raise ValueError(
"The given frequency argument could"
" not be matched to the given index."
)
index = resampled_index
# Finally, if the index itself has a frequency and there was
# also a given frequency, raise an exception if they are not
# equal
elif (
freq is not None
and not inferred_freq
and not (index.freq == freq)
):
raise ValueError(
"The given frequency argument is"
" incompatible with the given index."
)
# Finally, raise an exception if we could not coerce to date-based
# but we were given a frequency argument
elif freq is not None:
raise ValueError(
"Given index could not be coerced to dates"
" but `freq` argument was provided."
)
# Get attributes of the index
has_index = index is not None
date_index = isinstance(index, (DatetimeIndex, PeriodIndex))
int_index = is_int_index(index)
range_index = isinstance(index, RangeIndex)
has_freq = index.freq is not None if date_index else None
increment = Index(range(self.endog.shape[0]))
is_increment = index.equals(increment) if int_index else None
if date_index:
try:
is_monotonic = index.is_monotonic_increasing
except AttributeError:
# Remove after pandas 1.5 is minimum
is_monotonic = index.is_monotonic
else:
is_monotonic = None
# Issue warnings for unsupported indexes
if has_index and not (date_index or range_index or is_increment):
warnings.warn(
"An unsupported index was provided. As a result, forecasts "
"cannot be generated. To use the model for forecasting, use on the "
"the supported classes of index.",
ValueWarning,
stacklevel=2,
)
if date_index and not has_freq:
warnings.warn(
"A date index has been provided, but it has no"
" associated frequency information and so will be"
" ignored when e.g. forecasting.",
ValueWarning,
stacklevel=2,
)
if date_index and not is_monotonic:
warnings.warn(
"A date index has been provided, but it is not"
" monotonic and so will be ignored when e.g."
" forecasting.",
ValueWarning,
stacklevel=2,
)
# Construct the internal index
index_generated = False
valid_index = (
(date_index and has_freq and is_monotonic)
or (int_index and is_increment)
or range_index
)
if valid_index:
_index = index
else:
_index = increment
index_generated = True
self._index = _index
self._index_generated = index_generated
self._index_none = index is None
self._index_int64 = int_index and not range_index and not date_index
self._index_dates = date_index and not index_generated
self._index_freq = self._index.freq if self._index_dates else None
self._index_inferred_freq = inferred_freq
# For backwards compatibility, set data.dates, data.freq
self.data.dates = self._index if self._index_dates else None
self.data.freq = self._index.freqstr if self._index_dates else None | Initialize dates
Parameters
----------
dates : array_like, optional
An array like object containing dates.
freq : str, tuple, datetime.timedelta, DateOffset or None, optional
A frequency specification for either `dates` or the row labels from
the endog / exog data.
Notes
-----
Creates `self._index` and related attributes. `self._index` is always
a Pandas index, and it is always NumericIndex, DatetimeIndex, or
PeriodIndex.
If Pandas objects, endog / exog may have any type of index. If it is
a NumericIndex with values 0, 1, ..., nobs-1 or if it is (coercible to)
a DatetimeIndex or PeriodIndex *with an associated frequency*, then it
is called a "supported" index. Otherwise it is called an "unsupported"
index.
Supported indexes are standardized (i.e. a list of date strings is
converted to a DatetimeIndex) and the result is put in `self._index`.
Unsupported indexes are ignored, and a supported NumericIndex is
generated and put in `self._index`. Warnings are issued in this case
to alert the user if the returned index from some operation (e.g.
forecasting) is different from the original data's index. However,
whenever possible (e.g. purely in-sample prediction), the original
index is returned.
The benefit of supported indexes is that they allow *forecasting*, i.e.
it is possible to extend them in a reasonable way. Thus every model
must have an underlying supported index, even if it is just a generated
NumericIndex. | _init_dates | python | statsmodels/statsmodels | statsmodels/tsa/base/tsa_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/tsa_model.py | BSD-3-Clause |
def _get_index_loc(self, key, base_index=None):
"""
Get the location of a specific key in an index
Parameters
----------
key : label
The key for which to find the location if the underlying index is
a DateIndex or a location if the underlying index is a RangeIndex
or a NumericIndex.
base_index : pd.Index, optional
Optionally the base index to search. If None, the model's index is
searched.
Returns
-------
loc : int
The location of the key
index : pd.Index
The index including the key; this is a copy of the original index
unless the index had to be expanded to accommodate `key`.
index_was_expanded : bool
Whether or not the index was expanded to accommodate `key`.
Notes
-----
If `key` is past the end of the given index, and the index is either
a NumericIndex or a date index, this function extends the index up to
and including key, and then returns the location in the new index.
"""
if base_index is None:
base_index = self._index
return get_index_loc(key, base_index) | Get the location of a specific key in an index
Parameters
----------
key : label
The key for which to find the location if the underlying index is
a DateIndex or a location if the underlying index is a RangeIndex
or a NumericIndex.
base_index : pd.Index, optional
Optionally the base index to search. If None, the model's index is
searched.
Returns
-------
loc : int
The location of the key
index : pd.Index
The index including the key; this is a copy of the original index
unless the index had to be expanded to accommodate `key`.
index_was_expanded : bool
Whether or not the index was expanded to accommodate `key`.
Notes
-----
If `key` is past the end of the given index, and the index is either
a NumericIndex or a date index, this function extends the index up to
and including key, and then returns the location in the new index. | _get_index_loc | python | statsmodels/statsmodels | statsmodels/tsa/base/tsa_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/tsa_model.py | BSD-3-Clause |
def _get_index_label_loc(self, key, base_index=None):
"""
Get the location of a specific key in an index or model row labels
Parameters
----------
key : label
The key for which to find the location if the underlying index is
a DateIndex or is only being used as row labels, or a location if
the underlying index is a RangeIndex or a NumericIndex.
base_index : pd.Index, optional
Optionally the base index to search. If None, the model's index is
searched.
Returns
-------
loc : int
The location of the key
index : pd.Index
The index including the key; this is a copy of the original index
unless the index had to be expanded to accommodate `key`.
index_was_expanded : bool
Whether or not the index was expanded to accommodate `key`.
Notes
-----
This method expands on `_get_index_loc` by first trying the given
base index (or the model's index if the base index was not given) and
then falling back to try again with the model row labels as the base
index.
"""
if base_index is None:
base_index = self._index
return get_index_label_loc(key, base_index, self.data.row_labels) | Get the location of a specific key in an index or model row labels
Parameters
----------
key : label
The key for which to find the location if the underlying index is
a DateIndex or is only being used as row labels, or a location if
the underlying index is a RangeIndex or a NumericIndex.
base_index : pd.Index, optional
Optionally the base index to search. If None, the model's index is
searched.
Returns
-------
loc : int
The location of the key
index : pd.Index
The index including the key; this is a copy of the original index
unless the index had to be expanded to accommodate `key`.
index_was_expanded : bool
Whether or not the index was expanded to accommodate `key`.
Notes
-----
This method expands on `_get_index_loc` by first trying the given
base index (or the model's index if the base index was not given) and
then falling back to try again with the model row labels as the base
index. | _get_index_label_loc | python | statsmodels/statsmodels | statsmodels/tsa/base/tsa_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/tsa_model.py | BSD-3-Clause |
def _get_prediction_index(
self, start, end, index=None, silent=False
) -> tuple[int, int, int, Index | None]:
"""
Get the location of a specific key in an index or model row labels
Parameters
----------
start : label
The key at which to start prediction. Depending on the underlying
model's index, may be an integer, a date (string, datetime object,
pd.Timestamp, or pd.Period object), or some other object in the
model's row labels.
end : label
The key at which to end prediction (note that this key will be
*included* in prediction). Depending on the underlying
model's index, may be an integer, a date (string, datetime object,
pd.Timestamp, or pd.Period object), or some other object in the
model's row labels.
index : pd.Index, optional
Optionally an index to associate the predicted results to. If None,
an attempt is made to create an index for the predicted results
from the model's index or model's row labels.
silent : bool, optional
Argument to silence warnings.
Returns
-------
start : int
The index / observation location at which to begin prediction.
end : int
The index / observation location at which to end in-sample
prediction. The maximum value for this is nobs-1.
out_of_sample : int
The number of observations to forecast after the end of the sample.
prediction_index : pd.Index or None
The index associated with the prediction results. This index covers
the range [start, end + out_of_sample]. If the model has no given
index and no given row labels (i.e. endog/exog is not Pandas), then
this will be None.
Notes
-----
The arguments `start` and `end` behave differently, depending on if
they are integer or not. If either is an integer, then it is assumed
to refer to a *location* in the index, not to an index value. On the
other hand, if it is a date string or some other type of object, then
it is assumed to refer to an index *value*. In all cases, the returned
`start` and `end` values refer to index *locations* (so in the former
case, the given location is validated and returned whereas in the
latter case a location is found that corresponds to the given index
value).
This difference in behavior is necessary to support `RangeIndex`. This
is because integers for a RangeIndex could refer either to index values
or to index locations in an ambiguous way (while for `NumericIndex`,
since we have required them to be full indexes, there is no ambiguity).
"""
nobs = len(self.endog)
return get_prediction_index(
start,
end,
nobs,
base_index=self._index,
index=index,
silent=silent,
index_none=self._index_none,
index_generated=self._index_generated,
data=self.data,
) | Get the location of a specific key in an index or model row labels
Parameters
----------
start : label
The key at which to start prediction. Depending on the underlying
model's index, may be an integer, a date (string, datetime object,
pd.Timestamp, or pd.Period object), or some other object in the
model's row labels.
end : label
The key at which to end prediction (note that this key will be
*included* in prediction). Depending on the underlying
model's index, may be an integer, a date (string, datetime object,
pd.Timestamp, or pd.Period object), or some other object in the
model's row labels.
index : pd.Index, optional
Optionally an index to associate the predicted results to. If None,
an attempt is made to create an index for the predicted results
from the model's index or model's row labels.
silent : bool, optional
Argument to silence warnings.
Returns
-------
start : int
The index / observation location at which to begin prediction.
end : int
The index / observation location at which to end in-sample
prediction. The maximum value for this is nobs-1.
out_of_sample : int
The number of observations to forecast after the end of the sample.
prediction_index : pd.Index or None
The index associated with the prediction results. This index covers
the range [start, end + out_of_sample]. If the model has no given
index and no given row labels (i.e. endog/exog is not Pandas), then
this will be None.
Notes
-----
The arguments `start` and `end` behave differently, depending on if
they are integer or not. If either is an integer, then it is assumed
to refer to a *location* in the index, not to an index value. On the
other hand, if it is a date string or some other type of object, then
it is assumed to refer to an index *value*. In all cases, the returned
`start` and `end` values refer to index *locations* (so in the former
case, the given location is validated and returned whereas in the
latter case a location is found that corresponds to the given index
value).
This difference in behavior is necessary to support `RangeIndex`. This
is because integers for a RangeIndex could refer either to index values
or to index locations in an ambiguous way (while for `NumericIndex`,
since we have required them to be full indexes, there is no ambiguity). | _get_prediction_index | python | statsmodels/statsmodels | statsmodels/tsa/base/tsa_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/base/tsa_model.py | BSD-3-Clause |
def ets_austourists_fit_results_R():
"""
Dictionary of ets fit results obtained with script ``results/fit_ets.R``.
"""
path = (
pathlib.Path(__file__).parent
/ "results"
/ "fit_ets_results_seasonal.json"
)
return obtain_R_results(path) | Dictionary of ets fit results obtained with script ``results/fit_ets.R``. | ets_austourists_fit_results_R | python | statsmodels/statsmodels | statsmodels/tsa/tests/test_exponential_smoothing.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/tests/test_exponential_smoothing.py | BSD-3-Clause |
def ets_oildata_fit_results_R():
"""
Dictionary of ets fit results obtained with script ``results/fit_ets.R``.
"""
path = (
pathlib.Path(__file__).parent
/ "results"
/ "fit_ets_results_nonseasonal.json"
)
return obtain_R_results(path) | Dictionary of ets fit results obtained with script ``results/fit_ets.R``. | ets_oildata_fit_results_R | python | statsmodels/statsmodels | statsmodels/tsa/tests/test_exponential_smoothing.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/tests/test_exponential_smoothing.py | BSD-3-Clause |
def fit_austourists_with_R_params(model, results_R, set_state=False):
"""
Fit the model with params as found by R's forecast package
"""
params = get_params_from_R(results_R)
with model.fix_params(dict(zip(model.param_names, params))):
fit = model.fit(disp=False)
if set_state:
states_R = get_states_from_R(results_R, model._k_states)
fit.states = states_R
return fit | Fit the model with params as found by R's forecast package | fit_austourists_with_R_params | python | statsmodels/statsmodels | statsmodels/tsa/tests/test_exponential_smoothing.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/tests/test_exponential_smoothing.py | BSD-3-Clause |
def test_simulate_keywords(austourists_model_fit):
"""
Check whether all keywords are accepted and work without throwing errors.
"""
fit = austourists_model_fit
# test anchor
assert_almost_equal(
fit.simulate(4, anchor=-1, random_state=0).values,
fit.simulate(4, anchor="2015-12-31", random_state=0).values,
)
assert_almost_equal(
fit.simulate(4, anchor="end", random_state=0).values,
fit.simulate(4, anchor="2015-12-31", random_state=0).values,
)
# test different random error options
fit.simulate(4, repetitions=10)
fit.simulate(4, repetitions=10, random_errors=scipy.stats.norm)
fit.simulate(4, repetitions=10, random_errors=scipy.stats.norm())
fit.simulate(4, repetitions=10, random_errors=np.random.randn(4, 10))
fit.simulate(4, repetitions=10, random_errors="bootstrap")
# test seeding
res = fit.simulate(4, repetitions=10, random_state=10).values
res2 = fit.simulate(
4, repetitions=10, random_state=np.random.RandomState(10)
).values
assert np.all(res == res2) | Check whether all keywords are accepted and work without throwing errors. | test_simulate_keywords | python | statsmodels/statsmodels | statsmodels/tsa/tests/test_exponential_smoothing.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/tests/test_exponential_smoothing.py | BSD-3-Clause
def fix_ols_attribute(val, attrib, res):
"""
Fixes to correct for the df adjustment between OLS and AutoReg with nonrobust cov
"""
nparam = res.k_constant + res.df_model
nobs = nparam + res.df_resid
df_correction = (nobs - nparam) / nobs
if attrib in ("scale",):
return val * df_correction
elif attrib == "df_model":
return val + res.k_constant
elif res.cov_type != "nonrobust":
return val
elif attrib in ("bse", "conf_int"):
return val * np.sqrt(df_correction)
elif attrib in ("cov_params", "scale"):
return val * df_correction
elif attrib in ("f_test",):
return val / df_correction
elif attrib in ("tvalues",):
return val / np.sqrt(df_correction)
return val | Fixes to correct for the df adjustment between OLS and AutoReg with nonrobust cov | fix_ols_attribute | python | statsmodels/statsmodels | statsmodels/tsa/tests/test_ar.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/tests/test_ar.py | BSD-3-Clause
def predict_conditional(self, params):
"""
In-sample prediction, conditional on the current and previous regime
Parameters
----------
params : array_like
Array of parameters at which to create predictions.
Returns
-------
predict : array_like
Array of predictions conditional on current, and possibly past,
regimes
"""
params = np.array(params, ndmin=1)
# Prediction is based on:
# y_t = x_t beta^{(S_t)} +
# \phi_1^{(S_t)} (y_{t-1} - x_{t-1} beta^{(S_t-1)}) + ...
# \phi_p^{(S_t)} (y_{t-p} - x_{t-p} beta^{(S_t-p)}) + eps_t
if self._k_exog > 0:
xb = []
for i in range(self.k_regimes):
coeffs = params[self.parameters[i, 'exog']]
xb.append(np.dot(self.orig_exog, coeffs))
predict = np.zeros(
(self.k_regimes,) * (self.order + 1) + (self.nobs,),
dtype=np.promote_types(np.float64, params.dtype))
# Iterate over S_{t} = i
for i in range(self.k_regimes):
ar_coeffs = params[self.parameters[i, 'autoregressive']]
# y_t - x_t beta^{(S_t)}
ix = self._predict_slices[:]
ix[0] = i
ix = tuple(ix)
if self._k_exog > 0:
predict[ix] += xb[i][self.order:]
            # Iterate over j = 1, .., p
for j in range(1, self.order + 1):
for k in range(self.k_regimes):
# This gets a specific time-period / regime slice:
# S_{t} = i, S_{t-j} = k, across all other time-period /
# regime slices.
ix = self._predict_slices[:]
ix[0] = i
ix[j] = k
ix = tuple(ix)
start = self.order - j
end = -j
if self._k_exog > 0:
predict[ix] += ar_coeffs[j-1] * (
self.orig_endog[start:end] - xb[k][start:end])
else:
predict[ix] += ar_coeffs[j-1] * (
self.orig_endog[start:end])
return predict | In-sample prediction, conditional on the current and previous regime
Parameters
----------
params : array_like
Array of parameters at which to create predictions.
Returns
-------
predict : array_like
Array of predictions conditional on current, and possibly past,
regimes | predict_conditional | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_autoregression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_autoregression.py | BSD-3-Clause |
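A minimal NumPy sketch of the recursion above for the order = 1, no-exog case (data and AR coefficients are hypothetical): predict[i, k, t] is the prediction when S_t = i and S_{t-1} = k, and with no regressors there is no x beta term, so the previous regime has no effect on the prediction.

import numpy as np

rng = np.random.default_rng(0)
orig_endog = rng.standard_normal(11)      # nobs + order values, order = 1
y_lag = orig_endog[:-1]                   # y_{t-1} for t = 1, ..., nobs
ar = np.array([0.3, 0.9])                 # hypothetical phi^{(0)}, phi^{(1)}

predict = np.zeros((2, 2, len(y_lag)))    # axes: (S_t, S_{t-1}, t)
for i in range(2):                        # current regime S_t = i
    for k in range(2):                    # previous regime S_{t-1} = k
        # No exog, so the prediction depends only on the current
        # regime's AR coefficient applied to the lagged observation.
        predict[i, k] = ar[i] * y_lag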
def _conditional_loglikelihoods(self, params):
"""
Compute loglikelihoods conditional on the current period's regime and
the last `self.order` regimes.
"""
# Get the residuals
resid = self._resid(params)
# Compute the conditional likelihoods
variance = params[self.parameters['variance']].squeeze()
if self.switching_variance:
variance = np.reshape(variance, (self.k_regimes, 1, 1))
conditional_loglikelihoods = (
-0.5 * resid**2 / variance - 0.5 * np.log(2 * np.pi * variance))
return conditional_loglikelihoods | Compute loglikelihoods conditional on the current period's regime and
the last `self.order` regimes. | _conditional_loglikelihoods | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_autoregression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_autoregression.py | BSD-3-Clause |
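The expression above is simply the Gaussian log-density evaluated at the conditional residuals; a quick numerical check against scipy:

import numpy as np
from scipy import stats

resid = np.array([-0.5, 0.0, 1.3])
variance = 0.8
manual = -0.5 * resid**2 / variance - 0.5 * np.log(2 * np.pi * variance)
assert np.allclose(manual, stats.norm.logpdf(resid, scale=np.sqrt(variance)))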
def _em_iteration(self, params0):
"""
EM iteration
"""
# Inherited parameters
result, params1 = markov_switching.MarkovSwitching._em_iteration(
self, params0)
tmp = np.sqrt(result.smoothed_marginal_probabilities)
# Regression coefficients
coeffs = None
if self._k_exog > 0:
coeffs = self._em_exog(result, self.endog, self.exog,
self.parameters.switching['exog'], tmp)
for i in range(self.k_regimes):
params1[self.parameters[i, 'exog']] = coeffs[i]
# Autoregressive
if self.order > 0:
if self._k_exog > 0:
ar_coeffs, variance = self._em_autoregressive(
result, coeffs)
else:
ar_coeffs = self._em_exog(
result, self.endog, self.exog_ar,
self.parameters.switching['autoregressive'])
variance = self._em_variance(
result, self.endog, self.exog_ar, ar_coeffs, tmp)
for i in range(self.k_regimes):
params1[self.parameters[i, 'autoregressive']] = ar_coeffs[i]
params1[self.parameters['variance']] = variance
return result, params1 | EM iteration | _em_iteration | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_autoregression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_autoregression.py | BSD-3-Clause |
def _em_autoregressive(self, result, betas, tmp=None):
"""
EM step for autoregressive coefficients and variances
"""
if tmp is None:
tmp = np.sqrt(result.smoothed_marginal_probabilities)
resid = np.zeros((self.k_regimes, self.nobs + self.order))
resid[:] = self.orig_endog
if self._k_exog > 0:
for i in range(self.k_regimes):
resid[i] -= np.dot(self.orig_exog, betas[i])
# The difference between this and `_em_exog` is that here we have a
# different endog and exog for each regime
coeffs = np.zeros((self.k_regimes,) + (self.order,))
variance = np.zeros((self.k_regimes,))
exog = np.zeros((self.nobs, self.order))
for i in range(self.k_regimes):
endog = resid[i, self.order:]
exog = lagmat(resid[i], self.order)[self.order:]
tmp_endog = tmp[i] * endog
tmp_exog = tmp[i][:, None] * exog
coeffs[i] = np.dot(np.linalg.pinv(tmp_exog), tmp_endog)
if self.switching_variance:
tmp_resid = endog - np.dot(exog, coeffs[i])
variance[i] = (np.sum(
tmp_resid**2 * result.smoothed_marginal_probabilities[i]) /
np.sum(result.smoothed_marginal_probabilities[i]))
else:
tmp_resid = tmp_endog - np.dot(tmp_exog, coeffs[i])
variance[i] = np.sum(tmp_resid**2)
# Variances
if not self.switching_variance:
variance = variance.sum() / self.nobs
return coeffs, variance | EM step for autoregressive coefficients and variances | _em_autoregressive | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_autoregression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_autoregression.py | BSD-3-Clause |
def start_params(self):
"""
(array) Starting parameters for maximum likelihood estimation.
"""
# Inherited parameters
params = markov_switching.MarkovSwitching.start_params.fget(self)
# OLS for starting parameters
endog = self.endog.copy()
if self._k_exog > 0 and self.order > 0:
exog = np.c_[self.exog, self.exog_ar]
elif self._k_exog > 0:
exog = self.exog
elif self.order > 0:
exog = self.exog_ar
if self._k_exog > 0 or self.order > 0:
beta = np.dot(np.linalg.pinv(exog), endog)
variance = np.var(endog - np.dot(exog, beta))
else:
variance = np.var(endog)
# Regression coefficients
if self._k_exog > 0:
if np.any(self.switching_coeffs):
for i in range(self.k_regimes):
params[self.parameters[i, 'exog']] = (
beta[:self._k_exog] * (i / self.k_regimes))
else:
params[self.parameters['exog']] = beta[:self._k_exog]
# Autoregressive
if self.order > 0:
if np.any(self.switching_ar):
for i in range(self.k_regimes):
params[self.parameters[i, 'autoregressive']] = (
beta[self._k_exog:] * (i / self.k_regimes))
else:
params[self.parameters['autoregressive']] = beta[self._k_exog:]
# Variance
if self.switching_variance:
params[self.parameters['variance']] = (
np.linspace(variance / 10., variance, num=self.k_regimes))
else:
params[self.parameters['variance']] = variance
return params | (array) Starting parameters for maximum likelihood estimation. | start_params | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_autoregression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_autoregression.py | BSD-3-Clause |
def param_names(self):
"""
(list of str) List of human readable parameter names (for parameters
actually included in the model).
"""
# Inherited parameters
param_names = np.array(
markov_regression.MarkovRegression.param_names.fget(self),
dtype=object)
# Autoregressive
if np.any(self.switching_ar):
for i in range(self.k_regimes):
param_names[self.parameters[i, 'autoregressive']] = [
'ar.L%d[%d]' % (j+1, i) for j in range(self.order)]
else:
param_names[self.parameters['autoregressive']] = [
'ar.L%d' % (j+1) for j in range(self.order)]
return param_names.tolist() | (list of str) List of human readable parameter names (for parameters
actually included in the model). | param_names | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_autoregression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_autoregression.py | BSD-3-Clause |
def transform_params(self, unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
Parameters
----------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer, to be
transformed.
Returns
-------
constrained : array_like
Array of constrained parameters which may be used in likelihood
evaluation.
"""
# Inherited parameters
constrained = super().transform_params(
unconstrained)
# Autoregressive
# TODO may provide unexpected results when some coefficients are not
# switching
for i in range(self.k_regimes):
s = self.parameters[i, 'autoregressive']
constrained[s] = constrain_stationary_univariate(
unconstrained[s])
return constrained | Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
Parameters
----------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer, to be
transformed.
Returns
-------
constrained : array_like
Array of constrained parameters which may be used in likelihood
evaluation. | transform_params | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_autoregression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_autoregression.py | BSD-3-Clause |
def untransform_params(self, constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : array_like
Array of constrained parameters used in likelihood evaluation, to
be transformed.
Returns
-------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer.
"""
# Inherited parameters
unconstrained = super().untransform_params(
constrained)
# Autoregressive
# TODO may provide unexpected results when some coefficients are not
# switching
for i in range(self.k_regimes):
s = self.parameters[i, 'autoregressive']
unconstrained[s] = unconstrain_stationary_univariate(
constrained[s])
return unconstrained | Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : array_like
Array of constrained parameters used in likelihood evaluation, to
be transformed.
Returns
-------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer. | untransform_params | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_autoregression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_autoregression.py | BSD-3-Clause |
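A round-trip sketch of the two transforms above using the same statespace helpers: the constrained coefficients always define a stationary AR(p), and untransforming recovers the optimizer-space values (the input vector here is arbitrary).

import numpy as np
from statsmodels.tsa.statespace.tools import (
    constrain_stationary_univariate, unconstrain_stationary_univariate)

unconstrained = np.array([1.5, -0.7])     # arbitrary optimizer-space values
constrained = constrain_stationary_univariate(unconstrained)
assert np.allclose(
    unconstrain_stationary_univariate(constrained), unconstrained)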
def predict_conditional(self, params):
"""
In-sample prediction, conditional on the current regime
Parameters
----------
params : array_like
Array of parameters at which to perform prediction.
Returns
-------
predict : array_like
Array of predictions conditional on current, and possibly past,
regimes
"""
params = np.array(params, ndmin=1)
# Since in the base model the values are the same across columns, we
# only compute a single column, and then expand it below.
predict = np.zeros((self.k_regimes, self.nobs), dtype=params.dtype)
for i in range(self.k_regimes):
# Predict
if self._k_exog > 0:
coeffs = params[self.parameters[i, 'exog']]
predict[i] = np.dot(self.exog, coeffs)
return predict[:, None, :] | In-sample prediction, conditional on the current regime
Parameters
----------
params : array_like
Array of parameters at which to perform prediction.
Returns
-------
predict : array_like
Array of predictions conditional on current, and possibly past,
regimes | predict_conditional | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_regression.py | BSD-3-Clause |
def _conditional_loglikelihoods(self, params):
"""
Compute loglikelihoods conditional on the current period's regime
"""
# Get residuals
resid = self._resid(params)
# Compute the conditional likelihoods
variance = params[self.parameters['variance']].squeeze()
if self.switching_variance:
variance = np.reshape(variance, (self.k_regimes, 1, 1))
conditional_loglikelihoods = (
-0.5 * resid**2 / variance - 0.5 * np.log(2 * np.pi * variance))
return conditional_loglikelihoods | Compute loglikelihoods conditional on the current period's regime | _conditional_loglikelihoods | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_regression.py | BSD-3-Clause |
def _em_iteration(self, params0):
"""
EM iteration
Notes
-----
This uses the inherited _em_iteration method for computing the
non-TVTP transition probabilities and then performs the EM step for
regression coefficients and variances.
"""
# Inherited parameters
result, params1 = super()._em_iteration(params0)
tmp = np.sqrt(result.smoothed_marginal_probabilities)
# Regression coefficients
coeffs = None
if self._k_exog > 0:
coeffs = self._em_exog(result, self.endog, self.exog,
self.parameters.switching['exog'], tmp)
for i in range(self.k_regimes):
params1[self.parameters[i, 'exog']] = coeffs[i]
# Variances
params1[self.parameters['variance']] = self._em_variance(
result, self.endog, self.exog, coeffs, tmp)
# params1[self.parameters['variance']] = 0.33282116
return result, params1 | EM iteration
Notes
-----
This uses the inherited _em_iteration method for computing the
non-TVTP transition probabilities and then performs the EM step for
regression coefficients and variances. | _em_iteration | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_regression.py | BSD-3-Clause |
def _em_exog(self, result, endog, exog, switching, tmp=None):
"""
EM step for regression coefficients
"""
k_exog = exog.shape[1]
coeffs = np.zeros((self.k_regimes, k_exog))
# First, estimate non-switching coefficients
if not np.all(switching):
nonswitching_exog = exog[:, ~switching]
nonswitching_coeffs = (
np.dot(np.linalg.pinv(nonswitching_exog), endog))
coeffs[:, ~switching] = nonswitching_coeffs
endog = endog - np.dot(nonswitching_exog, nonswitching_coeffs)
# Next, get switching coefficients
if np.any(switching):
switching_exog = exog[:, switching]
if tmp is None:
tmp = np.sqrt(result.smoothed_marginal_probabilities)
for i in range(self.k_regimes):
tmp_endog = tmp[i] * endog
tmp_exog = tmp[i][:, np.newaxis] * switching_exog
coeffs[i, switching] = (
np.dot(np.linalg.pinv(tmp_exog), tmp_endog))
return coeffs | EM step for regression coefficients | _em_exog | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_regression.py | BSD-3-Clause |
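The sqrt-probability scaling above turns each regime's M-step into a weighted least squares problem; a sketch with synthetic data and weights, confirming the pinv solution matches the WLS normal equations:

import numpy as np

rng = np.random.default_rng(0)
exog = rng.standard_normal((50, 2))
endog = exog @ np.array([1.0, -2.0]) + rng.standard_normal(50)
prob = rng.uniform(0.01, 1.0, size=50)   # stand-in smoothed probabilities
w = np.sqrt(prob)

# OLS on sqrt-weighted data ...
beta_pinv = np.linalg.pinv(w[:, None] * exog) @ (w * endog)
# ... equals WLS with weights prob: (X'PX)^{-1} X'Py
beta_wls = np.linalg.solve(exog.T @ (prob[:, None] * exog),
                           exog.T @ (prob * endog))
assert np.allclose(beta_pinv, beta_wls)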
def _em_variance(self, result, endog, exog, betas, tmp=None):
"""
EM step for variances
"""
k_exog = 0 if exog is None else exog.shape[1]
if self.switching_variance:
variance = np.zeros(self.k_regimes)
for i in range(self.k_regimes):
if k_exog > 0:
resid = endog - np.dot(exog, betas[i])
else:
resid = endog
variance[i] = (
np.sum(resid**2 *
result.smoothed_marginal_probabilities[i]) /
np.sum(result.smoothed_marginal_probabilities[i]))
else:
variance = 0
if tmp is None:
tmp = np.sqrt(result.smoothed_marginal_probabilities)
for i in range(self.k_regimes):
tmp_endog = tmp[i] * endog
if k_exog > 0:
tmp_exog = tmp[i][:, np.newaxis] * exog
resid = tmp_endog - np.dot(tmp_exog, betas[i])
else:
resid = tmp_endog
variance += np.sum(resid**2)
variance /= self.nobs
return variance | EM step for variances | _em_variance | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_regression.py | BSD-3-Clause |
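In the switching branch above, each regime's variance is a probability-weighted average of its squared residuals; the non-switching branch instead pools the weighted squared residuals across regimes and divides by nobs. A sketch of the switching case with synthetic inputs:

import numpy as np

rng = np.random.default_rng(0)
resid = rng.standard_normal(100)
prob = rng.dirichlet(np.ones(2), size=100).T    # (k_regimes, nobs)

# Weighted mean of squared residuals, one variance per regime
variance = (prob * resid**2).sum(axis=1) / prob.sum(axis=1)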
def start_params(self):
"""
(array) Starting parameters for maximum likelihood estimation.
Notes
-----
These are not very sophisticated and / or good. We set equal transition
probabilities and interpolate regression coefficients between zero and
the OLS estimates, where the interpolation is based on the regime
number. We rely heavily on the EM algorithm to quickly find much better
starting parameters, which are then used by the typical scoring
approach.
"""
# Inherited parameters
params = markov_switching.MarkovSwitching.start_params.fget(self)
# Regression coefficients
if self._k_exog > 0:
beta = np.dot(np.linalg.pinv(self.exog), self.endog)
variance = np.var(self.endog - np.dot(self.exog, beta))
if np.any(self.switching_coeffs):
for i in range(self.k_regimes):
params[self.parameters[i, 'exog']] = (
beta * (i / self.k_regimes))
else:
params[self.parameters['exog']] = beta
else:
variance = np.var(self.endog)
# Variances
if self.switching_variance:
params[self.parameters['variance']] = (
np.linspace(variance / 10., variance, num=self.k_regimes))
else:
params[self.parameters['variance']] = variance
return params | (array) Starting parameters for maximum likelihood estimation.
Notes
-----
These are not very sophisticated and / or good. We set equal transition
probabilities and interpolate regression coefficients between zero and
the OLS estimates, where the interpolation is based on the regime
number. We rely heavily on the EM algorithm to quickly find much better
starting parameters, which are then used by the typical scoring
approach. | start_params | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_regression.py | BSD-3-Clause |
def param_names(self):
"""
(list of str) List of human readable parameter names (for parameters
actually included in the model).
"""
# Inherited parameters
param_names = np.array(
markov_switching.MarkovSwitching.param_names.fget(self),
dtype=object)
# Regression coefficients
if np.any(self.switching_coeffs):
for i in range(self.k_regimes):
param_names[self.parameters[i, 'exog']] = [
'%s[%d]' % (exog_name, i) for exog_name in self.exog_names]
else:
param_names[self.parameters['exog']] = self.exog_names
# Variances
if self.switching_variance:
for i in range(self.k_regimes):
param_names[self.parameters[i, 'variance']] = 'sigma2[%d]' % i
else:
param_names[self.parameters['variance']] = 'sigma2'
return param_names.tolist() | (list of str) List of human readable parameter names (for parameters
actually included in the model). | param_names | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_regression.py | BSD-3-Clause |
def transform_params(self, unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
Parameters
----------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer, to be
transformed.
Returns
-------
constrained : array_like
Array of constrained parameters which may be used in likelihood
evaluation.
"""
# Inherited parameters
constrained = super().transform_params(
unconstrained)
# Nothing to do for regression coefficients
constrained[self.parameters['exog']] = (
unconstrained[self.parameters['exog']])
# Force variances to be positive
constrained[self.parameters['variance']] = (
unconstrained[self.parameters['variance']]**2)
return constrained | Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
Parameters
----------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer, to be
transformed.
Returns
-------
constrained : array_like
Array of constrained parameters which may be used in likelihood
evaluation. | transform_params | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_regression.py | BSD-3-Clause |
def untransform_params(self, constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : array_like
Array of constrained parameters used in likelihood evaluation, to
be transformed.
Returns
-------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer.
"""
# Inherited parameters
unconstrained = super().untransform_params(
constrained)
# Nothing to do for regression coefficients
unconstrained[self.parameters['exog']] = (
constrained[self.parameters['exog']])
# Force variances to be positive
unconstrained[self.parameters['variance']] = (
constrained[self.parameters['variance']]**0.5)
return unconstrained | Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : array_like
Array of constrained parameters used in likelihood evaluation, to
be transformed.
Returns
-------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer. | untransform_params | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_regression.py | BSD-3-Clause |
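The variance transform pair above in isolation: squaring lets the optimizer range over the whole real line while the likelihood always sees a nonnegative variance, and the square root inverts it (up to sign, which the likelihood cannot distinguish).

import numpy as np

unconstrained = np.array([-1.3, 0.4])
constrained = unconstrained**2
assert np.all(constrained >= 0)
assert np.allclose(constrained**0.5, np.abs(unconstrained))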
def _logistic(x):
"""
Note that this is not a vectorized function
"""
x = np.array(x)
# np.exp(x) / (1 + np.exp(x))
if x.ndim == 0:
y = np.reshape(x, (1, 1, 1))
# np.exp(x[i]) / (1 + np.sum(np.exp(x[:])))
elif x.ndim == 1:
y = np.reshape(x, (len(x), 1, 1))
# np.exp(x[i,t]) / (1 + np.sum(np.exp(x[:,t])))
elif x.ndim == 2:
y = np.reshape(x, (x.shape[0], 1, x.shape[1]))
# np.exp(x[i,j,t]) / (1 + np.sum(np.exp(x[:,j,t])))
elif x.ndim == 3:
y = x
else:
raise NotImplementedError
tmp = np.c_[np.zeros((y.shape[-1], y.shape[1], 1)), y.T].T
evaluated = np.reshape(np.exp(y - logsumexp(tmp, axis=0)), x.shape)
return evaluated | Note that this is not a vectorized function | _logistic | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
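In the scalar case the evaluation above reduces to exp(x) / (1 + exp(x)), computed through logsumexp so that large |x| cannot overflow; a quick check:

import numpy as np
from scipy.special import logsumexp   # same helper used by _logistic

for x in (-50.0, 0.0, 3.0, 50.0):
    stable = np.exp(x - logsumexp([0.0, x]))
    assert np.isclose(stable, 1.0 / (1.0 + np.exp(-x)))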
def _partials_logistic(x):
"""
Note that this is not a vectorized function
"""
tmp = _logistic(x)
# k
if tmp.ndim == 0:
return tmp - tmp**2
# k x k
elif tmp.ndim == 1:
partials = np.diag(tmp - tmp**2)
# k x k x t
elif tmp.ndim == 2:
partials = [np.diag(tmp[:, t] - tmp[:, t]**2)
for t in range(tmp.shape[1])]
shape = tmp.shape[1], tmp.shape[0], tmp.shape[0]
partials = np.concatenate(partials).reshape(shape).transpose((1, 2, 0))
# k x k x j x t
else:
partials = [[np.diag(tmp[:, j, t] - tmp[:, j, t]**2)
for t in range(tmp.shape[2])]
for j in range(tmp.shape[1])]
shape = tmp.shape[1], tmp.shape[2], tmp.shape[0], tmp.shape[0]
partials = np.concatenate(partials).reshape(shape).transpose(
(2, 3, 0, 1))
for i in range(tmp.shape[0]):
for j in range(i):
partials[i, j, ...] = -tmp[i, ...] * tmp[j, ...]
partials[j, i, ...] = partials[i, j, ...]
return partials | Note that this is not a vectorized function | _partials_logistic | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def cy_hamilton_filter_log(initial_probabilities, regime_transition,
conditional_loglikelihoods, model_order):
"""
Hamilton filter in log space using Cython inner loop.
Parameters
----------
initial_probabilities : ndarray
Array of initial probabilities, shaped (k_regimes,) giving the
distribution of the regime process at time t = -order where order
is a nonnegative integer.
regime_transition : ndarray
Matrix of regime transition probabilities, shaped either
(k_regimes, k_regimes, 1) or if there are time-varying transition
probabilities (k_regimes, k_regimes, nobs + order). Entry [i, j,
t] contains the probability of moving from j at time t-1 to i at
time t, so each matrix regime_transition[:, :, t] should be left
stochastic. The first order entries and initial_probabilities are
used to produce the initial joint distribution of dimension order +
1 at time t=0.
conditional_loglikelihoods : ndarray
Array of loglikelihoods conditional on the last `order+1` regimes,
shaped (k_regimes,)*(order + 1) + (nobs,).
Returns
-------
filtered_marginal_probabilities : ndarray
Array containing Pr[S_t=s_t | Y_t] - the probability of being in each
regime conditional on time t information. Shaped (k_regimes, nobs).
predicted_joint_probabilities : ndarray
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t-1}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t-1
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
joint_loglikelihoods : ndarray
        Array of loglikelihoods conditional on time t information,
shaped (nobs,).
filtered_joint_probabilities : ndarray
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
"""
# Dimensions
k_regimes = len(initial_probabilities)
nobs = conditional_loglikelihoods.shape[-1]
order = conditional_loglikelihoods.ndim - 2
dtype = conditional_loglikelihoods.dtype
# Check for compatible shapes.
incompatible_shapes = (
regime_transition.shape[-1] not in (1, nobs + model_order)
or regime_transition.shape[:2] != (k_regimes, k_regimes)
or conditional_loglikelihoods.shape[0] != k_regimes)
if incompatible_shapes:
raise ValueError('Arguments do not have compatible shapes')
# Convert to log space
initial_probabilities = np.log(initial_probabilities)
regime_transition = np.log(np.maximum(regime_transition, 1e-20))
# Storage
# Pr[S_t = s_t | Y_t]
filtered_marginal_probabilities = (
np.zeros((k_regimes, nobs), dtype=dtype))
# Pr[S_t = s_t, ... S_{t-r} = s_{t-r} | Y_{t-1}]
# Has k_regimes^(order+1) elements
predicted_joint_probabilities = np.zeros(
(k_regimes,) * (order + 1) + (nobs,), dtype=dtype)
# log(f(y_t | Y_{t-1}))
joint_loglikelihoods = np.zeros((nobs,), dtype)
# Pr[S_t = s_t, ... S_{t-r+1} = s_{t-r+1} | Y_t]
# Has k_regimes^order elements
filtered_joint_probabilities = np.zeros(
(k_regimes,) * (order + 1) + (nobs + 1,), dtype=dtype)
# Initial probabilities
filtered_marginal_probabilities[:, 0] = initial_probabilities
tmp = np.copy(initial_probabilities)
shape = (k_regimes, k_regimes)
transition_t = 0
for i in range(order):
if regime_transition.shape[-1] > 1:
transition_t = i
tmp = np.reshape(regime_transition[..., transition_t],
shape + (1,) * i) + tmp
filtered_joint_probabilities[..., 0] = tmp
# Get appropriate subset of transition matrix
if regime_transition.shape[-1] > 1:
regime_transition = regime_transition[..., model_order:]
# Run Cython filter iterations
prefix, dtype, _ = find_best_blas_type((
regime_transition, conditional_loglikelihoods, joint_loglikelihoods,
predicted_joint_probabilities, filtered_joint_probabilities))
func = prefix_hamilton_filter_log_map[prefix]
func(nobs, k_regimes, order, regime_transition,
conditional_loglikelihoods.reshape(k_regimes**(order+1), nobs),
joint_loglikelihoods,
predicted_joint_probabilities.reshape(k_regimes**(order+1), nobs),
filtered_joint_probabilities.reshape(k_regimes**(order+1), nobs+1))
# Save log versions for smoother
predicted_joint_probabilities_log = predicted_joint_probabilities
filtered_joint_probabilities_log = filtered_joint_probabilities
# Convert out of log scale
predicted_joint_probabilities = np.exp(predicted_joint_probabilities)
filtered_joint_probabilities = np.exp(filtered_joint_probabilities)
# S_t | t
filtered_marginal_probabilities = filtered_joint_probabilities[..., 1:]
for i in range(1, filtered_marginal_probabilities.ndim - 1):
filtered_marginal_probabilities = np.sum(
filtered_marginal_probabilities, axis=-2)
return (filtered_marginal_probabilities, predicted_joint_probabilities,
joint_loglikelihoods, filtered_joint_probabilities[..., 1:],
predicted_joint_probabilities_log,
filtered_joint_probabilities_log[..., 1:]) | Hamilton filter in log space using Cython inner loop.
Parameters
----------
initial_probabilities : ndarray
Array of initial probabilities, shaped (k_regimes,) giving the
distribution of the regime process at time t = -order where order
is a nonnegative integer.
regime_transition : ndarray
Matrix of regime transition probabilities, shaped either
(k_regimes, k_regimes, 1) or if there are time-varying transition
probabilities (k_regimes, k_regimes, nobs + order). Entry [i, j,
t] contains the probability of moving from j at time t-1 to i at
time t, so each matrix regime_transition[:, :, t] should be left
stochastic. The first order entries and initial_probabilities are
used to produce the initial joint distribution of dimension order +
1 at time t=0.
conditional_loglikelihoods : ndarray
Array of loglikelihoods conditional on the last `order+1` regimes,
shaped (k_regimes,)*(order + 1) + (nobs,).
Returns
-------
filtered_marginal_probabilities : ndarray
Array containing Pr[S_t=s_t | Y_t] - the probability of being in each
regime conditional on time t information. Shaped (k_regimes, nobs).
predicted_joint_probabilities : ndarray
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t-1}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t-1
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
joint_loglikelihoods : ndarray
    Array of loglikelihoods conditional on time t information,
shaped (nobs,).
filtered_joint_probabilities : ndarray
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t
information. Shaped (k_regimes,) * (order + 1) + (nobs,). | cy_hamilton_filter_log | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
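For reference, the recursion the Cython loop implements is easiest to see in the order = 0, time-invariant case. The sketch below is a plain-NumPy restatement of that special case, not the log-space code used above (which exists to avoid underflow for long samples and higher orders):

import numpy as np

def hamilton_filter_order0(initial_probabilities, P, conditional_loglikelihoods):
    # P is the left-stochastic (k, k) transition matrix;
    # conditional_loglikelihoods is (k, nobs).
    k, nobs = conditional_loglikelihoods.shape
    lik = np.exp(conditional_loglikelihoods)
    predicted = np.zeros((k, nobs))
    filtered = np.zeros((k, nobs))
    loglikelihoods = np.zeros(nobs)
    prev = initial_probabilities
    for t in range(nobs):
        predicted[:, t] = P @ prev              # Pr[S_t | Y_{t-1}]
        joint = predicted[:, t] * lik[:, t]     # f(y_t, S_t | Y_{t-1})
        f = joint.sum()                         # f(y_t | Y_{t-1})
        loglikelihoods[t] = np.log(f)
        filtered[:, t] = joint / f              # Pr[S_t | Y_t]
        prev = filtered[:, t]
    return filtered, predicted, loglikelihoods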
def cy_kim_smoother_log(regime_transition, predicted_joint_probabilities,
filtered_joint_probabilities):
"""
Kim smoother in log space using Cython inner loop.
Parameters
----------
regime_transition : ndarray
Matrix of regime transition probabilities, shaped either
(k_regimes, k_regimes, 1) or if there are time-varying transition
probabilities (k_regimes, k_regimes, nobs).
predicted_joint_probabilities : ndarray
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t-1}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t-1
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
filtered_joint_probabilities : ndarray
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
Returns
-------
smoothed_joint_probabilities : ndarray
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_T] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on all information.
Shaped (k_regimes,) * (order + 1) + (nobs,).
smoothed_marginal_probabilities : ndarray
Array containing Pr[S_t=s_t | Y_T] - the probability of being in each
regime conditional on all information. Shaped (k_regimes, nobs).
"""
# Dimensions
k_regimes = filtered_joint_probabilities.shape[0]
nobs = filtered_joint_probabilities.shape[-1]
order = filtered_joint_probabilities.ndim - 2
dtype = filtered_joint_probabilities.dtype
# Storage
smoothed_joint_probabilities = np.zeros(
(k_regimes,) * (order + 1) + (nobs,), dtype=dtype)
# Get appropriate subset of transition matrix
if regime_transition.shape[-1] == nobs + order:
regime_transition = regime_transition[..., order:]
# Convert to log space
regime_transition = np.log(np.maximum(regime_transition, 1e-20))
# Run Cython smoother iterations
prefix, dtype, _ = find_best_blas_type((
regime_transition, predicted_joint_probabilities,
filtered_joint_probabilities))
func = prefix_kim_smoother_log_map[prefix]
func(nobs, k_regimes, order, regime_transition,
predicted_joint_probabilities.reshape(k_regimes**(order+1), nobs),
filtered_joint_probabilities.reshape(k_regimes**(order+1), nobs),
smoothed_joint_probabilities.reshape(k_regimes**(order+1), nobs))
# Convert back from log space
smoothed_joint_probabilities = np.exp(smoothed_joint_probabilities)
# Get smoothed marginal probabilities S_t | T by integrating out
# S_{t-k+1}, S_{t-k+2}, ..., S_{t-1}
smoothed_marginal_probabilities = smoothed_joint_probabilities
for i in range(1, smoothed_marginal_probabilities.ndim - 1):
smoothed_marginal_probabilities = np.sum(
smoothed_marginal_probabilities, axis=-2)
return smoothed_joint_probabilities, smoothed_marginal_probabilities | Kim smoother in log space using Cython inner loop.
Parameters
----------
regime_transition : ndarray
Matrix of regime transition probabilities, shaped either
(k_regimes, k_regimes, 1) or if there are time-varying transition
probabilities (k_regimes, k_regimes, nobs).
predicted_joint_probabilities : ndarray
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t-1}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t-1
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
filtered_joint_probabilities : ndarray
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
Returns
-------
smoothed_joint_probabilities : ndarray
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_T] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on all information.
Shaped (k_regimes,) * (order + 1) + (nobs,).
smoothed_marginal_probabilities : ndarray
Array containing Pr[S_t=s_t | Y_T] - the probability of being in each
regime conditional on all information. Shaped (k_regimes, nobs). | cy_kim_smoother_log | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
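The corresponding order = 0 backward recursion, again as a plain-NumPy sketch of the special case: Pr[S_t | Y_T] reweights the filtered probabilities by P' applied to the ratio of smoothed to predicted one-step-ahead probabilities.

import numpy as np

def kim_smoother_order0(P, predicted, filtered):
    # predicted and filtered are the (k, nobs) Hamilton filter outputs;
    # P is the left-stochastic transition matrix.
    k, nobs = filtered.shape
    smoothed = np.zeros((k, nobs))
    smoothed[:, -1] = filtered[:, -1]
    for t in range(nobs - 2, -1, -1):
        ratio = smoothed[:, t + 1] / predicted[:, t + 1]
        smoothed[:, t] = filtered[:, t] * (P.T @ ratio)
    return smoothed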
def k_params(self):
"""
(int) Number of parameters in the model
"""
return self.parameters.k_params | (int) Number of parameters in the model | k_params | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def initialize_steady_state(self):
"""
Set initialization of regime probabilities to be steady-state values
Notes
-----
Only valid if there are not time-varying transition probabilities.
"""
if self.tvtp:
raise ValueError('Cannot use steady-state initialization when'
' the regime transition matrix is time-varying.')
self._initialization = 'steady-state'
self._initial_probabilities = None | Set initialization of regime probabilities to be steady-state values
Notes
-----
Only valid if there are not time-varying transition probabilities. | initialize_steady_state | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def initialize_known(self, probabilities, tol=1e-8):
"""
Set initialization of regime probabilities to use known values
"""
self._initialization = 'known'
probabilities = np.array(probabilities, ndmin=1)
if not probabilities.shape == (self.k_regimes,):
raise ValueError('Initial probabilities must be a vector of shape'
' (k_regimes,).')
if not np.abs(np.sum(probabilities) - 1) < tol:
raise ValueError('Initial probabilities vector must sum to one.')
self._initial_probabilities = probabilities | Set initialization of regime probabilities to use known values | initialize_known | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def initial_probabilities(self, params, regime_transition=None):
"""
Retrieve initial probabilities
"""
params = np.array(params, ndmin=1)
if self._initialization == 'steady-state':
if regime_transition is None:
regime_transition = self.regime_transition_matrix(params)
if regime_transition.ndim == 3:
regime_transition = regime_transition[..., 0]
m = regime_transition.shape[0]
A = np.c_[(np.eye(m) - regime_transition).T, np.ones(m)].T
try:
probabilities = np.linalg.pinv(A)[:, -1]
except np.linalg.LinAlgError:
raise RuntimeError('Steady-state probabilities could not be'
' constructed.')
elif self._initialization == 'known':
probabilities = self._initial_probabilities
else:
raise RuntimeError('Invalid initialization method selected.')
# Slightly bound probabilities away from zero (for filters in log
# space)
probabilities = np.maximum(probabilities, 1e-20)
return probabilities | Retrieve initial probabilities | initial_probabilities | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
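The steady-state branch above solves (I - P) pi = 0 subject to sum(pi) = 1 by stacking the two conditions and reading the last column of the pseudo-inverse; a small check that the result is the ergodic distribution (the transition matrix here is arbitrary):

import numpy as np

P = np.array([[0.9, 0.3],
              [0.1, 0.7]])     # left stochastic: columns sum to one
m = P.shape[0]
A = np.c_[(np.eye(m) - P).T, np.ones(m)].T
pi = np.linalg.pinv(A)[:, -1]
assert np.allclose(P @ pi, pi) and np.isclose(pi.sum(), 1.0)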
def regime_transition_matrix(self, params, exog_tvtp=None):
"""
Construct the left-stochastic transition matrix
Notes
-----
This matrix will either be shaped (k_regimes, k_regimes, 1) or if there
are time-varying transition probabilities, it will be shaped
(k_regimes, k_regimes, nobs).
The (i,j)th element of this matrix is the probability of transitioning
from regime j to regime i; thus the previous regime is represented in a
column and the next regime is represented by a row.
        It is left-stochastic, meaning that each column sums to one (because
        from regime j the process must transition to *some* regime, possibly
        j itself).
"""
params = np.array(params, ndmin=1)
if not self.tvtp:
regime_transition_matrix = np.zeros(
(self.k_regimes, self.k_regimes, 1),
dtype=np.promote_types(np.float64, params.dtype))
regime_transition_matrix[:-1, :, 0] = np.reshape(
params[self.parameters['regime_transition']],
(self.k_regimes-1, self.k_regimes))
regime_transition_matrix[-1, :, 0] = (
1 - np.sum(regime_transition_matrix[:-1, :, 0], axis=0))
else:
regime_transition_matrix = (
self._regime_transition_matrix_tvtp(params, exog_tvtp))
return regime_transition_matrix | Construct the left-stochastic transition matrix
Notes
-----
This matrix will either be shaped (k_regimes, k_regimes, 1) or if there
are time-varying transition probabilities, it will be shaped
(k_regimes, k_regimes, nobs).
The (i,j)th element of this matrix is the probability of transitioning
from regime j to regime i; thus the previous regime is represented in a
column and the next regime is represented by a row.
It is left-stochastic, meaning that each column sums to one (because
from regime j the process must transition to *some* regime, possibly
j itself). | regime_transition_matrix | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
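A sketch of the non-TVTP branch above for k_regimes = 3 (the probabilities are hypothetical): the parameter vector supplies the first k_regimes - 1 rows and the last row is the complement, so every column sums to one.

import numpy as np

k_regimes = 3
params = np.array([0.80, 0.10, 0.20,    # row 0: Pr[0 <- 0], Pr[0 <- 1], Pr[0 <- 2]
                   0.10, 0.70, 0.20])   # row 1: Pr[1 <- 0], Pr[1 <- 1], Pr[1 <- 2]
T = np.zeros((k_regimes, k_regimes))
T[:-1] = params.reshape(k_regimes - 1, k_regimes)
T[-1] = 1 - T[:-1].sum(axis=0)          # last row completes each column
assert np.allclose(T.sum(axis=0), 1.0)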
def predict(self, params, start=None, end=None, probabilities=None,
conditional=False):
"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
params : ndarray
Parameters at which to form predictions
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
probabilities : str or array_like, optional
Specifies the weighting probabilities used in constructing the
prediction as a weighted average. If a string, can be 'predicted',
'filtered', or 'smoothed'. Otherwise can be an array of
probabilities to use. Default is smoothed.
conditional : bool or int, optional
Whether or not to return predictions conditional on current or
past regimes. If False, returns a single vector of weighted
predictions. If True or 1, returns predictions conditional on the
current regime. For larger integers, returns predictions
conditional on the current regime and some number of past regimes.
Returns
-------
predict : ndarray
Array of out of in-sample predictions and / or out-of-sample
forecasts.
"""
if start is None:
start = self._index[0]
# Handle start, end
start, end, out_of_sample, prediction_index = (
self._get_prediction_index(start, end))
if out_of_sample > 0:
raise NotImplementedError
# Perform in-sample prediction
predict = self.predict_conditional(params)
squeezed = np.squeeze(predict)
# Check if we need to do weighted averaging
if squeezed.ndim - 1 > conditional:
# Determine in-sample weighting probabilities
if probabilities is None or probabilities == 'smoothed':
results = self.smooth(params, return_raw=True)
probabilities = results.smoothed_joint_probabilities
elif probabilities == 'filtered':
results = self.filter(params, return_raw=True)
probabilities = results.filtered_joint_probabilities
elif probabilities == 'predicted':
results = self.filter(params, return_raw=True)
probabilities = results.predicted_joint_probabilities
# Compute weighted average
predict = (predict * probabilities)
for i in range(predict.ndim - 1 - int(conditional)):
predict = np.sum(predict, axis=-2)
else:
predict = squeezed
return predict[start:end + out_of_sample + 1] | In-sample prediction and out-of-sample forecasting
Parameters
----------
params : ndarray
Parameters at which to form predictions
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
    parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
probabilities : str or array_like, optional
Specifies the weighting probabilities used in constructing the
prediction as a weighted average. If a string, can be 'predicted',
'filtered', or 'smoothed'. Otherwise can be an array of
probabilities to use. Default is smoothed.
conditional : bool or int, optional
Whether or not to return predictions conditional on current or
past regimes. If False, returns a single vector of weighted
predictions. If True or 1, returns predictions conditional on the
current regime. For larger integers, returns predictions
conditional on the current regime and some number of past regimes.
Returns
-------
predict : ndarray
Array of out of in-sample predictions and / or out-of-sample
forecasts. | predict | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def predict_conditional(self, params):
"""
In-sample prediction, conditional on the current, and possibly past,
regimes
Parameters
----------
params : array_like
Array of parameters at which to perform prediction.
Returns
-------
predict : array_like
Array of predictions conditional on current, and possibly past,
regimes
"""
raise NotImplementedError | In-sample prediction, conditional on the current, and possibly past,
regimes
Parameters
----------
params : array_like
Array of parameters at which to perform prediction.
Returns
-------
predict : array_like
Array of predictions conditional on current, and possibly past,
regimes | predict_conditional | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def _conditional_loglikelihoods(self, params):
"""
Compute likelihoods conditional on the current period's regime (and
the last self.order periods' regimes if self.order > 0).
Must be implemented in subclasses.
"""
raise NotImplementedError | Compute likelihoods conditional on the current period's regime (and
the last self.order periods' regimes if self.order > 0).
Must be implemented in subclasses. | _conditional_loglikelihoods | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def filter(self, params, transformed=True, cov_type=None, cov_kwds=None,
return_raw=False, results_class=None,
results_wrapper_class=None):
"""
Apply the Hamilton filter
Parameters
----------
params : array_like
Array of parameters at which to perform filtering.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True.
cov_type : str, optional
See `fit` for a description of covariance matrix types
for results object.
cov_kwds : dict or None, optional
See `fit` for a description of required keywords for alternative
covariance estimators
        return_raw : bool, optional
Whether or not to return only the raw Hamilton filter output or a
full results object. Default is to return a full results object.
results_class : type, optional
A results class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
results_wrapper_class : type, optional
A results wrapper class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
Returns
-------
MarkovSwitchingResults
"""
params = np.array(params, ndmin=1)
if not transformed:
params = self.transform_params(params)
# Save the parameter names
self.data.param_names = self.param_names
# Get the result
names = ['regime_transition', 'initial_probabilities',
'conditional_loglikelihoods',
'filtered_marginal_probabilities',
'predicted_joint_probabilities', 'joint_loglikelihoods',
'filtered_joint_probabilities',
'predicted_joint_probabilities_log',
'filtered_joint_probabilities_log']
result = HamiltonFilterResults(
self, Bunch(**dict(zip(names, self._filter(params)))))
# Wrap in a results object
return self._wrap_results(params, result, return_raw, cov_type,
cov_kwds, results_class,
results_wrapper_class) | Apply the Hamilton filter
Parameters
----------
params : array_like
Array of parameters at which to perform filtering.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True.
cov_type : str, optional
See `fit` for a description of covariance matrix types
for results object.
cov_kwds : dict or None, optional
See `fit` for a description of required keywords for alternative
covariance estimators
return_raw : bool, optional
Whether or not to return only the raw Hamilton filter output or a
full results object. Default is to return a full results object.
results_class : type, optional
A results class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
results_wrapper_class : type, optional
A results wrapper class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
Returns
-------
MarkovSwitchingResults | filter | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def smooth(self, params, transformed=True, cov_type=None, cov_kwds=None,
return_raw=False, results_class=None,
results_wrapper_class=None):
"""
Apply the Kim smoother and Hamilton filter
Parameters
----------
params : array_like
Array of parameters at which to perform filtering.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True.
cov_type : str, optional
See `fit` for a description of covariance matrix types
for results object.
cov_kwds : dict or None, optional
See `fit` for a description of required keywords for alternative
covariance estimators
        return_raw : bool, optional
Whether or not to return only the raw Hamilton filter output or a
full results object. Default is to return a full results object.
results_class : type, optional
A results class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
results_wrapper_class : type, optional
A results wrapper class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
Returns
-------
MarkovSwitchingResults
"""
params = np.array(params, ndmin=1)
if not transformed:
params = self.transform_params(params)
# Save the parameter names
self.data.param_names = self.param_names
# Hamilton filter
# TODO add option to filter to return logged values so that we do not
# need to re-log them for smoother
names = ['regime_transition', 'initial_probabilities',
'conditional_loglikelihoods',
'filtered_marginal_probabilities',
'predicted_joint_probabilities', 'joint_loglikelihoods',
'filtered_joint_probabilities',
'predicted_joint_probabilities_log',
'filtered_joint_probabilities_log']
result = Bunch(**dict(zip(names, self._filter(params))))
# Kim smoother
out = self._smooth(params, result.predicted_joint_probabilities_log,
result.filtered_joint_probabilities_log)
result['smoothed_joint_probabilities'] = out[0]
result['smoothed_marginal_probabilities'] = out[1]
result = KimSmootherResults(self, result)
# Wrap in a results object
return self._wrap_results(params, result, return_raw, cov_type,
cov_kwds, results_class,
results_wrapper_class) | Apply the Kim smoother and Hamilton filter
Parameters
----------
params : array_like
Array of parameters at which to perform filtering.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True.
cov_type : str, optional
See `fit` for a description of covariance matrix types
for results object.
cov_kwds : dict or None, optional
See `fit` for a description of required keywords for alternative
covariance estimators
return_raw : bool, optional
Whether or not to return only the raw Hamilton filter output or a
full results object. Default is to return a full results object.
results_class : type, optional
A results class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
results_wrapper_class : type, optional
A results wrapper class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
Returns
-------
MarkovSwitchingResults | smooth | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
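A minimal end-to-end usage sketch with MarkovRegression (a concrete subclass of this model) on simulated data with a mean shift halfway through the sample; the seed, sample sizes, and shift size are arbitrary:

import numpy as np
from statsmodels.tsa.regime_switching.markov_regression import MarkovRegression

rng = np.random.default_rng(0)
endog = np.r_[rng.standard_normal(100), 3 + rng.standard_normal(100)]

mod = MarkovRegression(endog, k_regimes=2)
res = mod.fit()                               # EM warm start, then scoring
probs = res.smoothed_marginal_probabilities   # Pr[S_t = s | Y_T]

# Filtering/smoothing can also be run directly at fixed parameters:
res_at_params = mod.smooth(res.params)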
def loglikeobs(self, params, transformed=True):
"""
Loglikelihood evaluation for each period
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True.
"""
params = np.array(params, ndmin=1)
if not transformed:
params = self.transform_params(params)
results = self._filter(params)
return results[5] | Loglikelihood evaluation for each period
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True. | loglikeobs | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def loglike(self, params, transformed=True):
"""
Loglikelihood evaluation
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True.
"""
return np.sum(self.loglikeobs(params, transformed)) | Loglikelihood evaluation
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True. | loglike | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def score(self, params, transformed=True):
"""
Compute the score function at params.
Parameters
----------
params : array_like
Array of parameters at which to evaluate the score
function.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True.
"""
params = np.array(params, ndmin=1)
return approx_fprime_cs(params, self.loglike, args=(transformed,)) | Compute the score function at params.
Parameters
----------
params : array_like
Array of parameters at which to evaluate the score
function.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True. | score | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
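approx_fprime_cs above relies on complex-step differentiation: evaluating the loglikelihood at params + i*h puts the derivative in the imaginary part, with no subtractive cancellation even for tiny h. A one-dimensional sketch of the trick:

import numpy as np

def f(x):
    return np.exp(x) * np.sin(x)

x0, h = 0.7, 1e-20
complex_step = np.imag(f(x0 + 1j * h)) / h      # f(x + ih).imag / h ~ f'(x)
exact = np.exp(x0) * (np.sin(x0) + np.cos(x0))
assert np.isclose(complex_step, exact)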
def score_obs(self, params, transformed=True):
"""
Compute the score per observation, evaluated at params
Parameters
----------
params : array_like
Array of parameters at which to evaluate the score
function.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True.
"""
params = np.array(params, ndmin=1)
return approx_fprime_cs(params, self.loglikeobs, args=(transformed,)) | Compute the score per observation, evaluated at params
Parameters
----------
params : array_like
Array of parameters at which to evaluate the score
function.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True. | score_obs | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def hessian(self, params, transformed=True):
"""
Hessian matrix of the likelihood function, evaluated at the given
parameters
Parameters
----------
params : array_like
Array of parameters at which to evaluate the Hessian
function.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True.
"""
params = np.array(params, ndmin=1)
return approx_hess_cs(params, self.loglike) | Hessian matrix of the likelihood function, evaluated at the given
parameters
Parameters
----------
params : array_like
Array of parameters at which to evaluate the Hessian
function.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True. | hessian | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def fit(self, start_params=None, transformed=True, cov_type='approx',
cov_kwds=None, method='bfgs', maxiter=100, full_output=1, disp=0,
callback=None, return_params=False, em_iter=5, search_reps=0,
search_iter=5, search_scale=1., **kwargs):
"""
Fits the model by maximum likelihood via Hamilton filter.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by Model.start_params.
transformed : bool, optional
Whether or not `start_params` is already transformed. Default is
True.
cov_type : str, optional
The type of covariance matrix estimator to use. Can be one of
'approx', 'opg', 'robust', or 'none'. Default is 'approx'.
cov_kwds : dict or None, optional
Keywords for alternative covariance estimators
method : str, optional
The `method` determines which solver from `scipy.optimize`
is used, and it can be chosen from among the following strings:
- 'newton' for Newton-Raphson, 'nm' for Nelder-Mead
- 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
- 'lbfgs' for limited-memory BFGS with optional box constraints
- 'powell' for modified Powell's method
- 'cg' for conjugate gradient
- 'ncg' for Newton-conjugate gradient
- 'basinhopping' for global basin-hopping solver
The explicit arguments in `fit` are passed to the solver,
with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports.
maxiter : int, optional
The maximum number of iterations to perform.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool, optional
Set to True to print convergence messages.
callback : callable callback(xk), optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
return_params : bool, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
em_iter : int, optional
Number of initial EM iteration steps used to improve starting
parameters.
search_reps : int, optional
Number of randomly drawn search parameters that are drawn around
`start_params` to try and improve starting parameters. Default is
0.
search_iter : int, optional
Number of initial EM iteration steps used to improve each of the
search parameter repetitions.
search_scale : float or array, optional
Scale of variates for random start parameter search.
**kwargs
Additional keyword arguments to pass to the optimizer.
Returns
-------
MarkovSwitchingResults
"""
if start_params is None:
start_params = self.start_params
transformed = True
else:
start_params = np.array(start_params, ndmin=1)
# Random search for better start parameters
if search_reps > 0:
start_params = self._start_params_search(
search_reps, start_params=start_params,
transformed=transformed, em_iter=search_iter,
scale=search_scale)
transformed = True
# Get better start params through EM algorithm
if em_iter and not self.tvtp:
start_params = self._fit_em(start_params, transformed=transformed,
maxiter=em_iter, tolerance=0,
return_params=True)
transformed = True
if transformed:
start_params = self.untransform_params(start_params)
# Maximum likelihood estimation by scoring
fargs = (False,)
mlefit = super().fit(start_params, method=method,
fargs=fargs,
maxiter=maxiter,
full_output=full_output,
disp=disp, callback=callback,
skip_hessian=True, **kwargs)
# Just return the fitted parameters if requested
if return_params:
result = self.transform_params(mlefit.params)
# Otherwise construct the results class if desired
else:
result = self.smooth(mlefit.params, transformed=False,
cov_type=cov_type, cov_kwds=cov_kwds)
result.mlefit = mlefit
result.mle_retvals = mlefit.mle_retvals
result.mle_settings = mlefit.mle_settings
return result | Fits the model by maximum likelihood via Hamilton filter.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by Model.start_params.
transformed : bool, optional
Whether or not `start_params` is already transformed. Default is
True.
cov_type : str, optional
The type of covariance matrix estimator to use. Can be one of
'approx', 'opg', 'robust', or 'none'. Default is 'approx'.
cov_kwds : dict or None, optional
Keywords for alternative covariance estimators
method : str, optional
The `method` determines which solver from `scipy.optimize`
is used, and it can be chosen from among the following strings:
- 'newton' for Newton-Raphson, 'nm' for Nelder-Mead
- 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
- 'lbfgs' for limited-memory BFGS with optional box constraints
- 'powell' for modified Powell's method
- 'cg' for conjugate gradient
- 'ncg' for Newton-conjugate gradient
- 'basinhopping' for global basin-hopping solver
The explicit arguments in `fit` are passed to the solver,
with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports.
maxiter : int, optional
The maximum number of iterations to perform.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool, optional
Set to True to print convergence messages.
callback : callable callback(xk), optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
return_params : bool, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
em_iter : int, optional
Number of initial EM iteration steps used to improve starting
parameters.
search_reps : int, optional
Number of randomly drawn search parameters that are drawn around
`start_params` to try and improve starting parameters. Default is
0.
search_iter : int, optional
Number of initial EM iteration steps used to improve each of the
search parameter repetitions.
search_scale : float or array, optional
Scale of variates for random start parameter search.
**kwargs
Additional keyword arguments to pass to the optimizer.
Returns
-------
MarkovSwitchingResults | fit | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
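Note: a hypothetical end-to-end usage sketch of `fit` (the simulated data and settings are illustrative, not from this file):

import numpy as np
import statsmodels.api as sm

np.random.seed(12345)
# Simulated series whose mean shifts halfway through the sample
endog = np.r_[np.random.normal(0.0, 1.0, size=100),
              np.random.normal(3.0, 1.0, size=100)]

mod = sm.tsa.MarkovRegression(endog, k_regimes=2)
# EM warm-up plus a small random search around the start parameters
res = mod.fit(em_iter=5, search_reps=10)
print(res.summary())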
def _fit_em(self, start_params=None, transformed=True, cov_type='none',
cov_kwds=None, maxiter=50, tolerance=1e-6, full_output=True,
return_params=False, **kwargs):
"""
Fits the model using the Expectation-Maximization (EM) algorithm
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by `start_params`.
transformed : bool, optional
Whether or not `start_params` is already transformed. Default is
True.
cov_type : str, optional
The type of covariance matrix estimator to use. Can be one of
'approx', 'opg', 'robust', or 'none'. Default is 'none'.
cov_kwds : dict or None, optional
Keywords for alternative covariance estimators
maxiter : int, optional
The maximum number of iterations to perform.
tolerance : float, optional
The iteration stops when the difference between subsequent
loglikelihood values is less than this tolerance.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. This includes all intermediate values for
parameters and loglikelihood values
return_params : bool, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
**kwargs
Additional keyword arguments to pass to the optimizer.
Notes
-----
This is a private method for finding good starting parameters for MLE
by scoring. The EM implementation itself has not been thoroughly
tested for correctness in all cases, and it does not support TVTP
transition probabilities.
Returns
-------
MarkovSwitchingResults
"""
if start_params is None:
start_params = self.start_params
transformed = True
else:
start_params = np.array(start_params, ndmin=1)
if not transformed:
start_params = self.transform_params(start_params)
# Perform expectation-maximization
llf = []
params = [start_params]
i = 0
delta = 0
while i < maxiter and (i < 2 or (delta > tolerance)):
out = self._em_iteration(params[-1])
llf.append(out[0].llf)
params.append(out[1])
if i > 0:
delta = 2 * (llf[-1] - llf[-2]) / np.abs(llf[-1] + llf[-2])
i += 1
# Just return the fitted parameters if requested
if return_params:
result = params[-1]
# Otherwise construct the results class if desired
else:
result = self.filter(params[-1], transformed=True,
cov_type=cov_type, cov_kwds=cov_kwds)
# Save the output
if full_output:
em_retvals = Bunch(**{'params': np.array(params),
'llf': np.array(llf),
'iter': i})
em_settings = Bunch(**{'tolerance': tolerance,
'maxiter': maxiter})
else:
em_retvals = None
em_settings = None
result.mle_retvals = em_retvals
result.mle_settings = em_settings
return result | Fits the model using the Expectation-Maximization (EM) algorithm
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by `start_params`.
transformed : bool, optional
Whether or not `start_params` is already transformed. Default is
True.
cov_type : str, optional
The type of covariance matrix estimator to use. Can be one of
'approx', 'opg', 'robust', or 'none'. Default is 'none'.
cov_kwds : dict or None, optional
Keywords for alternative covariance estimators
maxiter : int, optional
The maximum number of iterations to perform.
tolerance : float, optional
The iteration stops when the difference between subsequent
loglikelihood values is less than this tolerance.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. This includes all intermediate values for
parameters and loglikelihood values
return_params : bool, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
**kwargs
Additional keyword arguments to pass to the optimizer.
Notes
-----
This is a private method for finding good starting parameters for MLE
by scoring. The EM implementation itself has not been thoroughly
tested for correctness in all cases, and it does not support TVTP
transition probabilities. | _fit_em | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause
Returns
-------
MarkovSwitchingResults | _fit_em | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
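Note: the stopping rule in the loop above is a symmetric relative change in the loglikelihood; isolated as a sketch:

def em_converged(llf_new, llf_old, tolerance=1e-6):
    # Mirrors the loop above: scaled change between successive
    # loglikelihood values, symmetric in the two iterates
    delta = 2 * (llf_new - llf_old) / abs(llf_new + llf_old)
    return delta < tolerance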
def _em_iteration(self, params0):
"""
EM iteration
Notes
-----
The EM iteration in this base class only performs the EM step for
non-TVTP transition probabilities.
"""
params1 = np.zeros(params0.shape,
dtype=np.promote_types(np.float64, params0.dtype))
# Smooth at the given parameters
result = self.smooth(params0, transformed=True, return_raw=True)
# The EM with TVTP is not yet supported, just return the previous
# iteration parameters
if self.tvtp:
params1[self.parameters['regime_transition']] = (
params0[self.parameters['regime_transition']])
else:
regime_transition = self._em_regime_transition(result)
for i in range(self.k_regimes):
params1[self.parameters[i, 'regime_transition']] = (
regime_transition[i])
return result, params1 | EM iteration
Notes
-----
The EM iteration in this base class only performs the EM step for
non-TVTP transition probabilities. | _em_iteration | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def _em_regime_transition(self, result):
"""
EM step for regime transition probabilities
"""
# Marginalize the smoothed joint probabilities to just S_t, S_{t-1} | T
tmp = result.smoothed_joint_probabilities
for i in range(tmp.ndim - 3):
tmp = np.sum(tmp, -2)
smoothed_joint_probabilities = tmp
# Transition parameters (recall we're not yet supporting TVTP here)
k_transition = len(self.parameters[0, 'regime_transition'])
regime_transition = np.zeros((self.k_regimes, k_transition))
for i in range(self.k_regimes):  # S_{t-1}
for j in range(self.k_regimes - 1): # S_t
regime_transition[i, j] = (
np.sum(smoothed_joint_probabilities[j, i]) /
np.sum(result.smoothed_marginal_probabilities[i]))
# It may be the case that due to rounding error this estimates
# transition probabilities that sum to greater than one. If so,
# re-scale the probabilities and warn the user that something
# is not quite right
delta = np.sum(regime_transition[i]) - 1
if delta > 0:
warnings.warn('Invalid regime transition probabilities'
' estimated in EM iteration; probabilities have'
' been re-scaled to continue estimation.',
EstimationWarning)
regime_transition[i] /= 1 + delta + 1e-6
return regime_transition | EM step for regime transition probabilities | _em_regime_transition | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
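Note: a hedged sketch of the M-step ratio used above — the transition probability estimate is the expected number of i -> j transitions divided by the expected time spent in regime i. The Dirichlet draws are placeholders for real smoother output:

import numpy as np

np.random.seed(0)
# smoothed_joint[j, i, t] ~ P(S_t = j, S_{t-1} = i | all data)
smoothed_joint = np.random.dirichlet(np.ones(4), size=10).T.reshape(2, 2, 10)
smoothed_marginal = smoothed_joint.sum(axis=0)  # P(S_{t-1} = i | all data)

# M-step ratio: expected i -> j transitions over expected time in regime i
p = smoothed_joint.sum(axis=-1) / smoothed_marginal.sum(axis=-1)
assert np.allclose(p.sum(axis=0), 1.0)  # each column is a distribution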
def _start_params_search(self, reps, start_params=None, transformed=True,
em_iter=5, scale=1.):
"""
Search for starting parameters as random permutations of a vector
Parameters
----------
reps : int
Number of random permutations to try.
start_params : ndarray, optional
Starting parameter vector. If not given, class-level start
parameters are used.
transformed : bool, optional
If `start_params` was provided, whether or not those parameters
are already transformed. Default is True.
em_iter : int, optional
Number of EM iterations to apply to each random permutation.
scale : array or float, optional
Scale of variates for random start parameter search. Can be given
as an array of length equal to the number of parameters or as a
single scalar.
Notes
-----
This is a private method for finding good starting parameters for MLE
by scoring, where the defaults have been set heuristically.
"""
if start_params is None:
start_params = self.start_params
transformed = True
else:
start_params = np.array(start_params, ndmin=1)
# Random search is over untransformed space
if transformed:
start_params = self.untransform_params(start_params)
# Construct the standard deviations
scale = np.array(scale, ndmin=1)
if scale.size == 1:
scale = np.ones(self.k_params) * scale
if not scale.size == self.k_params:
raise ValueError('Scale of variates for random start'
' parameter search must be given for each'
' parameter or as a single scalar.')
# Construct the random variates
variates = np.zeros((reps, self.k_params))
for i in range(self.k_params):
variates[:, i] = scale[i] * np.random.uniform(-0.5, 0.5, size=reps)
llf = self.loglike(start_params, transformed=False)
params = start_params
for i in range(reps):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
proposed_params = self._fit_em(
start_params + variates[i], transformed=False,
maxiter=em_iter, return_params=True)
proposed_llf = self.loglike(proposed_params)
if proposed_llf > llf:
llf = proposed_llf
params = self.untransform_params(proposed_params)
except Exception: # FIXME: catch something specific
pass
# Return transformed parameters
return self.transform_params(params) | Search for starting parameters as random permutations of a vector
Parameters
----------
reps : int
Number of random permutations to try.
start_params : ndarray, optional
Starting parameter vector. If not given, class-level start
parameters are used.
transformed : bool, optional
If `start_params` was provided, whether or not those parameters
are already transformed. Default is True.
em_iter : int, optional
Number of EM iterations to apply to each random permutation.
scale : array or float, optional
Scale of variates for random start parameter search. Can be given
as an array of length equal to the number of parameters or as a
single scalar.
Notes
-----
This is a private method for finding good starting parameters for MLE
by scoring, where the defaults have been set heuristically. | _start_params_search | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
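Note: the perturbation scheme above reduces to uniform draws in [-scale/2, scale/2] per parameter; a sketch with illustrative sizes:

import numpy as np

k_params, reps = 3, 5
scale = np.array([1.0, 0.5, 0.1])  # per-parameter search widths
variates = scale * np.random.uniform(-0.5, 0.5, size=(reps, k_params))
start = np.zeros(k_params)
candidates = start + variates  # one perturbed start vector per row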
def start_params(self):
"""
(array) Starting parameters for maximum likelihood estimation.
"""
params = np.zeros(self.k_params, dtype=np.float64)
# Transition probabilities
if self.tvtp:
params[self.parameters['regime_transition']] = 0.
else:
params[self.parameters['regime_transition']] = 1. / self.k_regimes
return params | (array) Starting parameters for maximum likelihood estimation. | start_params | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def param_names(self):
"""
(list of str) List of human readable parameter names (for parameters
actually included in the model).
"""
param_names = np.zeros(self.k_params, dtype=object)
# Transition probabilities
if self.tvtp:
# TODO add support for exog_tvtp_names
param_names[self.parameters['regime_transition']] = [
'p[%d->%d].tvtp%d' % (j, i, k)
for i in range(self.k_regimes-1)
for k in range(self.k_tvtp)
for j in range(self.k_regimes)
]
else:
param_names[self.parameters['regime_transition']] = [
'p[%d->%d]' % (j, i)
for i in range(self.k_regimes-1)
for j in range(self.k_regimes)]
return param_names.tolist() | (list of str) List of human readable parameter names (for parameters
actually included in the model). | param_names | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def transform_params(self, unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
Parameters
----------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer, to be
transformed.
Returns
-------
constrained : array_like
Array of constrained parameters which may be used in likelihood
evaluation.
Notes
-----
In the base class, this only transforms the transition-probability-
related parameters.
"""
constrained = np.array(unconstrained, copy=True)
constrained = constrained.astype(
np.promote_types(np.float64, constrained.dtype))
# Nothing to do for transition probabilities if TVTP
if self.tvtp:
constrained[self.parameters['regime_transition']] = (
unconstrained[self.parameters['regime_transition']])
# Otherwise do logistic transformation
else:
# Transition probabilities
for i in range(self.k_regimes):
tmp1 = unconstrained[self.parameters[i, 'regime_transition']]
tmp2 = np.r_[0, tmp1]
constrained[self.parameters[i, 'regime_transition']] = np.exp(
tmp1 - logsumexp(tmp2))
# Do not do anything for the rest of the parameters
return constrained | Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
Parameters
----------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer, to be
transformed.
Returns
-------
constrained : array_like
Array of constrained parameters which may be used in likelihood
evaluation.
Notes
-----
In the base class, this only transforms the transition-probability-
related parameters. | transform_params | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
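Note: a hedged numerical check of the multinomial-logit transform above — the k-1 constrained values plus the implicit remainder form a valid probability column:

import numpy as np
from scipy.special import logsumexp

unconstrained = np.array([0.5, -1.0])  # illustrative optimizer values
constrained = np.exp(unconstrained - logsumexp(np.r_[0, unconstrained]))
remainder = 1 - constrained.sum()  # the implicit k-th probability
assert remainder > 0 and np.isclose(constrained.sum() + remainder, 1.0)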
def _untransform_logistic(self, unconstrained, constrained):
"""
Function to allow using a numerical root-finder to reverse the
logistic transform.
"""
resid = np.zeros(unconstrained.shape, dtype=unconstrained.dtype)
exp = np.exp(unconstrained)
sum_exp = np.sum(exp)
for i in range(len(unconstrained)):
resid[i] = (unconstrained[i] -
np.log(1 + sum_exp - exp[i]) +
np.log(1 / constrained[i] - 1))
return resid | Function to allow using a numerical root-finder to reverse the
logistic transform. | _untransform_logistic | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
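Note: the same inversion can be checked end to end with `scipy.optimize.root`; the simplified residual below (forward transform minus target) has the same root as the formulation above:

import numpy as np
from scipy.optimize import root
from scipy.special import logsumexp

def forward(u):
    # The k > 2 multinomial-logit transform from transform_params
    return np.exp(u - logsumexp(np.r_[0, u]))

target = np.array([0.2, 0.3])  # illustrative constrained probabilities

out = root(lambda u: forward(u) - target, np.zeros(2))
assert out['success'] and np.allclose(forward(out['x']), target)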
def untransform_params(self, constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : array_like
Array of constrained parameters used in likelihood evaluation, to
be transformed.
Returns
-------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer.
Notes
-----
In the base class, this only untransforms the transition-probability-
related parameters.
"""
unconstrained = np.array(constrained, copy=True)
unconstrained = unconstrained.astype(
np.promote_types(np.float64, unconstrained.dtype))
# Nothing to do for transition probabilities if TVTP
if self.tvtp:
unconstrained[self.parameters['regime_transition']] = (
constrained[self.parameters['regime_transition']])
# Otherwise reverse logistic transformation
else:
for i in range(self.k_regimes):
s = self.parameters[i, 'regime_transition']
if self.k_regimes == 2:
unconstrained[s] = -np.log(1. / constrained[s] - 1)
else:
from scipy.optimize import root
out = root(self._untransform_logistic,
np.zeros(unconstrained[s].shape,
unconstrained.dtype),
args=(constrained[s],))
if not out['success']:
raise ValueError('Could not untransform parameters.')
unconstrained[s] = out['x']
# Do not do anything for the rest of the parameters
return unconstrained | Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : array_like
Array of constrained parameters used in likelihood evaluation, to
be transformed.
Returns
-------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer.
Notes
-----
In the base class, this only untransforms the transition-probability-
related parameters. | untransform_params | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
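Note: round-trip sketch for the two-regime branch above, where the inverse is just the logit:

import numpy as np

p = 0.9                             # a constrained transition probability
u = -np.log(1.0 / p - 1)            # untransform_params (logit)
back = np.exp(u) / (1 + np.exp(u))  # transform_params (logistic)
assert np.isclose(back, p)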
def expected_durations(self):
"""
(array) Expected duration of a regime, possibly time-varying.
"""
# It is possible that we will have a degenerate system, so that there
# is no possibility of transitioning to a different state. In that
# case, we do want the expected duration of one state to be np.inf,
# and the expected duration of the other states to be np.nan
diag = np.diagonal(self.regime_transition)
expected_durations = np.zeros_like(diag)
degenerate = np.any(diag == 1, axis=1)
# For non-degenerate states, use the usual computation
expected_durations[~degenerate] = 1 / (1 - diag[~degenerate])
# For degenerate states, everything is np.nan, except for the one
# state that is np.inf.
expected_durations[degenerate] = np.nan
expected_durations[diag == 1] = np.inf
return expected_durations.squeeze() | (array) Expected duration of a regime, possibly time-varying. | expected_durations | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
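Note: the non-degenerate branch above is the mean of a geometric duration — with stay-probability p, a regime is expected to last 1 / (1 - p) periods:

import numpy as np

diag = np.array([0.95, 0.80])  # illustrative stay-probabilities p[i->i]
expected = 1 / (1 - diag)      # array([20., 5.])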
def aic(self):
"""
(float) Akaike Information Criterion
"""
# return -2*self.llf + 2*self.params.shape[0]
return aic(self.llf, self.nobs, self.params.shape[0]) | (float) Akaike Information Criterion | aic | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def bic(self):
"""
(float) Bayes Information Criterion
"""
# return -2*self.llf + self.params.shape[0]*np.log(self.nobs)
return bic(self.llf, self.nobs, self.params.shape[0]) | (float) Bayes Information Criterion | bic | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def cov_params_approx(self):
"""
(array) The variance / covariance matrix. Computed using the numerical
Hessian, approximated by complex-step or finite-difference methods.
"""
evaluated_hessian = self.model.hessian(self.params, transformed=True)
neg_cov, singular_values = pinv_extended(evaluated_hessian)
if self._rank is None:
self._rank = np.linalg.matrix_rank(np.diag(singular_values))
return -neg_cov | (array) The variance / covariance matrix. Computed using the numerical
Hessian, approximated by complex-step or finite-difference methods. | cov_params_approx | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause
def cov_params_opg(self):
"""
(array) The variance / covariance matrix. Computed using the outer
product of gradients method.
"""
score_obs = self.model.score_obs(self.params, transformed=True).T
cov_params, singular_values = pinv_extended(
np.inner(score_obs, score_obs))
if self._rank is None:
self._rank = np.linalg.matrix_rank(np.diag(singular_values))
return cov_params | (array) The variance / covariance matrix. Computed using the outer
product of gradients method. | cov_params_opg | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
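Note: a hedged sketch of the OPG computation above, using random scores in place of real model output:

import numpy as np

np.random.seed(0)
score_obs = np.random.normal(size=(100, 3))  # nobs x k_params scores
opg = score_obs.T @ score_obs                # sum of per-obs outer products
cov = np.linalg.pinv(opg)                    # pseudo-inverse, as above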
def cov_params_robust(self):
"""
(array) The QMLE variance / covariance matrix. Computed using the
numerically evaluated Hessian combined with the OPG covariance estimate in a sandwich formula.
"""
cov_opg = self.cov_params_opg
evaluated_hessian = self.model.hessian(self.params, transformed=True)
cov_params, singular_values = pinv_extended(
np.dot(np.dot(evaluated_hessian, cov_opg), evaluated_hessian)
)
if self._rank is None:
self._rank = np.linalg.matrix_rank(np.diag(singular_values))
return cov_params | (array) The QMLE variance / covariance matrix. Computed using the
numerically evaluated Hessian combined with the OPG covariance estimate in a sandwich formula. | cov_params_robust | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause
def fittedvalues(self):
"""
(array) The predicted values of the model. An (nobs x k_endog) array.
"""
return self.model.predict(self.params) | (array) The predicted values of the model. An (nobs x k_endog) array. | fittedvalues | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def hqic(self):
"""
(float) Hannan-Quinn Information Criterion
"""
# return -2*self.llf + 2*np.log(np.log(self.nobs))*self.params.shape[0]
return hqic(self.llf, self.nobs, self.params.shape[0]) | (float) Hannan-Quinn Information Criterion | hqic | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
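Note: the three criteria (`aic`, `bic`, `hqic`) match the commented-out formulas in the properties; as standalone functions:

import numpy as np

def aic(llf, nobs, k_params):
    return -2 * llf + 2 * k_params

def bic(llf, nobs, k_params):
    return -2 * llf + k_params * np.log(nobs)

def hqic(llf, nobs, k_params):
    return -2 * llf + 2 * np.log(np.log(nobs)) * k_params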
def llf_obs(self):
"""
(float) The value of the log-likelihood function evaluated at `params`.
"""
return self.model.loglikeobs(self.params) | (float) The value of the log-likelihood function evaluated at `params`. | llf_obs | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def llf(self):
"""
(float) The value of the log-likelihood function evaluated at `params`.
"""
return self.model.loglike(self.params) | (float) The value of the log-likelihood function evaluated at `params`. | llf | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def resid(self):
"""
(array) The model residuals. An (nobs x k_endog) array.
"""
return self.model.endog - self.fittedvalues | (array) The model residuals. An (nobs x k_endog) array. | resid | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
def predict(self, start=None, end=None, probabilities=None,
conditional=False):
"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
probabilities : str or array_like, optional
Specifies the weighting probabilities used in constructing the
prediction as a weighted average. If a string, can be 'predicted',
'filtered', or 'smoothed'. Otherwise can be an array of
probabilities to use. Default is smoothed.
conditional : bool or int, optional
Whether or not to return predictions conditional on current or
past regimes. If False, returns a single vector of weighted
predictions. If True or 1, returns predictions conditional on the
current regime. For larger integers, returns predictions
conditional on the current regime and some number of past regimes.
Returns
-------
predict : ndarray
Array of in-sample predictions and / or out-of-sample
forecasts. An (npredict x k_endog) array.
"""
return self.model.predict(self.params, start=start, end=end,
probabilities=probabilities,
conditional=conditional) | In-sample prediction and out-of-sample forecasting
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
probabilities : str or array_like, optional
Specifies the weighting probabilities used in constructing the
prediction as a weighted average. If a string, can be 'predicted',
'filtered', or 'smoothed'. Otherwise can be an array of
probabilities to use. Default is smoothed.
conditional : bool or int, optional
Whether or not to return predictions conditional on current or
past regimes. If False, returns a single vector of weighted
predictions. If True or 1, returns predictions conditional on the
current regime. For larger integers, returns predictions
conditional on the current regime and some number of past regimes.
Returns
-------
predict : ndarray
Array of in-sample predictions and / or out-of-sample
forecasts. An (npredict x k_endog) array. | predict | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |
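Note: a hypothetical continuation of the fit sketch above (`res` is the fitted results object; names are illustrative):

# Weighted predictions under different probability weightings
pred_smoothed = res.predict(probabilities='smoothed')  # the default
pred_filtered = res.predict(probabilities='filtered')

# One prediction path per current regime instead of a weighted average
per_regime = res.predict(conditional=True)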
def forecast(self, steps=1, **kwargs):
"""
Out-of-sample forecasts
Parameters
----------
steps : int, str, or datetime, optional
If an integer, the number of steps to forecast from the end of the
sample. Can also be a date string to parse or a datetime type.
However, if the dates index does not have a fixed frequency, steps
must be an integer. Default is 1.
**kwargs
Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : ndarray
Array of out of sample forecasts. A (steps x k_endog) array.
"""
raise NotImplementedError | Out-of-sample forecasts
Parameters
----------
steps : int, str, or datetime, optional
If an integer, the number of steps to forecast from the end of the
sample. Can also be a date string to parse or a datetime type.
However, if the dates index does not have a fixed frequency, steps
must be an integer. Default is 1.
**kwargs
Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : ndarray
Array of out of sample forecasts. A (steps x k_endog) array. | forecast | python | statsmodels/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/regime_switching/markov_switching.py | BSD-3-Clause |