code
stringlengths 26
870k
| docstring
stringlengths 1
65.6k
| func_name
stringlengths 1
194
| language
stringclasses 1
value | repo
stringlengths 8
68
| path
stringlengths 5
182
| url
stringlengths 46
251
| license
stringclasses 4
values |
---|---|---|---|---|---|---|---|
def load():
"""
Load the statecrime data and return a Dataset class instance.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
"""
return load_pandas() | Load the statecrime data and return a Dataset class instance.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information. | load | python | statsmodels/statsmodels | statsmodels/datasets/statecrime/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/datasets/statecrime/data.py | BSD-3-Clause |
def load():
"""
Load the West German interest/inflation data and return a Dataset class.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
Notes
-----
The interest_inflation Dataset instance does not contain endog and exog
attributes.
"""
return load_pandas() | Load the West German interest/inflation data and return a Dataset class.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
Notes
-----
The interest_inflation Dataset instance does not contain endog and exog
attributes. | load | python | statsmodels/statsmodels | statsmodels/datasets/interest_inflation/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/datasets/interest_inflation/data.py | BSD-3-Clause |
def load():
"""
Load the Spector dataset and returns a Dataset class instance.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
"""
return load_pandas() | Load the Spector dataset and returns a Dataset class instance.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information. | load | python | statsmodels/statsmodels | statsmodels/datasets/spector/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/datasets/spector/data.py | BSD-3-Clause |
def load():
"""
Load the data modechoice data and return a Dataset class instance.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
"""
return load_pandas() | Load the data modechoice data and return a Dataset class instance.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information. | load | python | statsmodels/statsmodels | statsmodels/datasets/modechoice/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/datasets/modechoice/data.py | BSD-3-Clause |
def load():
"""
Load the Scotvote data and returns a Dataset instance.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
"""
return load_pandas() | Load the Scotvote data and returns a Dataset instance.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information. | load | python | statsmodels/statsmodels | statsmodels/datasets/scotland/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/datasets/scotland/data.py | BSD-3-Clause |
def _find_x12(x12path=None, prefer_x13=True):
"""
If x12path is not given, then either x13as[.exe] or x12a[.exe] must
be found on the PATH. Otherwise, the environmental variable X12PATH or
X13PATH must be defined. If prefer_x13 is True, only X13PATH is searched
for. If it is false, only X12PATH is searched for.
"""
global _binary_names
if x12path is not None and x12path.endswith(_binary_names):
# remove binary from path if path is not a directory
if not os.path.isdir(x12path):
x12path = os.path.dirname(x12path)
if not prefer_x13: # search for x12 first
_binary_names = _binary_names[::-1]
if x12path is None:
x12path = os.getenv("X12PATH", "")
if not x12path:
x12path = os.getenv("X13PATH", "")
elif x12path is None:
x12path = os.getenv("X13PATH", "")
if not x12path:
x12path = os.getenv("X12PATH", "")
for binary in _binary_names:
x12 = os.path.join(x12path, binary)
try:
subprocess.check_call(x12, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return x12
except OSError:
pass
else:
return False | If x12path is not given, then either x13as[.exe] or x12a[.exe] must
be found on the PATH. Otherwise, the environmental variable X12PATH or
X13PATH must be defined. If prefer_x13 is True, only X13PATH is searched
for. If it is false, only X12PATH is searched for. | _find_x12 | python | statsmodels/statsmodels | statsmodels/tsa/x13.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/x13.py | BSD-3-Clause |
def _clean_order(order):
"""
Takes something like (1 1 0)(0 1 1) and returns a arma order, sarma
order tuple. Also accepts (1 1 0) and return arma order and (0, 0, 0)
"""
order = re.findall(r"\([0-9 ]*?\)", order)
def clean(x):
return tuple(map(int, re.sub("[()]", "", x).split(" ")))
if len(order) > 1:
order, sorder = map(clean, order)
else:
order = clean(order[0])
sorder = (0, 0, 0)
return order, sorder | Takes something like (1 1 0)(0 1 1) and returns a arma order, sarma
order tuple. Also accepts (1 1 0) and return arma order and (0, 0, 0) | _clean_order | python | statsmodels/statsmodels | statsmodels/tsa/x13.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/x13.py | BSD-3-Clause |
def _convert_out_to_series(x, dates, name):
"""
Convert x to a DataFrame where x is a string in the format given by
x-13arima-seats output.
"""
from io import StringIO
from pandas import read_csv
out = read_csv(StringIO(x), skiprows=2, header=None, sep="\t", engine="python")
return out.set_index(dates).rename(columns={1: name})[name] | Convert x to a DataFrame where x is a string in the format given by
x-13arima-seats output. | _convert_out_to_series | python | statsmodels/statsmodels | statsmodels/tsa/x13.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/x13.py | BSD-3-Clause |
def x13_arima_analysis(
endog,
maxorder=(2, 1),
maxdiff=(2, 1),
diff=None,
exog=None,
log=None,
outlier=True,
trading=False,
forecast_periods=None,
retspec=False,
speconly=False,
start=None,
freq=None,
print_stdout=False,
x12path=None,
prefer_x13=True,
log_diagnostics=False,
tempdir=None,
):
"""
Perform x13-arima analysis for monthly or quarterly data.
Parameters
----------
endog : array_like, pandas.Series
The series to model. It is best to use a pandas object with a
DatetimeIndex or PeriodIndex. However, you can pass an array-like
object. If your object does not have a dates index then ``start`` and
``freq`` are not optional.
maxorder : tuple
The maximum order of the regular and seasonal ARMA polynomials to
examine during the model identification. The order for the regular
polynomial must be greater than zero and no larger than 4. The
order for the seasonal polynomial may be 1 or 2.
maxdiff : tuple
The maximum orders for regular and seasonal differencing in the
automatic differencing procedure. Acceptable inputs for regular
differencing are 1 and 2. The maximum order for seasonal differencing
is 1. If ``diff`` is specified then ``maxdiff`` should be None.
Otherwise, ``diff`` will be ignored. See also ``diff``.
diff : tuple
Fixes the orders of differencing for the regular and seasonal
differencing. Regular differencing may be 0, 1, or 2. Seasonal
differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
``diff`` is ignored.
exog : array_like
Exogenous variables.
log : bool or None
If None, it is automatically determined whether to log the series or
not. If False, logs are not taken. If True, logs are taken.
outlier : bool
Whether or not outliers are tested for and corrected, if detected.
trading : bool
Whether or not trading day effects are tested for.
forecast_periods : int
Number of forecasts produced. The default is None.
retspec : bool
Whether to return the created specification file. Can be useful for
debugging.
speconly : bool
Whether to create the specification file and then return it without
performing the analysis. Can be useful for debugging.
start : str, datetime
Must be given if ``endog`` does not have date information in its index.
Anything accepted by pandas.DatetimeIndex for the start value.
freq : str
Must be givein if ``endog`` does not have date information in its
index. Anything accepted by pandas.DatetimeIndex for the freq value.
print_stdout : bool
The stdout from X12/X13 is suppressed. To print it out, set this
to True. Default is False.
x12path : str or None
The path to x12 or x13 binary. If None, the program will attempt
to find x13as or x12a on the PATH or by looking at X13PATH or
X12PATH depending on the value of prefer_x13.
prefer_x13 : bool
If True, will look for x13as first and will fallback to the X13PATH
environmental variable. If False, will look for x12a first and will
fallback to the X12PATH environmental variable. If x12path points
to the path for the X12/X13 binary, it does nothing.
log_diagnostics : bool
If True, returns D8 F-Test, M07, and Q diagnostics from the X13
savelog. Set to False by default.
tempdir : str
The path to where temporary files are created by the function.
If None, files are created in the default temporary file location.
Returns
-------
Bunch
A bunch object containing the listed attributes.
- results : str
The full output from the X12/X13 run.
- seasadj : pandas.Series
The final seasonally adjusted ``endog``.
- trend : pandas.Series
The trend-cycle component of ``endog``.
- irregular : pandas.Series
The final irregular component of ``endog``.
- stdout : str
The captured stdout produced by x12/x13.
- spec : str, optional
Returned if ``retspec`` is True. The only thing returned if
``speconly`` is True.
- x13_diagnostic : dict
Returns F-D8, M07, and Q metrics if True. Returns dict with no
metrics if False
Notes
-----
This works by creating a specification file, writing it to a temporary
directory, invoking X12/X13 in a subprocess, and reading the output
directory, invoking exog12/X13 in a subprocess, and reading the output
back in.
"""
x12path = _check_x12(x12path)
if not isinstance(endog, (pd.DataFrame, pd.Series)):
if start is None or freq is None:
raise ValueError(
"start and freq cannot be none if endog is not " "a pandas object"
)
idx = pd.date_range(start=start, periods=len(endog), freq=freq)
endog = pd.Series(endog, index=idx)
spec_obj = pandas_to_series_spec(endog)
spec = spec_obj.create_spec()
spec += f"transform{{function={_log_to_x12[log]}}}\n"
if outlier:
spec += "outlier{}\n"
options = _make_automdl_options(maxorder, maxdiff, diff)
spec += f"automdl{{{options}}}\n"
spec += _make_regression_options(trading, exog)
spec += _make_forecast_options(forecast_periods)
spec += "x11{ save=(d11 d12 d13) \n savelog=(fd8 m7 q)}"
if speconly:
return spec
# write it to a tempfile
# TODO: make this more robust - give the user some control?
ftempin = tempfile.NamedTemporaryFile(delete=False, suffix=".spc", dir=tempdir)
ftempout = tempfile.NamedTemporaryFile(delete=False, dir=tempdir)
try:
ftempin.write(spec.encode("utf8"))
ftempin.close()
ftempout.close()
# call x12 arima
p = run_spec(x12path, ftempin.name[:-4], ftempout.name)
p.wait()
stdout = p.stdout.read()
if print_stdout:
print(p.stdout.read())
# check for errors
errors = _open_and_read(ftempout.name + ".err")
_check_errors(errors)
# read in results
results = _open_and_read(ftempout.name + ".out")
seasadj = _open_and_read(ftempout.name + ".d11")
trend = _open_and_read(ftempout.name + ".d12")
irregular = _open_and_read(ftempout.name + ".d13")
if log_diagnostics:
# read f8d m7 and q diagnostics from log
x13_logs = _open_and_read(ftempout.name + ".log")
x13_diagnostic = {
"F-D8": float(re.search(r"D8 table\s*:\s*([\d.]+)", x13_logs).group(1)),
"M07": float(re.search(r"M07\s*:\s*([\d.]+)", x13_logs).group(1)),
"Q": float(re.search(r"Q\s*:\s*([\d.]+)", x13_logs).group(1))
}
else:
x13_diagnostic = {"F-D8": "Log diagnostics not retrieved.",
"M07": "Log diagnostics not retrieved.",
"Q": "Log diagnostics not retrieved."}
finally:
try: # sometimes this gives a permission denied error?
# not sure why. no process should have these open
os.remove(ftempin.name)
os.remove(ftempout.name)
except OSError:
if os.path.exists(ftempin.name):
warn(f"Failed to delete resource {ftempin.name}", IOWarning)
if os.path.exists(ftempout.name):
warn(f"Failed to delete resource {ftempout.name}", IOWarning)
seasadj = _convert_out_to_series(seasadj, endog.index, "seasadj")
trend = _convert_out_to_series(trend, endog.index, "trend")
irregular = _convert_out_to_series(irregular, endog.index, "irregular")
# NOTE: there is not likely anything in stdout that's not in results
# so may be safe to just suppress and remove it
if not retspec:
res = X13ArimaAnalysisResult(
observed=endog,
results=results,
seasadj=seasadj,
trend=trend,
irregular=irregular,
stdout=stdout,
x13_diagnostic=x13_diagnostic,
)
else:
res = X13ArimaAnalysisResult(
observed=endog,
results=results,
seasadj=seasadj,
trend=trend,
irregular=irregular,
stdout=stdout,
spec=spec,
x13_diagnostic=x13_diagnostic,
)
return res | Perform x13-arima analysis for monthly or quarterly data.
Parameters
----------
endog : array_like, pandas.Series
The series to model. It is best to use a pandas object with a
DatetimeIndex or PeriodIndex. However, you can pass an array-like
object. If your object does not have a dates index then ``start`` and
``freq`` are not optional.
maxorder : tuple
The maximum order of the regular and seasonal ARMA polynomials to
examine during the model identification. The order for the regular
polynomial must be greater than zero and no larger than 4. The
order for the seasonal polynomial may be 1 or 2.
maxdiff : tuple
The maximum orders for regular and seasonal differencing in the
automatic differencing procedure. Acceptable inputs for regular
differencing are 1 and 2. The maximum order for seasonal differencing
is 1. If ``diff`` is specified then ``maxdiff`` should be None.
Otherwise, ``diff`` will be ignored. See also ``diff``.
diff : tuple
Fixes the orders of differencing for the regular and seasonal
differencing. Regular differencing may be 0, 1, or 2. Seasonal
differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
``diff`` is ignored.
exog : array_like
Exogenous variables.
log : bool or None
If None, it is automatically determined whether to log the series or
not. If False, logs are not taken. If True, logs are taken.
outlier : bool
Whether or not outliers are tested for and corrected, if detected.
trading : bool
Whether or not trading day effects are tested for.
forecast_periods : int
Number of forecasts produced. The default is None.
retspec : bool
Whether to return the created specification file. Can be useful for
debugging.
speconly : bool
Whether to create the specification file and then return it without
performing the analysis. Can be useful for debugging.
start : str, datetime
Must be given if ``endog`` does not have date information in its index.
Anything accepted by pandas.DatetimeIndex for the start value.
freq : str
Must be givein if ``endog`` does not have date information in its
index. Anything accepted by pandas.DatetimeIndex for the freq value.
print_stdout : bool
The stdout from X12/X13 is suppressed. To print it out, set this
to True. Default is False.
x12path : str or None
The path to x12 or x13 binary. If None, the program will attempt
to find x13as or x12a on the PATH or by looking at X13PATH or
X12PATH depending on the value of prefer_x13.
prefer_x13 : bool
If True, will look for x13as first and will fallback to the X13PATH
environmental variable. If False, will look for x12a first and will
fallback to the X12PATH environmental variable. If x12path points
to the path for the X12/X13 binary, it does nothing.
log_diagnostics : bool
If True, returns D8 F-Test, M07, and Q diagnostics from the X13
savelog. Set to False by default.
tempdir : str
The path to where temporary files are created by the function.
If None, files are created in the default temporary file location.
Returns
-------
Bunch
A bunch object containing the listed attributes.
- results : str
The full output from the X12/X13 run.
- seasadj : pandas.Series
The final seasonally adjusted ``endog``.
- trend : pandas.Series
The trend-cycle component of ``endog``.
- irregular : pandas.Series
The final irregular component of ``endog``.
- stdout : str
The captured stdout produced by x12/x13.
- spec : str, optional
Returned if ``retspec`` is True. The only thing returned if
``speconly`` is True.
- x13_diagnostic : dict
Returns F-D8, M07, and Q metrics if True. Returns dict with no
metrics if False
Notes
-----
This works by creating a specification file, writing it to a temporary
directory, invoking X12/X13 in a subprocess, and reading the output
directory, invoking exog12/X13 in a subprocess, and reading the output
back in. | x13_arima_analysis | python | statsmodels/statsmodels | statsmodels/tsa/x13.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/x13.py | BSD-3-Clause |
def x13_arima_select_order(
endog,
maxorder=(2, 1),
maxdiff=(2, 1),
diff=None,
exog=None,
log=None,
outlier=True,
trading=False,
forecast_periods=None,
start=None,
freq=None,
print_stdout=False,
x12path=None,
prefer_x13=True,
tempdir=None,
):
"""
Perform automatic seasonal ARIMA order identification using x12/x13 ARIMA.
Parameters
----------
endog : array_like, pandas.Series
The series to model. It is best to use a pandas object with a
DatetimeIndex or PeriodIndex. However, you can pass an array-like
object. If your object does not have a dates index then ``start`` and
``freq`` are not optional.
maxorder : tuple
The maximum order of the regular and seasonal ARMA polynomials to
examine during the model identification. The order for the regular
polynomial must be greater than zero and no larger than 4. The
order for the seasonal polynomial may be 1 or 2.
maxdiff : tuple
The maximum orders for regular and seasonal differencing in the
automatic differencing procedure. Acceptable inputs for regular
differencing are 1 and 2. The maximum order for seasonal differencing
is 1. If ``diff`` is specified then ``maxdiff`` should be None.
Otherwise, ``diff`` will be ignored. See also ``diff``.
diff : tuple
Fixes the orders of differencing for the regular and seasonal
differencing. Regular differencing may be 0, 1, or 2. Seasonal
differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
``diff`` is ignored.
exog : array_like
Exogenous variables.
log : bool or None
If None, it is automatically determined whether to log the series or
not. If False, logs are not taken. If True, logs are taken.
outlier : bool
Whether or not outliers are tested for and corrected, if detected.
trading : bool
Whether or not trading day effects are tested for.
forecast_periods : int
Number of forecasts produced. The default is None.
start : str, datetime
Must be given if ``endog`` does not have date information in its index.
Anything accepted by pandas.DatetimeIndex for the start value.
freq : str
Must be givein if ``endog`` does not have date information in its
index. Anything accepted by pandas.DatetimeIndex for the freq value.
print_stdout : bool
The stdout from X12/X13 is suppressed. To print it out, set this
to True. Default is False.
x12path : str or None
The path to x12 or x13 binary. If None, the program will attempt
to find x13as or x12a on the PATH or by looking at X13PATH or X12PATH
depending on the value of prefer_x13.
prefer_x13 : bool
If True, will look for x13as first and will fallback to the X13PATH
environmental variable. If False, will look for x12a first and will
fallback to the X12PATH environmental variable. If x12path points
to the path for the X12/X13 binary, it does nothing.
tempdir : str
The path to where temporary files are created by the function.
If None, files are created in the default temporary file location.
Returns
-------
Bunch
A bunch object containing the listed attributes.
- order : tuple
The regular order.
- sorder : tuple
The seasonal order.
- include_mean : bool
Whether to include a mean or not.
- results : str
The full results from the X12/X13 analysis.
- stdout : str
The captured stdout from the X12/X13 analysis.
Notes
-----
This works by creating a specification file, writing it to a temporary
directory, invoking X12/X13 in a subprocess, and reading the output back
in.
"""
results = x13_arima_analysis(
endog,
x12path=x12path,
exog=exog,
log=log,
outlier=outlier,
trading=trading,
forecast_periods=forecast_periods,
maxorder=maxorder,
maxdiff=maxdiff,
diff=diff,
start=start,
freq=freq,
prefer_x13=prefer_x13,
tempdir=tempdir,
print_stdout=print_stdout,
)
model = re.search("(?<=Final automatic model choice : ).*", results.results)
order = model.group()
if re.search("Mean is not significant", results.results):
include_mean = False
elif re.search("Constant", results.results):
include_mean = True
else:
include_mean = False
order, sorder = _clean_order(order)
res = Bunch(
order=order,
sorder=sorder,
include_mean=include_mean,
results=results.results,
stdout=results.stdout,
)
return res | Perform automatic seasonal ARIMA order identification using x12/x13 ARIMA.
Parameters
----------
endog : array_like, pandas.Series
The series to model. It is best to use a pandas object with a
DatetimeIndex or PeriodIndex. However, you can pass an array-like
object. If your object does not have a dates index then ``start`` and
``freq`` are not optional.
maxorder : tuple
The maximum order of the regular and seasonal ARMA polynomials to
examine during the model identification. The order for the regular
polynomial must be greater than zero and no larger than 4. The
order for the seasonal polynomial may be 1 or 2.
maxdiff : tuple
The maximum orders for regular and seasonal differencing in the
automatic differencing procedure. Acceptable inputs for regular
differencing are 1 and 2. The maximum order for seasonal differencing
is 1. If ``diff`` is specified then ``maxdiff`` should be None.
Otherwise, ``diff`` will be ignored. See also ``diff``.
diff : tuple
Fixes the orders of differencing for the regular and seasonal
differencing. Regular differencing may be 0, 1, or 2. Seasonal
differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
``diff`` is ignored.
exog : array_like
Exogenous variables.
log : bool or None
If None, it is automatically determined whether to log the series or
not. If False, logs are not taken. If True, logs are taken.
outlier : bool
Whether or not outliers are tested for and corrected, if detected.
trading : bool
Whether or not trading day effects are tested for.
forecast_periods : int
Number of forecasts produced. The default is None.
start : str, datetime
Must be given if ``endog`` does not have date information in its index.
Anything accepted by pandas.DatetimeIndex for the start value.
freq : str
Must be givein if ``endog`` does not have date information in its
index. Anything accepted by pandas.DatetimeIndex for the freq value.
print_stdout : bool
The stdout from X12/X13 is suppressed. To print it out, set this
to True. Default is False.
x12path : str or None
The path to x12 or x13 binary. If None, the program will attempt
to find x13as or x12a on the PATH or by looking at X13PATH or X12PATH
depending on the value of prefer_x13.
prefer_x13 : bool
If True, will look for x13as first and will fallback to the X13PATH
environmental variable. If False, will look for x12a first and will
fallback to the X12PATH environmental variable. If x12path points
to the path for the X12/X13 binary, it does nothing.
tempdir : str
The path to where temporary files are created by the function.
If None, files are created in the default temporary file location.
Returns
-------
Bunch
A bunch object containing the listed attributes.
- order : tuple
The regular order.
- sorder : tuple
The seasonal order.
- include_mean : bool
Whether to include a mean or not.
- results : str
The full results from the X12/X13 analysis.
- stdout : str
The captured stdout from the X12/X13 analysis.
Notes
-----
This works by creating a specification file, writing it to a temporary
directory, invoking X12/X13 in a subprocess, and reading the output back
in. | x13_arima_select_order | python | statsmodels/statsmodels | statsmodels/tsa/x13.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/x13.py | BSD-3-Clause |
def is_dummy(self) -> bool:
"""Flag indicating whether the values produced are dummy variables"""
return self._is_dummy | Flag indicating whether the values produced are dummy variables | is_dummy | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def in_sample(self, index: Sequence[Hashable]) -> pd.DataFrame:
"""
Produce deterministic trends for in-sample fitting.
Parameters
----------
index : index_like
An index-like object. If not an index, it is converted to an
index.
Returns
-------
DataFrame
A DataFrame containing the deterministic terms.
""" | Produce deterministic trends for in-sample fitting.
Parameters
----------
index : index_like
An index-like object. If not an index, it is converted to an
index.
Returns
-------
DataFrame
A DataFrame containing the deterministic terms. | in_sample | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def out_of_sample(
self,
steps: int,
index: Sequence[Hashable],
forecast_index: Optional[Sequence[Hashable]] = None,
) -> pd.DataFrame:
"""
Produce deterministic trends for out-of-sample forecasts
Parameters
----------
steps : int
The number of steps to forecast
index : index_like
An index-like object. If not an index, it is converted to an
index.
forecast_index : index_like
An Index or index-like object to use for the forecasts. If
provided must have steps elements.
Returns
-------
DataFrame
A DataFrame containing the deterministic terms.
""" | Produce deterministic trends for out-of-sample forecasts
Parameters
----------
steps : int
The number of steps to forecast
index : index_like
An index-like object. If not an index, it is converted to an
index.
forecast_index : index_like
An Index or index-like object to use for the forecasts. If
provided must have steps elements.
Returns
-------
DataFrame
A DataFrame containing the deterministic terms. | out_of_sample | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def __str__(self) -> str:
"""A meaningful string representation of the term""" | A meaningful string representation of the term | __str__ | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def _eq_attr(self) -> tuple[Hashable, ...]:
"""tuple of attributes that are used for equality comparison""" | tuple of attributes that are used for equality comparison | _eq_attr | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def _extend_index(
index: pd.Index,
steps: int,
forecast_index: Optional[Sequence[Hashable]] = None,
) -> pd.Index:
"""Extend the forecast index"""
if forecast_index is not None:
forecast_index = DeterministicTerm._index_like(forecast_index)
assert isinstance(forecast_index, pd.Index)
if forecast_index.shape[0] != steps:
raise ValueError(
"The number of values in forecast_index "
f"({forecast_index.shape[0]}) must match steps ({steps})."
)
return forecast_index
if isinstance(index, pd.PeriodIndex):
return pd.period_range(
index[-1] + 1, periods=steps, freq=index.freq
)
elif isinstance(index, pd.DatetimeIndex) and index.freq is not None:
next_obs = pd.date_range(index[-1], freq=index.freq, periods=2)[1]
return pd.date_range(next_obs, freq=index.freq, periods=steps)
elif isinstance(index, pd.RangeIndex):
assert isinstance(index, pd.RangeIndex)
try:
step = index.step
start = index.stop
except AttributeError:
# TODO: Remove after pandas min ver is 1.0.0+
step = index[-1] - index[-2] if len(index) > 1 else 1
start = index[-1] + step
stop = start + step * steps
return pd.RangeIndex(start, stop, step=step)
elif is_int_index(index) and np.all(np.diff(index) == 1):
idx_arr = np.arange(index[-1] + 1, index[-1] + steps + 1)
return pd.Index(idx_arr)
# default range index
import warnings
warnings.warn(
"Only PeriodIndexes, DatetimeIndexes with a frequency set, "
"RangesIndexes, and Index with a unit increment support "
"extending. The index is set will contain the position relative "
"to the data length.",
UserWarning,
stacklevel=2,
)
nobs = index.shape[0]
return pd.RangeIndex(nobs + 1, nobs + steps + 1) | Extend the forecast index | _extend_index | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def constant(self) -> bool:
"""Flag indicating that a constant is included"""
return self._constant | Flag indicating that a constant is included | constant | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def order(self) -> int:
"""Order of the time trend"""
return self._order | Order of the time trend | order | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def from_string(cls, trend: str) -> "TimeTrend":
"""
Create a TimeTrend from a string description.
Provided for compatibility with common string names.
Parameters
----------
trend : {"n", "c", "t", "ct", "ctt"}
The string representation of the time trend. The terms are:
* "n": No trend terms
* "c": A constant only
* "t": Linear time trend only
* "ct": A constant and a time trend
* "ctt": A constant, a time trend and a quadratic time trend
Returns
-------
TimeTrend
The TimeTrend instance.
"""
constant = trend.startswith("c")
order = 0
if "tt" in trend:
order = 2
elif "t" in trend:
order = 1
return cls(constant=constant, order=order) | Create a TimeTrend from a string description.
Provided for compatibility with common string names.
Parameters
----------
trend : {"n", "c", "t", "ct", "ctt"}
The string representation of the time trend. The terms are:
* "n": No trend terms
* "c": A constant only
* "t": Linear time trend only
* "ct": A constant and a time trend
* "ctt": A constant, a time trend and a quadratic time trend
Returns
-------
TimeTrend
The TimeTrend instance. | from_string | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def period(self) -> int:
"""The period of the seasonality"""
return self._period | The period of the seasonality | period | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def initial_period(self) -> int:
"""The seasonal index of the first observation"""
return self._initial_period | The seasonal index of the first observation | initial_period | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def from_index(
cls, index: Union[Sequence[Hashable], pd.DatetimeIndex, pd.PeriodIndex]
) -> "Seasonality":
"""
Construct a seasonality directly from an index using its frequency.
Parameters
----------
index : {DatetimeIndex, PeriodIndex}
An index with its frequency (`freq`) set.
Returns
-------
Seasonality
The initialized Seasonality instance.
"""
index = cls._index_like(index)
if isinstance(index, pd.PeriodIndex):
freq = index.freq
elif isinstance(index, pd.DatetimeIndex):
freq = index.freq if index.freq else index.inferred_freq
else:
raise TypeError("index must be a DatetimeIndex or PeriodIndex")
if freq is None:
raise ValueError("index must have a freq or inferred_freq set")
period = freq_to_period(freq)
return cls(period=period) | Construct a seasonality directly from an index using its frequency.
Parameters
----------
index : {DatetimeIndex, PeriodIndex}
An index with its frequency (`freq`) set.
Returns
-------
Seasonality
The initialized Seasonality instance. | from_index | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def order(self) -> int:
"""The order of the Fourier terms included"""
return self._order | The order of the Fourier terms included | order | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def period(self) -> float:
"""The period of the Fourier terms"""
return self._period | The period of the Fourier terms | period | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def freq(self) -> str:
"""The frequency of the deterministic terms"""
return self._freq.freqstr | The frequency of the deterministic terms | freq | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def period(self) -> str:
"""The full period"""
return self._period | The full period | period | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def base_period(self) -> Optional[str]:
"""The base period"""
return self._base_period | The base period | base_period | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def from_string(
cls,
freq: str,
trend: str,
base_period: Optional[Union[str, DateLike]] = None,
) -> "CalendarTimeTrend":
"""
Create a TimeTrend from a string description.
Provided for compatibility with common string names.
Parameters
----------
freq : str
A string convertible to a pandas frequency.
trend : {"n", "c", "t", "ct", "ctt"}
The string representation of the time trend. The terms are:
* "n": No trend terms
* "c": A constant only
* "t": Linear time trend only
* "ct": A constant and a time trend
* "ctt": A constant, a time trend and a quadratic time trend
base_period : {str, pd.Timestamp}, default None
The base period to use when computing the time stamps. This value
is treated as 1 and so all other time indices are defined as the
number of periods since or before this time stamp. If not
provided, defaults to pandas base period for a PeriodIndex.
Returns
-------
TimeTrend
The TimeTrend instance.
"""
constant = trend.startswith("c")
order = 0
if "tt" in trend:
order = 2
elif "t" in trend:
order = 1
return cls(freq, constant, order, base_period=base_period) | Create a TimeTrend from a string description.
Provided for compatibility with common string names.
Parameters
----------
freq : str
A string convertible to a pandas frequency.
trend : {"n", "c", "t", "ct", "ctt"}
The string representation of the time trend. The terms are:
* "n": No trend terms
* "c": A constant only
* "t": Linear time trend only
* "ct": A constant and a time trend
* "ctt": A constant, a time trend and a quadratic time trend
base_period : {str, pd.Timestamp}, default None
The base period to use when computing the time stamps. This value
is treated as 1 and so all other time indices are defined as the
number of periods since or before this time stamp. If not
provided, defaults to pandas base period for a PeriodIndex.
Returns
-------
TimeTrend
The TimeTrend instance. | from_string | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def __init__(
self,
index: Union[Sequence[Hashable], pd.Index],
*,
period: Optional[Union[float, int]] = None,
constant: bool = False,
order: int = 0,
seasonal: bool = False,
fourier: int = 0,
additional_terms: Sequence[DeterministicTerm] = (),
drop: bool = False,
):
if not isinstance(index, pd.Index):
index = pd.Index(index)
self._index = index
self._deterministic_terms: list[DeterministicTerm] = []
self._extendable = False
self._index_freq = None
self._validate_index()
period = float_like(period, "period", optional=True)
self._constant = constant = bool_like(constant, "constant")
self._order = required_int_like(order, "order")
self._seasonal = seasonal = bool_like(seasonal, "seasonal")
self._fourier = required_int_like(fourier, "fourier")
additional_terms = tuple(additional_terms)
self._cached_in_sample = None
self._drop = bool_like(drop, "drop")
self._additional_terms = additional_terms
if constant or order:
self._deterministic_terms.append(TimeTrend(constant, order))
if seasonal and fourier:
raise ValueError(
"""seasonal and fourier can be initialized through the \
constructor since these will be necessarily perfectly collinear. Instead, \
you can pass additional components using the additional_terms input."""
)
if (seasonal or fourier) and period is None:
if period is None:
self._period = period = freq_to_period(self._index_freq)
if seasonal:
period = required_int_like(period, "period")
self._deterministic_terms.append(Seasonality(period))
elif fourier:
period = float_like(period, "period")
assert period is not None
self._deterministic_terms.append(Fourier(period, order=fourier))
for term in additional_terms:
if not isinstance(term, DeterministicTerm):
raise TypeError(
"All additional terms must be instances of subsclasses "
"of DeterministicTerm"
)
if term not in self._deterministic_terms:
self._deterministic_terms.append(term)
else:
raise ValueError(
"One or more terms in additional_terms has been added "
"through the parameters of the constructor. Terms must "
"be unique."
)
self._period = period
self._retain_cols: Optional[list[Hashable]] = None | seasonal and fourier can be initialized through the \
constructor since these will be necessarily perfectly collinear. Instead, \
you can pass additional components using the additional_terms input. | __init__ | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def index(self) -> pd.Index:
"""The index of the process"""
return self._index | The index of the process | index | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def terms(self) -> list[DeterministicTerm]:
"""The deterministic terms included in the process"""
return self._deterministic_terms | The deterministic terms included in the process | terms | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def range(
self,
start: Union[IntLike, DateLike, str],
stop: Union[IntLike, DateLike, str],
) -> pd.DataFrame:
"""
Deterministic terms spanning a range of observations
Parameters
----------
start : {int, str, dt.datetime, pd.Timestamp, np.datetime64}
The first observation.
stop : {int, str, dt.datetime, pd.Timestamp, np.datetime64}
The final observation. Inclusive to match most prediction
function in statsmodels.
Returns
-------
DataFrame
A data frame of deterministic terms
"""
if not self._extendable:
raise TypeError(
"""The index in the deterministic process does not \
support extension. Only PeriodIndex, DatetimeIndex with a frequency, \
RangeIndex, and integral Indexes that start at 0 and have only unit \
differences can be extended when producing out-of-sample forecasts.
"""
)
if type(self._index) in (pd.RangeIndex,) or is_int_index(self._index):
start = required_int_like(start, "start")
stop = required_int_like(stop, "stop")
# Add 1 to ensure that the end point is inclusive
stop += 1
return self._range_from_range_index(start, stop)
if isinstance(start, (int, np.integer)):
start = self._int_to_timestamp(start, "start")
else:
start = pd.Timestamp(start)
if isinstance(stop, (int, np.integer)):
stop = self._int_to_timestamp(stop, "stop")
else:
stop = pd.Timestamp(stop)
return self._range_from_time_index(start, stop) | Deterministic terms spanning a range of observations
Parameters
----------
start : {int, str, dt.datetime, pd.Timestamp, np.datetime64}
The first observation.
stop : {int, str, dt.datetime, pd.Timestamp, np.datetime64}
The final observation. Inclusive to match most prediction
function in statsmodels.
Returns
-------
DataFrame
A data frame of deterministic terms | range | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def apply(self, index):
"""
Create an identical determinstic process with a different index
Parameters
----------
index : index_like
An index-like object. If not an index, it is converted to an
index.
Returns
-------
DeterministicProcess
The deterministic process applied to a different index
"""
return DeterministicProcess(
index,
period=self._period,
constant=self._constant,
order=self._order,
seasonal=self._seasonal,
fourier=self._fourier,
additional_terms=self._additional_terms,
drop=self._drop,
) | Create an identical determinstic process with a different index
Parameters
----------
index : index_like
An index-like object. If not an index, it is converted to an
index.
Returns
-------
DeterministicProcess
The deterministic process applied to a different index | apply | python | statsmodels/statsmodels | statsmodels/tsa/deterministic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/deterministic.py | BSD-3-Clause |
def varfilter(x, a):
'''apply an autoregressive filter to a series x
Warning: I just found out that convolve does not work as I
thought, this likely does not work correctly for
nvars>3
x can be 2d, a can be 1d, 2d, or 3d
Parameters
----------
x : array_like
data array, 1d or 2d, if 2d then observations in rows
a : array_like
autoregressive filter coefficients, ar lag polynomial
see Notes
Returns
-------
y : ndarray, 2d
filtered array, number of columns determined by x and a
Notes
-----
In general form this uses the linear filter ::
y = a(L)x
where
x : nobs, nvars
a : nlags, nvars, npoly
Depending on the shape and dimension of a this uses different
Lag polynomial arrays
case 1 : a is 1d or (nlags,1)
one lag polynomial is applied to all variables (columns of x)
case 2 : a is 2d, (nlags, nvars)
each series is independently filtered with its own
lag polynomial, uses loop over nvar
case 3 : a is 3d, (nlags, nvars, npoly)
the ith column of the output array is given by the linear filter
defined by the 2d array a[:,:,i], i.e. ::
y[:,i] = a(.,.,i)(L) * x
y[t,i] = sum_p sum_j a(p,j,i)*x(t-p,j)
for p = 0,...nlags-1, j = 0,...nvars-1,
for all t >= nlags
Note: maybe convert to axis=1, Not
TODO: initial conditions
'''
x = np.asarray(x)
a = np.asarray(a)
if x.ndim == 1:
x = x[:,None]
if x.ndim > 2:
raise ValueError('x array has to be 1d or 2d')
nvar = x.shape[1]
nlags = a.shape[0]
ntrim = nlags//2
# for x is 2d with ncols >1
if a.ndim == 1:
# case: identical ar filter (lag polynomial)
return signal.convolve(x, a[:,None], mode='valid')
# alternative:
#return signal.lfilter(a,[1],x.astype(float),axis=0)
elif a.ndim == 2:
if min(a.shape) == 1:
# case: identical ar filter (lag polynomial)
return signal.convolve(x, a, mode='valid')
# case: independent ar
#(a bit like recserar in gauss, but no x yet)
#(no, reserar is inverse filter)
result = np.zeros((x.shape[0]-nlags+1, nvar))
for i in range(nvar):
# could also use np.convolve, but easier for swiching to fft
result[:,i] = signal.convolve(x[:,i], a[:,i], mode='valid')
return result
elif a.ndim == 3:
# case: vector autoregressive with lag matrices
# Note: we must have shape[1] == shape[2] == nvar
yf = signal.convolve(x[:,:,None], a)
yvalid = yf[ntrim:-ntrim, yf.shape[1]//2,:]
return yvalid | apply an autoregressive filter to a series x
Warning: I just found out that convolve does not work as I
thought, this likely does not work correctly for
nvars>3
x can be 2d, a can be 1d, 2d, or 3d
Parameters
----------
x : array_like
data array, 1d or 2d, if 2d then observations in rows
a : array_like
autoregressive filter coefficients, ar lag polynomial
see Notes
Returns
-------
y : ndarray, 2d
filtered array, number of columns determined by x and a
Notes
-----
In general form this uses the linear filter ::
y = a(L)x
where
x : nobs, nvars
a : nlags, nvars, npoly
Depending on the shape and dimension of a this uses different
Lag polynomial arrays
case 1 : a is 1d or (nlags,1)
one lag polynomial is applied to all variables (columns of x)
case 2 : a is 2d, (nlags, nvars)
each series is independently filtered with its own
lag polynomial, uses loop over nvar
case 3 : a is 3d, (nlags, nvars, npoly)
the ith column of the output array is given by the linear filter
defined by the 2d array a[:,:,i], i.e. ::
y[:,i] = a(.,.,i)(L) * x
y[t,i] = sum_p sum_j a(p,j,i)*x(t-p,j)
for p = 0,...nlags-1, j = 0,...nvars-1,
for all t >= nlags
Note: maybe convert to axis=1, Not
TODO: initial conditions | varfilter | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def varinversefilter(ar, nobs, version=1):
'''creates inverse ar filter (MA representation) recursively
The VAR lag polynomial is defined by ::
ar(L) y_t = u_t or
y_t = -ar_{-1}(L) y_{t-1} + u_t
the returned lagpolynomial is arinv(L)=ar^{-1}(L) in ::
y_t = arinv(L) u_t
Parameters
----------
ar : ndarray, (nlags,nvars,nvars)
matrix lagpolynomial, currently no exog
first row should be identity
Returns
-------
arinv : ndarray, (nobs,nvars,nvars)
Notes
-----
'''
nlags, nvars, nvarsex = ar.shape
if nvars != nvarsex:
print('exogenous variables not implemented not tested')
arinv = np.zeros((nobs+1, nvarsex, nvars))
arinv[0,:,:] = ar[0]
arinv[1:nlags,:,:] = -ar[1:]
if version == 1:
for i in range(2,nobs+1):
tmp = np.zeros((nvars,nvars))
for p in range(1,nlags):
tmp += np.dot(-ar[p],arinv[i-p,:,:])
arinv[i,:,:] = tmp
if version == 0:
for i in range(nlags+1,nobs+1):
print(ar[1:].shape, arinv[i-1:i-nlags:-1,:,:].shape)
#arinv[i,:,:] = np.dot(-ar[1:],arinv[i-1:i-nlags:-1,:,:])
#print(np.tensordot(-ar[1:],arinv[i-1:i-nlags:-1,:,:],axes=([2],[1])).shape
#arinv[i,:,:] = np.tensordot(-ar[1:],arinv[i-1:i-nlags:-1,:,:],axes=([2],[1]))
raise NotImplementedError('waiting for generalized ufuncs or something')
return arinv | creates inverse ar filter (MA representation) recursively
The VAR lag polynomial is defined by ::
ar(L) y_t = u_t or
y_t = -ar_{-1}(L) y_{t-1} + u_t
the returned lagpolynomial is arinv(L)=ar^{-1}(L) in ::
y_t = arinv(L) u_t
Parameters
----------
ar : ndarray, (nlags,nvars,nvars)
matrix lagpolynomial, currently no exog
first row should be identity
Returns
-------
arinv : ndarray, (nobs,nvars,nvars)
Notes
----- | varinversefilter | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def vargenerate(ar, u, initvalues=None):
'''generate an VAR process with errors u
similar to gauss
uses loop
Parameters
----------
ar : array (nlags,nvars,nvars)
matrix lagpolynomial
u : array (nobs,nvars)
exogenous variable, error term for VAR
Returns
-------
sar : array (1+nobs,nvars)
sample of var process, inverse filtered u
does not trim initial condition y_0 = 0
Examples
--------
# generate random sample of VAR
nobs, nvars = 10, 2
u = numpy.random.randn(nobs,nvars)
a21 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0. ],
[ 0., -0.6]]])
vargenerate(a21,u)
# Impulse Response to an initial shock to the first variable
imp = np.zeros((nobs, nvars))
imp[0,0] = 1
vargenerate(a21,imp)
'''
nlags, nvars, nvarsex = ar.shape
nlagsm1 = nlags - 1
nobs = u.shape[0]
if nvars != nvarsex:
print('exogenous variables not implemented not tested')
if u.shape[1] != nvars:
raise ValueError('u needs to have nvars columns')
if initvalues is None:
sar = np.zeros((nobs+nlagsm1, nvars))
start = nlagsm1
else:
start = max(nlagsm1, initvalues.shape[0])
sar = np.zeros((nobs+start, nvars))
sar[start-initvalues.shape[0]:start] = initvalues
#sar[nlagsm1:] = u
sar[start:] = u
#if version == 1:
for i in range(start,start+nobs):
for p in range(1,nlags):
sar[i] += np.dot(sar[i-p,:],-ar[p])
return sar | generate an VAR process with errors u
similar to gauss
uses loop
Parameters
----------
ar : array (nlags,nvars,nvars)
matrix lagpolynomial
u : array (nobs,nvars)
exogenous variable, error term for VAR
Returns
-------
sar : array (1+nobs,nvars)
sample of var process, inverse filtered u
does not trim initial condition y_0 = 0
Examples
--------
# generate random sample of VAR
nobs, nvars = 10, 2
u = numpy.random.randn(nobs,nvars)
a21 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0. ],
[ 0., -0.6]]])
vargenerate(a21,u)
# Impulse Response to an initial shock to the first variable
imp = np.zeros((nobs, nvars))
imp[0,0] = 1
vargenerate(a21,imp) | vargenerate | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def padone(x, front=0, back=0, axis=0, fillvalue=0):
'''pad with zeros along one axis, currently only axis=0
can be used sequentially to pad several axis
Examples
--------
>>> padone(np.ones((2,3)),1,3,axis=1)
array([[ 0., 1., 1., 1., 0., 0., 0.],
[ 0., 1., 1., 1., 0., 0., 0.]])
>>> padone(np.ones((2,3)),1,1, fillvalue=np.nan)
array([[ NaN, NaN, NaN],
[ 1., 1., 1.],
[ 1., 1., 1.],
[ NaN, NaN, NaN]])
'''
#primitive version
shape = np.array(x.shape)
shape[axis] += (front + back)
shapearr = np.array(x.shape)
out = np.empty(shape)
out.fill(fillvalue)
startind = np.zeros(x.ndim)
startind[axis] = front
endind = startind + shapearr
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
#print(myslice
#print(out.shape
#print(out[tuple(myslice)].shape
out[tuple(myslice)] = x
return out | pad with zeros along one axis, currently only axis=0
can be used sequentially to pad several axis
Examples
--------
>>> padone(np.ones((2,3)),1,3,axis=1)
array([[ 0., 1., 1., 1., 0., 0., 0.],
[ 0., 1., 1., 1., 0., 0., 0.]])
>>> padone(np.ones((2,3)),1,1, fillvalue=np.nan)
array([[ NaN, NaN, NaN],
[ 1., 1., 1.],
[ 1., 1., 1.],
[ NaN, NaN, NaN]]) | padone | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def trimone(x, front=0, back=0, axis=0):
'''trim number of array elements along one axis
Examples
--------
>>> xp = padone(np.ones((2,3)),1,3,axis=1)
>>> xp
array([[ 0., 1., 1., 1., 0., 0., 0.],
[ 0., 1., 1., 1., 0., 0., 0.]])
>>> trimone(xp,1,3,1)
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
'''
shape = np.array(x.shape)
shape[axis] -= (front + back)
#print(shape, front, back
startind = np.zeros(x.ndim)
startind[axis] = front
endind = startind + shape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
#print(myslice
#print(shape, endind
#print(x[tuple(myslice)].shape
return x[tuple(myslice)] | trim number of array elements along one axis
Examples
--------
>>> xp = padone(np.ones((2,3)),1,3,axis=1)
>>> xp
array([[ 0., 1., 1., 1., 0., 0., 0.],
[ 0., 1., 1., 1., 0., 0., 0.]])
>>> trimone(xp,1,3,1)
array([[ 1., 1., 1.],
[ 1., 1., 1.]]) | trimone | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def ar2full(ar):
'''make reduced lagpolynomial into a right side lagpoly array
'''
nlags, nvar,nvarex = ar.shape
return np.r_[np.eye(nvar,nvarex)[None,:,:],-ar] | make reduced lagpolynomial into a right side lagpoly array | ar2full | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def ar2lhs(ar):
'''convert full (rhs) lagpolynomial into a reduced, left side lagpoly array
this is mainly a reminder about the definition
'''
return -ar[1:] | convert full (rhs) lagpolynomial into a reduced, left side lagpoly array
this is mainly a reminder about the definition | ar2lhs | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def fit(self, nlags):
'''estimate parameters using ols
Parameters
----------
nlags : int
number of lags to include in regression, same for all variables
Returns
-------
None, but attaches
arhat : array (nlags, nvar, nvar)
full lag polynomial array
arlhs : array (nlags-1, nvar, nvar)
reduced lag polynomial for left hand side
other statistics as returned by linalg.lstsq : need to be completed
This currently assumes all parameters are estimated without restrictions.
In this case SUR is identical to OLS
estimation results are attached to the class instance
'''
self.nlags = nlags # without current period
nvars = self.nvars
#TODO: ar2s looks like a module variable, bug?
#lmat = lagmat(ar2s, nlags, trim='both', original='in')
lmat = lagmat(self.y, nlags, trim='both', original='in')
self.yred = lmat[:,:nvars]
self.xred = lmat[:,nvars:]
res = np.linalg.lstsq(self.xred, self.yred, rcond=-1)
self.estresults = res
self.arlhs = res[0].reshape(nlags, nvars, nvars)
self.arhat = ar2full(self.arlhs)
self.rss = res[1]
self.xredrank = res[2] | estimate parameters using ols
Parameters
----------
nlags : int
number of lags to include in regression, same for all variables
Returns
-------
None, but attaches
arhat : array (nlags, nvar, nvar)
full lag polynomial array
arlhs : array (nlags-1, nvar, nvar)
reduced lag polynomial for left hand side
other statistics as returned by linalg.lstsq : need to be completed
This currently assumes all parameters are estimated without restrictions.
In this case SUR is identical to OLS
estimation results are attached to the class instance | fit | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def predict(self):
'''calculate estimated timeseries (yhat) for sample
'''
if not hasattr(self, 'yhat'):
self.yhat = varfilter(self.y, self.arhat)
return self.yhat | calculate estimated timeseries (yhat) for sample | predict | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def covmat(self):
''' covariance matrix of estimate
# not sure it's correct, need to check orientation everywhere
# looks ok, display needs getting used to
>>> v.rss[None,None,:]*np.linalg.inv(np.dot(v.xred.T,v.xred))[:,:,None]
array([[[ 0.37247445, 0.32210609],
[ 0.1002642 , 0.08670584]],
[[ 0.1002642 , 0.08670584],
[ 0.45903637, 0.39696255]]])
>>>
>>> v.rss[0]*np.linalg.inv(np.dot(v.xred.T,v.xred))
array([[ 0.37247445, 0.1002642 ],
[ 0.1002642 , 0.45903637]])
>>> v.rss[1]*np.linalg.inv(np.dot(v.xred.T,v.xred))
array([[ 0.32210609, 0.08670584],
[ 0.08670584, 0.39696255]])
'''
#check if orientation is same as self.arhat
self.paramcov = (self.rss[None,None,:] *
np.linalg.inv(np.dot(self.xred.T, self.xred))[:,:,None]) | covariance matrix of estimate
# not sure it's correct, need to check orientation everywhere
# looks ok, display needs getting used to
>>> v.rss[None,None,:]*np.linalg.inv(np.dot(v.xred.T,v.xred))[:,:,None]
array([[[ 0.37247445, 0.32210609],
[ 0.1002642 , 0.08670584]],
[[ 0.1002642 , 0.08670584],
[ 0.45903637, 0.39696255]]])
>>>
>>> v.rss[0]*np.linalg.inv(np.dot(v.xred.T,v.xred))
array([[ 0.37247445, 0.1002642 ],
[ 0.1002642 , 0.45903637]])
>>> v.rss[1]*np.linalg.inv(np.dot(v.xred.T,v.xred))
array([[ 0.32210609, 0.08670584],
[ 0.08670584, 0.39696255]]) | covmat | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def forecast(self, horiz=1, u=None):
'''calculates forcast for horiz number of periods at end of sample
Parameters
----------
horiz : int (optional, default=1)
forecast horizon
u : array (horiz, nvars)
error term for forecast periods. If None, then u is zero.
Returns
-------
yforecast : array (nobs+horiz, nvars)
this includes the sample and the forecasts
'''
if u is None:
u = np.zeros((horiz, self.nvars))
return vargenerate(self.arhat, u, initvalues=self.y) | calculates forcast for horiz number of periods at end of sample
Parameters
----------
horiz : int (optional, default=1)
forecast horizon
u : array (horiz, nvars)
error term for forecast periods. If None, then u is zero.
Returns
-------
yforecast : array (nobs+horiz, nvars)
this includes the sample and the forecasts | forecast | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def vstack(self, a=None, name='ar'):
'''stack lagpolynomial vertically in 2d array
'''
if a is not None:
a = a
elif name == 'ar':
a = self.ar
elif name == 'ma':
a = self.ma
else:
raise ValueError('no array or name given')
return a.reshape(-1, self.nvarall) | stack lagpolynomial vertically in 2d array | vstack | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def hstack(self, a=None, name='ar'):
'''stack lagpolynomial horizontally in 2d array
'''
if a is not None:
a = a
elif name == 'ar':
a = self.ar
elif name == 'ma':
a = self.ma
else:
raise ValueError('no array or name given')
return a.swapaxes(1,2).reshape(-1, self.nvarall).T | stack lagpolynomial horizontally in 2d array | hstack | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def stacksquare(self, a=None, name='ar', orientation='vertical'):
'''stack lagpolynomial vertically in 2d square array with eye
'''
if a is not None:
a = a
elif name == 'ar':
a = self.ar
elif name == 'ma':
a = self.ma
else:
raise ValueError('no array or name given')
astacked = a.reshape(-1, self.nvarall)
lenpk, nvars = astacked.shape #[0]
amat = np.eye(lenpk, k=nvars)
amat[:,:nvars] = astacked
return amat | stack lagpolynomial vertically in 2d square array with eye | stacksquare | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def vstackarma_minus1(self):
'''stack ar and lagpolynomial vertically in 2d array
'''
a = np.concatenate((self.ar[1:], self.ma[1:]),0)
return a.reshape(-1, self.nvarall) | stack ar and lagpolynomial vertically in 2d array | vstackarma_minus1 | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def hstackarma_minus1(self):
'''stack ar and lagpolynomial vertically in 2d array
this is the Kalman Filter representation, I think
'''
a = np.concatenate((self.ar[1:], self.ma[1:]),0)
return a.swapaxes(1,2).reshape(-1, self.nvarall) | stack ar and lagpolynomial vertically in 2d array
this is the Kalman Filter representation, I think | hstackarma_minus1 | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def getisstationary(self, a=None):
'''check whether the auto-regressive lag-polynomial is stationary
Returns
-------
isstationary : bool
*attaches*
areigenvalues : complex array
eigenvalues sorted by absolute value
References
----------
formula taken from NAG manual
'''
if a is not None:
a = a
else:
if self.isstructured:
a = -self.reduceform(self.ar)[1:]
else:
a = -self.ar[1:]
amat = self.stacksquare(a)
ev = np.sort(np.linalg.eigvals(amat))[::-1]
self.areigenvalues = ev
return (np.abs(ev) < 1).all() | check whether the auto-regressive lag-polynomial is stationary
Returns
-------
isstationary : bool
*attaches*
areigenvalues : complex array
eigenvalues sorted by absolute value
References
----------
formula taken from NAG manual | getisstationary | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def getisinvertible(self, a=None):
'''check whether the auto-regressive lag-polynomial is stationary
Returns
-------
isinvertible : bool
*attaches*
maeigenvalues : complex array
eigenvalues sorted by absolute value
References
----------
formula taken from NAG manual
'''
if a is not None:
a = a
else:
if self.isindependent:
a = self.reduceform(self.ma)[1:]
else:
a = self.ma[1:]
if a.shape[0] == 0:
# no ma lags
self.maeigenvalues = np.array([], np.complex)
return True
amat = self.stacksquare(a)
ev = np.sort(np.linalg.eigvals(amat))[::-1]
self.maeigenvalues = ev
return (np.abs(ev) < 1).all() | check whether the auto-regressive lag-polynomial is stationary
Returns
-------
isinvertible : bool
*attaches*
maeigenvalues : complex array
eigenvalues sorted by absolute value
References
----------
formula taken from NAG manual | getisinvertible | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def reduceform(self, apoly):
'''
this assumes no exog, todo
'''
if apoly.ndim != 3:
raise ValueError('apoly needs to be 3d')
nlags, nvarsex, nvars = apoly.shape
a = np.empty_like(apoly)
try:
a0inv = np.linalg.inv(a[0,:nvars, :])
except np.linalg.LinAlgError:
raise ValueError('matrix not invertible',
'ask for implementation of pinv')
for lag in range(nlags):
a[lag] = np.dot(a0inv, apoly[lag])
return a | this assumes no exog, todo | reduceform | python | statsmodels/statsmodels | statsmodels/tsa/varma_process.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/varma_process.py | BSD-3-Clause |
def sumofsq(x: np.ndarray, axis: int = 0) -> float | np.ndarray:
"""Helper function to calculate sum of squares along first axis"""
return np.sum(x**2, axis=axis) | Helper function to calculate sum of squares along first axis | sumofsq | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def _get_period(data: pd.DatetimeIndex | pd.PeriodIndex, index_freq) -> int:
"""Shared helper to get period from frequenc or raise"""
if data.freq:
return freq_to_period(index_freq)
raise ValueError(
"freq cannot be inferred from endog and model includes seasonal "
"terms. The number of periods must be explicitly set when the "
"endog's index does not contain a frequency."
) | Shared helper to get period from frequenc or raise | _get_period | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def ar_lags(self) -> list[int] | None:
"""The autoregressive lags included in the model"""
lags = list(self._lags)
return None if not lags else lags | The autoregressive lags included in the model | ar_lags | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def hold_back(self) -> int | None:
"""The number of initial obs. excluded from the estimation sample."""
return self._hold_back | The number of initial obs. excluded from the estimation sample. | hold_back | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def trend(self) -> Literal["n", "c", "ct", "ctt"]:
"""The trend used in the model."""
return self._trend | The trend used in the model. | trend | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def seasonal(self) -> bool:
"""Flag indicating that the model contains a seasonal component."""
return self._seasonal | Flag indicating that the model contains a seasonal component. | seasonal | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def deterministic(self) -> DeterministicProcess | None:
"""The deterministic used to construct the model"""
return self._deterministics if self._user_deterministic else None | The deterministic used to construct the model | deterministic | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def period(self) -> int | None:
"""The period of the seasonal component."""
return self._period | The period of the seasonal component. | period | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def df_model(self) -> int:
"""The model degrees of freedom."""
return self._x.shape[1] | The model degrees of freedom. | df_model | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def exog_names(self) -> list[str] | None:
"""Names of exogenous variables included in model"""
return self._exog_names | Names of exogenous variables included in model | exog_names | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def initialize(self) -> None:
"""Initialize the model (no-op)."""
pass | Initialize the model (no-op). | initialize | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def fit(
self,
cov_type: str = "nonrobust",
cov_kwds: dict[str, Any] | None = None,
use_t: bool = False,
) -> AutoRegResultsWrapper:
"""
Estimate the model parameters.
Parameters
----------
cov_type : str
The covariance estimator to use. The most common choices are listed
below. Supports all covariance estimators that are available
in ``OLS.fit``.
* 'nonrobust' - The class OLS covariance estimator that assumes
homoskedasticity.
* 'HC0', 'HC1', 'HC2', 'HC3' - Variants of White's
(or Eiker-Huber-White) covariance estimator. `HC0` is the
standard implementation. The other make corrections to improve
the finite sample performance of the heteroskedasticity robust
covariance estimator.
* 'HAC' - Heteroskedasticity-autocorrelation robust covariance
estimation. Supports cov_kwds.
- `maxlags` integer (required) : number of lags to use.
- `kernel` callable or str (optional) : kernel
currently available kernels are ['bartlett', 'uniform'],
default is Bartlett.
- `use_correction` bool (optional) : If true, use small sample
correction.
cov_kwds : dict, optional
A dictionary of keyword arguments to pass to the covariance
estimator. `nonrobust` and `HC#` do not support cov_kwds.
use_t : bool, optional
A flag indicating that inference should use the Student's t
distribution that accounts for model degree of freedom. If False,
uses the normal distribution. If None, defers the choice to
the cov_type. It also removes degree of freedom corrections from
the covariance estimator when cov_type is 'nonrobust'.
Returns
-------
AutoRegResults
Estimation results.
See Also
--------
statsmodels.regression.linear_model.OLS
Ordinary Least Squares estimation.
statsmodels.regression.linear_model.RegressionResults
See ``get_robustcov_results`` for a detailed list of available
covariance estimators and options.
Notes
-----
Use ``OLS`` to estimate model parameters and to estimate parameter
covariance.
"""
# TODO: Determine correction for degree-of-freedom
# Special case parameterless model
if self._x.shape[1] == 0:
return AutoRegResultsWrapper(
AutoRegResults(self, np.empty(0), np.empty((0, 0)))
)
ols_mod = OLS(self._y, self._x)
ols_res = ols_mod.fit(
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t
)
cov_params = ols_res.cov_params()
use_t = ols_res.use_t
if cov_type == "nonrobust" and not use_t:
nobs = self._y.shape[0]
k = self._x.shape[1]
scale = nobs / (nobs - k)
cov_params /= scale
res = AutoRegResults(
self,
ols_res.params,
cov_params,
ols_res.normalized_cov_params,
use_t=use_t,
)
return AutoRegResultsWrapper(res) | Estimate the model parameters.
Parameters
----------
cov_type : str
The covariance estimator to use. The most common choices are listed
below. Supports all covariance estimators that are available
in ``OLS.fit``.
* 'nonrobust' - The class OLS covariance estimator that assumes
homoskedasticity.
* 'HC0', 'HC1', 'HC2', 'HC3' - Variants of White's
(or Eiker-Huber-White) covariance estimator. `HC0` is the
standard implementation. The other make corrections to improve
the finite sample performance of the heteroskedasticity robust
covariance estimator.
* 'HAC' - Heteroskedasticity-autocorrelation robust covariance
estimation. Supports cov_kwds.
- `maxlags` integer (required) : number of lags to use.
- `kernel` callable or str (optional) : kernel
currently available kernels are ['bartlett', 'uniform'],
default is Bartlett.
- `use_correction` bool (optional) : If true, use small sample
correction.
cov_kwds : dict, optional
A dictionary of keyword arguments to pass to the covariance
estimator. `nonrobust` and `HC#` do not support cov_kwds.
use_t : bool, optional
A flag indicating that inference should use the Student's t
distribution that accounts for model degree of freedom. If False,
uses the normal distribution. If None, defers the choice to
the cov_type. It also removes degree of freedom corrections from
the covariance estimator when cov_type is 'nonrobust'.
Returns
-------
AutoRegResults
Estimation results.
See Also
--------
statsmodels.regression.linear_model.OLS
Ordinary Least Squares estimation.
statsmodels.regression.linear_model.RegressionResults
See ``get_robustcov_results`` for a detailed list of available
covariance estimators and options.
Notes
-----
Use ``OLS`` to estimate model parameters and to estimate parameter
covariance. | fit | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def loglike(self, params: ArrayLike) -> float:
"""
Log-likelihood of model.
Parameters
----------
params : ndarray
The model parameters used to compute the log-likelihood.
Returns
-------
float
The log-likelihood value.
"""
nobs = self.nobs
resid = self._resid(params)
ssr = resid @ resid
llf = -(nobs / 2) * (np.log(2 * np.pi) + np.log(ssr / nobs) + 1)
return llf | Log-likelihood of model.
Parameters
----------
params : ndarray
The model parameters used to compute the log-likelihood.
Returns
-------
float
The log-likelihood value. | loglike | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def information(self, params: ArrayLike) -> np.ndarray:
"""
Fisher information matrix of model.
Returns -1 * Hessian of the log-likelihood evaluated at params.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
ndarray
The information matrix.
"""
resid = self._resid(params)
sigma2 = resid @ resid / self.nobs
return (self._x.T @ self._x) * (1 / sigma2) | Fisher information matrix of model.
Returns -1 * Hessian of the log-likelihood evaluated at params.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
ndarray
The information matrix. | information | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def _dynamic_predict(
self,
params: ArrayLike,
start: int,
end: int,
dynamic: int,
num_oos: int,
exog: Float64Array | None,
exog_oos: Float64Array | None,
) -> pd.Series:
"""
:param params:
:param start:
:param end:
:param dynamic:
:param num_oos:
:param exog:
:param exog_oos:
:return:
"""
reg = []
hold_back = self._hold_back
adj = 0
if start < hold_back:
# Adjust start and dynamic
adj = hold_back - start
start += adj
# New offset shifts, but must remain non-negative
dynamic = max(dynamic - adj, 0)
if (start - hold_back) <= self.nobs:
# _x is missing hold_back observations, which is why
# it is shifted by this amount
is_loc = slice(start - hold_back, end + 1 - hold_back)
x = self._x[is_loc]
if exog is not None:
x = x.copy()
# Replace final columns
x[:, -exog.shape[1] :] = exog[start : end + 1]
reg.append(x)
if num_oos > 0:
reg.append(self._setup_oos_forecast(num_oos, exog_oos))
_reg = np.vstack(reg)
det_col_idx = self._x.shape[1] - len(self._lags)
det_col_idx -= 0 if self.exog is None else self.exog.shape[1]
# Simple 1-step static forecasts for dynamic observations
forecasts = np.empty(_reg.shape[0])
forecasts[:dynamic] = _reg[:dynamic] @ params
for h in range(dynamic, _reg.shape[0]):
# Fill in regressor matrix
for j, lag in enumerate(self._lags):
fcast_loc = h - lag
if fcast_loc >= dynamic:
val = forecasts[fcast_loc]
else:
# If before the start of the forecasts, use actual values
val = self.endog[fcast_loc + start]
_reg[h, det_col_idx + j] = val
forecasts[h] = np.squeeze(_reg[h : h + 1] @ params)
return self._wrap_prediction(forecasts, start, end + 1 + num_oos, adj) | :param params:
:param start:
:param end:
:param dynamic:
:param num_oos:
:param exog:
:param exog_oos:
:return: | _dynamic_predict | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def _static_predict(
self,
params: Float64Array,
start: int,
end: int,
num_oos: int,
exog: Float64Array | None,
exog_oos: Float64Array | None,
) -> pd.Series:
"""
Path for static predictions
Parameters
----------
params : ndarray
The model parameters
start : int
Index of first observation
end : int
Index of last in-sample observation. Inclusive, so start:end+1
in slice notation.
num_oos : int
Number of out-of-sample observations, so that the returned size is
num_oos + (end - start + 1).
exog : {ndarray, DataFrame}
Array containing replacement exog values
exog_oos : {ndarray, DataFrame}
Containing forecast exog values
"""
hold_back = self._hold_back
nobs = self.endog.shape[0]
x = np.empty((0, self._x.shape[1]))
# Adjust start to reflect observations lost
adj = max(0, hold_back - start)
start += adj
if start <= nobs:
# Use existing regressors
is_loc = slice(start - hold_back, end + 1 - hold_back)
x = self._x[is_loc]
if exog is not None:
exog_a = np.asarray(exog)
x = x.copy()
# Replace final columns
x[:, -exog_a.shape[1] :] = exog_a[start : end + 1]
in_sample = x @ params
if num_oos == 0: # No out of sample
return self._wrap_prediction(in_sample, start, end + 1, adj)
out_of_sample = self._static_oos_predict(params, num_oos, exog_oos)
prediction = np.hstack((in_sample, out_of_sample))
return self._wrap_prediction(prediction, start, end + 1 + num_oos, adj) | Path for static predictions
Parameters
----------
params : ndarray
The model parameters
start : int
Index of first observation
end : int
Index of last in-sample observation. Inclusive, so start:end+1
in slice notation.
num_oos : int
Number of out-of-sample observations, so that the returned size is
num_oos + (end - start + 1).
exog : {ndarray, DataFrame}
Array containing replacement exog values
exog_oos : {ndarray, DataFrame}
Containing forecast exog values | _static_predict | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def predict(
self,
params: ArrayLike,
start: int | str | datetime.datetime | pd.Timestamp | None = None,
end: int | str | datetime.datetime | pd.Timestamp | None = None,
dynamic: bool | int = False,
exog: ArrayLike2D | None = None,
exog_oos: ArrayLike2D | None = None,
) -> pd.Series:
"""
In-sample prediction and out-of-sample forecasting.
Parameters
----------
params : array_like
The fitted model parameters.
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
parse or a datetime type. Default is the the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out-of-sample prediction. Default is the last observation in
the sample. Unlike standard python slices, end is inclusive so
that all the predictions [start, start+1, ..., end-1, end] are
returned.
dynamic : {bool, int, str, datetime, Timestamp}, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Prior to this observation, true endogenous values
will be used for prediction; starting with this observation and
continuing through the end of prediction, forecasted endogenous
values will be used instead. Datetime-like objects are not
interpreted as offsets. They are instead used to find the index
location of `dynamic` which is then used to to compute the offset.
exog : array_like
A replacement exogenous array. Must have the same shape as the
exogenous data array used when the model was created.
exog_oos : array_like
An array containing out-of-sample values of the exogenous variable.
Must has the same number of columns as the exog used when the
model was created, and at least as many rows as the number of
out-of-sample forecasts.
Returns
-------
predictions : {ndarray, Series}
Array of out of in-sample predictions and / or out-of-sample
forecasts.
"""
params, exog, exog_oos, start, end, num_oos = self._prepare_prediction(
params, exog, exog_oos, start, end
)
if self.exog is None and (exog is not None or exog_oos is not None):
raise ValueError(
"exog and exog_oos cannot be used when the model "
"does not contains exogenous regressors."
)
elif self.exog is not None:
if exog is not None and exog.shape != self.exog.shape:
msg = (
"The shape of exog {0} must match the shape of the "
"exog variable used to create the model {1}."
)
raise ValueError(msg.format(exog.shape, self.exog.shape))
if (
exog_oos is not None
and exog_oos.shape[1] != self.exog.shape[1]
):
msg = (
"The number of columns in exog_oos ({0}) must match "
"the number of columns in the exog variable used to "
"create the model ({1})."
)
raise ValueError(
msg.format(exog_oos.shape[1], self.exog.shape[1])
)
if num_oos > 0 and exog_oos is None:
raise ValueError(
"exog_oos must be provided when producing "
"out-of-sample forecasts."
)
elif exog_oos is not None and num_oos > exog_oos.shape[0]:
msg = (
"start and end indicate that {0} out-of-sample "
"predictions must be computed. exog_oos has {1} rows "
"but must have at least {0}."
)
raise ValueError(msg.format(num_oos, exog_oos.shape[0]))
if (isinstance(dynamic, bool) and not dynamic) or self._maxlag == 0:
# If model has no lags, static and dynamic are identical
return self._static_predict(
params, start, end, num_oos, exog, exog_oos
)
dynamic = self._parse_dynamic(dynamic, start)
return self._dynamic_predict(
params, start, end, dynamic, num_oos, exog, exog_oos
) | In-sample prediction and out-of-sample forecasting.
Parameters
----------
params : array_like
The fitted model parameters.
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
parse or a datetime type. Default is the the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out-of-sample prediction. Default is the last observation in
the sample. Unlike standard python slices, end is inclusive so
that all the predictions [start, start+1, ..., end-1, end] are
returned.
dynamic : {bool, int, str, datetime, Timestamp}, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Prior to this observation, true endogenous values
will be used for prediction; starting with this observation and
continuing through the end of prediction, forecasted endogenous
values will be used instead. Datetime-like objects are not
interpreted as offsets. They are instead used to find the index
location of `dynamic` which is then used to to compute the offset.
exog : array_like
A replacement exogenous array. Must have the same shape as the
exogenous data array used when the model was created.
exog_oos : array_like
An array containing out-of-sample values of the exogenous variable.
Must has the same number of columns as the exog used when the
model was created, and at least as many rows as the number of
out-of-sample forecasts.
Returns
-------
predictions : {ndarray, Series}
Array of out of in-sample predictions and / or out-of-sample
forecasts. | predict | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def params(self):
"""The estimated parameters."""
return self._params | The estimated parameters. | params | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def df_model(self):
"""The degrees of freedom consumed by the model."""
return self._df_model | The degrees of freedom consumed by the model. | df_model | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def df_resid(self):
"""The remaining degrees of freedom in the residuals."""
return self.nobs - self._df_model | The remaining degrees of freedom in the residuals. | df_resid | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def nobs(self):
"""
The number of observations after adjusting for losses due to lags.
"""
return self._nobs | The number of observations after adjusting for losses due to lags. | nobs | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def bse(self): # allow user to specify?
"""
The standard errors of the estimated parameters.
If `method` is 'cmle', then the standard errors that are returned are
the OLS standard errors of the coefficients. If the `method` is 'mle'
then they are computed using the numerical Hessian.
"""
return np.sqrt(np.diag(self.cov_params())) | The standard errors of the estimated parameters.
If `method` is 'cmle', then the standard errors that are returned are
the OLS standard errors of the coefficients. If the `method` is 'mle'
then they are computed using the numerical Hessian. | bse | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def _lag_repr(self):
"""Returns poly repr of an AR, (1 -phi1 L -phi2 L^2-...)"""
ar_lags = self._ar_lags if self._ar_lags is not None else []
k_ar = len(ar_lags)
ar_params = np.zeros(self._max_lag + 1)
ar_params[0] = 1
df_model = self._df_model
exog = self.model.exog
k_exog = exog.shape[1] if exog is not None else 0
params = self._params[df_model - k_ar - k_exog : df_model - k_exog]
for i, lag in enumerate(ar_lags):
ar_params[lag] = -params[i]
return ar_params | Returns poly repr of an AR, (1 -phi1 L -phi2 L^2-...) | _lag_repr | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def roots(self):
"""
The roots of the AR process.
The roots are the solution to
(1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0.
Stability requires that the roots in modulus lie outside the unit
circle.
"""
# TODO: Specific to AR
lag_repr = self._lag_repr()
if lag_repr.shape[0] == 1:
return np.empty(0)
return np.roots(lag_repr) ** -1 | The roots of the AR process.
The roots are the solution to
(1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0.
Stability requires that the roots in modulus lie outside the unit
circle. | roots | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def fittedvalues(self):
"""
The in-sample predicted values of the fitted AR model.
The `k_ar` initial values are computed via the Kalman Filter if the
model is fit by `mle`.
"""
return self.model.predict(self.params)[self._hold_back :] | The in-sample predicted values of the fitted AR model.
The `k_ar` initial values are computed via the Kalman Filter if the
model is fit by `mle`. | fittedvalues | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def test_serial_correlation(self, lags=None, model_df=None):
"""
Ljung-Box test for residual serial correlation
Parameters
----------
lags : int
The maximum number of lags to use in the test. Jointly tests that
all autocorrelations up to and including lag j are zero for
j = 1, 2, ..., lags. If None, uses min(10, nobs // 5).
model_df : int
The model degree of freedom to use when adjusting computing the
test statistic to account for parameter estimation. If None, uses
the number of AR lags included in the model.
Returns
-------
output : DataFrame
DataFrame containing three columns: the test statistic, the
p-value of the test, and the degree of freedom used in the test.
Notes
-----
Null hypothesis is no serial correlation.
The the test degree-of-freedom is 0 or negative once accounting for
model_df, then the test statistic's p-value is missing.
See Also
--------
statsmodels.stats.diagnostic.acorr_ljungbox
Ljung-Box test for serial correlation.
"""
# Deferred to prevent circular import
from statsmodels.stats.diagnostic import acorr_ljungbox
lags = int_like(lags, "lags", optional=True)
model_df = int_like(model_df, "df_model", optional=True)
model_df = self.df_model if model_df is None else model_df
nobs_effective = self.resid.shape[0]
if lags is None:
lags = min(nobs_effective // 5, 10)
test_stats = acorr_ljungbox(
self.resid,
lags=lags,
boxpierce=False,
model_df=model_df,
)
cols = ["Ljung-Box", "LB P-value", "DF"]
if lags == 1:
df = max(0, 1 - model_df)
else:
df = np.clip(np.arange(1, lags + 1) - model_df, 0, np.inf)
df = df.astype(int)
test_stats["df"] = df
index = pd.RangeIndex(1, lags + 1, name="Lag")
return pd.DataFrame(test_stats, columns=cols, index=index) | Ljung-Box test for residual serial correlation
Parameters
----------
lags : int
The maximum number of lags to use in the test. Jointly tests that
all autocorrelations up to and including lag j are zero for
j = 1, 2, ..., lags. If None, uses min(10, nobs // 5).
model_df : int
The model degree of freedom to use when adjusting computing the
test statistic to account for parameter estimation. If None, uses
the number of AR lags included in the model.
Returns
-------
output : DataFrame
DataFrame containing three columns: the test statistic, the
p-value of the test, and the degree of freedom used in the test.
Notes
-----
Null hypothesis is no serial correlation.
The the test degree-of-freedom is 0 or negative once accounting for
model_df, then the test statistic's p-value is missing.
See Also
--------
statsmodels.stats.diagnostic.acorr_ljungbox
Ljung-Box test for serial correlation. | test_serial_correlation | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def test_normality(self):
"""
Test for normality of standardized residuals.
Returns
-------
Series
Series containing four values, the test statistic and its p-value,
the skewness and the kurtosis.
Notes
-----
Null hypothesis is normality.
See Also
--------
statsmodels.stats.stattools.jarque_bera
The Jarque-Bera test of normality.
"""
# Deferred to prevent circular import
from statsmodels.stats.stattools import jarque_bera
index = ["Jarque-Bera", "P-value", "Skewness", "Kurtosis"]
return pd.Series(jarque_bera(self.resid), index=index) | Test for normality of standardized residuals.
Returns
-------
Series
Series containing four values, the test statistic and its p-value,
the skewness and the kurtosis.
Notes
-----
Null hypothesis is normality.
See Also
--------
statsmodels.stats.stattools.jarque_bera
The Jarque-Bera test of normality. | test_normality | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def test_heteroskedasticity(self, lags=None):
"""
ARCH-LM test of residual heteroskedasticity
Parameters
----------
lags : int
The maximum number of lags to use in the test. Jointly tests that
all squared autocorrelations up to and including lag j are zero for
j = 1, 2, ..., lags. If None, uses lag=12*(nobs/100)^{1/4}.
Returns
-------
Series
Series containing the test statistic and its p-values.
See Also
--------
statsmodels.stats.diagnostic.het_arch
ARCH-LM test.
statsmodels.stats.diagnostic.acorr_lm
LM test for autocorrelation.
"""
from statsmodels.stats.diagnostic import het_arch
lags = int_like(lags, "lags", optional=True)
nobs_effective = self.resid.shape[0]
if lags is None:
lags = min(nobs_effective // 5, 10)
out = []
for lag in range(1, lags + 1):
res = het_arch(self.resid, nlags=lag)
out.append([res[0], res[1], lag])
index = pd.RangeIndex(1, lags + 1, name="Lag")
cols = ["ARCH-LM", "P-value", "DF"]
return pd.DataFrame(out, columns=cols, index=index) | ARCH-LM test of residual heteroskedasticity
Parameters
----------
lags : int
The maximum number of lags to use in the test. Jointly tests that
all squared autocorrelations up to and including lag j are zero for
j = 1, 2, ..., lags. If None, uses lag=12*(nobs/100)^{1/4}.
Returns
-------
Series
Series containing the test statistic and its p-values.
See Also
--------
statsmodels.stats.diagnostic.het_arch
ARCH-LM test.
statsmodels.stats.diagnostic.acorr_lm
LM test for autocorrelation. | test_heteroskedasticity | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def diagnostic_summary(self):
"""
Returns a summary containing standard model diagnostic tests
Returns
-------
Summary
A summary instance with panels for serial correlation tests,
normality tests and heteroskedasticity tests.
See Also
--------
test_serial_correlation
Test models residuals for serial correlation.
test_normality
Test models residuals for deviations from normality.
test_heteroskedasticity
Test models residuals for conditional heteroskedasticity.
"""
from statsmodels.iolib.table import SimpleTable
spacer = SimpleTable([""])
smry = Summary()
sc = self.test_serial_correlation()
sc = sc.loc[sc.DF > 0]
values = [[i + 1] + row for i, row in enumerate(sc.values.tolist())]
data_fmts = ("%10d", "%10.3f", "%10.3f", "%10d")
if sc.shape[0]:
tab = SimpleTable(
values,
headers=["Lag"] + list(sc.columns),
title="Test of No Serial Correlation",
header_align="r",
data_fmts=data_fmts,
)
smry.tables.append(tab)
smry.tables.append(spacer)
jb = self.test_normality()
data_fmts = ("%10.3f", "%10.3f", "%10.3f", "%10.3f")
tab = SimpleTable(
[jb.values],
headers=list(jb.index),
title="Test of Normality",
header_align="r",
data_fmts=data_fmts,
)
smry.tables.append(tab)
smry.tables.append(spacer)
arch_lm = self.test_heteroskedasticity()
values = [
[i + 1] + row for i, row in enumerate(arch_lm.values.tolist())
]
data_fmts = ("%10d", "%10.3f", "%10.3f", "%10d")
tab = SimpleTable(
values,
headers=["Lag"] + list(arch_lm.columns),
title="Test of Conditional Homoskedasticity",
header_align="r",
data_fmts=data_fmts,
)
smry.tables.append(tab)
return smry | Returns a summary containing standard model diagnostic tests
Returns
-------
Summary
A summary instance with panels for serial correlation tests,
normality tests and heteroskedasticity tests.
See Also
--------
test_serial_correlation
Test models residuals for serial correlation.
test_normality
Test models residuals for deviations from normality.
test_heteroskedasticity
Test models residuals for conditional heteroskedasticity. | diagnostic_summary | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def get_prediction(
self, start=None, end=None, dynamic=False, exog=None, exog_oos=None
):
"""
Predictions and prediction intervals
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
parse or a datetime type. Default is the the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out-of-sample prediction. Default is the last observation in
the sample. Unlike standard python slices, end is inclusive so
that all the predictions [start, start+1, ..., end-1, end] are
returned.
dynamic : {bool, int, str, datetime, Timestamp}, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Prior to this observation, true endogenous values
will be used for prediction; starting with this observation and
continuing through the end of prediction, forecasted endogenous
values will be used instead. Datetime-like objects are not
interpreted as offsets. They are instead used to find the index
location of `dynamic` which is then used to to compute the offset.
exog : array_like
A replacement exogenous array. Must have the same shape as the
exogenous data array used when the model was created.
exog_oos : array_like
An array containing out-of-sample values of the exogenous variable.
Must has the same number of columns as the exog used when the
model was created, and at least as many rows as the number of
out-of-sample forecasts.
Returns
-------
PredictionResults
Prediction results with mean and prediction intervals
"""
mean = self.predict(
start=start, end=end, dynamic=dynamic, exog=exog, exog_oos=exog_oos
)
mean_var = np.full_like(mean, self.sigma2)
mean_var[np.isnan(mean)] = np.nan
start = 0 if start is None else start
end = self.model._index[-1] if end is None else end
_, _, oos, _ = self.model._get_prediction_index(start, end)
if oos > 0:
ar_params = self._lag_repr()
ma = arma2ma(ar_params, np.ones(1), lags=oos)
mean_var[-oos:] = self.sigma2 * np.cumsum(ma**2)
if isinstance(mean, pd.Series):
mean_var = pd.Series(mean_var, index=mean.index)
return PredictionResults(mean, mean_var) | Predictions and prediction intervals
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
parse or a datetime type. Default is the the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out-of-sample prediction. Default is the last observation in
the sample. Unlike standard python slices, end is inclusive so
that all the predictions [start, start+1, ..., end-1, end] are
returned.
dynamic : {bool, int, str, datetime, Timestamp}, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Prior to this observation, true endogenous values
will be used for prediction; starting with this observation and
continuing through the end of prediction, forecasted endogenous
values will be used instead. Datetime-like objects are not
interpreted as offsets. They are instead used to find the index
location of `dynamic` which is then used to to compute the offset.
exog : array_like
A replacement exogenous array. Must have the same shape as the
exogenous data array used when the model was created.
exog_oos : array_like
An array containing out-of-sample values of the exogenous variable.
Must has the same number of columns as the exog used when the
model was created, and at least as many rows as the number of
out-of-sample forecasts.
Returns
-------
PredictionResults
Prediction results with mean and prediction intervals | get_prediction | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def forecast(self, steps=1, exog=None):
"""
Out-of-sample forecasts
Parameters
----------
steps : {int, str, datetime}, default 1
If an integer, the number of steps to forecast from the end of the
sample. Can also be a date string to parse or a datetime type.
However, if the dates index does not have a fixed frequency,
steps must be an integer.
exog : {ndarray, DataFrame}
Exogenous values to use out-of-sample. Must have same number of
columns as original exog data and at least `steps` rows
Returns
-------
array_like
Array of out of in-sample predictions and / or out-of-sample
forecasts.
See Also
--------
AutoRegResults.predict
In- and out-of-sample predictions
AutoRegResults.get_prediction
In- and out-of-sample predictions and confidence intervals
"""
start = self.model.data.orig_endog.shape[0]
if isinstance(steps, (int, np.integer)):
end = start + steps - 1
else:
end = steps
return self.predict(start=start, end=end, dynamic=False, exog_oos=exog) | Out-of-sample forecasts
Parameters
----------
steps : {int, str, datetime}, default 1
If an integer, the number of steps to forecast from the end of the
sample. Can also be a date string to parse or a datetime type.
However, if the dates index does not have a fixed frequency,
steps must be an integer.
exog : {ndarray, DataFrame}
Exogenous values to use out-of-sample. Must have same number of
columns as original exog data and at least `steps` rows
Returns
-------
array_like
Array of out of in-sample predictions and / or out-of-sample
forecasts.
See Also
--------
AutoRegResults.predict
In- and out-of-sample predictions
AutoRegResults.get_prediction
In- and out-of-sample predictions and confidence intervals | forecast | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def _plot_predictions(
self,
predictions,
start,
end,
alpha,
in_sample,
fig,
figsize,
):
"""Shared helper for plotting predictions"""
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
_import_mpl()
fig = create_mpl_fig(fig, figsize)
start = 0 if start is None else start
end = self.model._index[-1] if end is None else end
_, _, oos, _ = self.model._get_prediction_index(start, end)
ax = fig.add_subplot(111)
mean = predictions.predicted_mean
if not in_sample and oos:
if isinstance(mean, pd.Series):
mean = mean.iloc[-oos:]
elif not in_sample:
raise ValueError(
"in_sample is False but there are no"
"out-of-sample forecasts to plot."
)
ax.plot(mean, zorder=2, label="Forecast")
if oos and alpha is not None:
ci = np.asarray(predictions.conf_int(alpha))
lower, upper = ci[-oos:, 0], ci[-oos:, 1]
label = f"{1 - alpha:.0%} confidence interval"
x = ax.get_lines()[-1].get_xdata()
ax.fill_between(
x[-oos:],
lower,
upper,
color="gray",
alpha=0.5,
label=label,
zorder=1,
)
ax.legend(loc="best")
return fig | Shared helper for plotting predictions | _plot_predictions | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def plot_predict(
self,
start=None,
end=None,
dynamic=False,
exog=None,
exog_oos=None,
alpha=0.05,
in_sample=True,
fig=None,
figsize=None,
):
"""
Plot in- and out-of-sample predictions
Parameters
----------\n%(predict_params)s
alpha : {float, None}
The tail probability not covered by the confidence interval. Must
be in (0, 1). Confidence interval is constructed assuming normally
distributed shocks. If None, figure will not show the confidence
interval.
in_sample : bool
Flag indicating whether to include the in-sample period in the
plot.
fig : Figure
An existing figure handle. If not provided, a new figure is
created.
figsize: tuple[float, float]
Tuple containing the figure size values.
Returns
-------
Figure
Figure handle containing the plot.
"""
predictions = self.get_prediction(
start=start, end=end, dynamic=dynamic, exog=exog, exog_oos=exog_oos
)
return self._plot_predictions(
predictions, start, end, alpha, in_sample, fig, figsize
) | Plot in- and out-of-sample predictions
Parameters
----------\n%(predict_params)s
alpha : {float, None}
The tail probability not covered by the confidence interval. Must
be in (0, 1). Confidence interval is constructed assuming normally
distributed shocks. If None, figure will not show the confidence
interval.
in_sample : bool
Flag indicating whether to include the in-sample period in the
plot.
fig : Figure
An existing figure handle. If not provided, a new figure is
created.
figsize: tuple[float, float]
Tuple containing the figure size values.
Returns
-------
Figure
Figure handle containing the plot. | plot_predict | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def plot_diagnostics(self, lags=10, fig=None, figsize=None):
"""
Diagnostic plots for standardized residuals
Parameters
----------
lags : int, optional
Number of lags to include in the correlogram. Default is 10.
fig : Figure, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the 2x2 grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
Produces a 2x2 plot grid with the following plots (ordered clockwise
from top left):
1. Standardized residuals over time
2. Histogram plus estimated density of standardized residuals, along
with a Normal(0,1) density plotted for reference.
3. Normal Q-Q plot, with Normal reference line.
4. Correlogram
See Also
--------
statsmodels.graphics.gofplots.qqplot
statsmodels.graphics.tsaplots.plot_acf
"""
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
_import_mpl()
fig = create_mpl_fig(fig, figsize)
# Eliminate residuals associated with burned or diffuse likelihoods
resid = self.resid
# Top-left: residuals vs time
ax = fig.add_subplot(221)
if hasattr(self.model.data, "dates") and self.data.dates is not None:
x = self.model.data.dates._mpl_repr()
x = x[self.model.hold_back :]
else:
hold_back = self.model.hold_back
x = hold_back + np.arange(self.resid.shape[0])
std_resid = resid / np.sqrt(self.sigma2)
ax.plot(x, std_resid)
ax.hlines(0, x[0], x[-1], alpha=0.5)
ax.set_xlim(x[0], x[-1])
ax.set_title("Standardized residual")
# Top-right: histogram, Gaussian kernel density, Normal density
# Can only do histogram and Gaussian kernel density on the non-null
# elements
std_resid_nonmissing = std_resid[~(np.isnan(resid))]
ax = fig.add_subplot(222)
ax.hist(std_resid_nonmissing, density=True, label="Hist")
kde = gaussian_kde(std_resid)
xlim = (-1.96 * 2, 1.96 * 2)
x = np.linspace(xlim[0], xlim[1])
ax.plot(x, kde(x), label="KDE")
ax.plot(x, norm.pdf(x), label="N(0,1)")
ax.set_xlim(xlim)
ax.legend()
ax.set_title("Histogram plus estimated density")
# Bottom-left: QQ plot
ax = fig.add_subplot(223)
from statsmodels.graphics.gofplots import qqplot
qqplot(std_resid, line="s", ax=ax)
ax.set_title("Normal Q-Q")
# Bottom-right: Correlogram
ax = fig.add_subplot(224)
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(resid, ax=ax, lags=lags)
ax.set_title("Correlogram")
ax.set_ylim(-1, 1)
return fig | Diagnostic plots for standardized residuals
Parameters
----------
lags : int, optional
Number of lags to include in the correlogram. Default is 10.
fig : Figure, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the 2x2 grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
Produces a 2x2 plot grid with the following plots (ordered clockwise
from top left):
1. Standardized residuals over time
2. Histogram plus estimated density of standardized residuals, along
with a Normal(0,1) density plotted for reference.
3. Normal Q-Q plot, with Normal reference line.
4. Correlogram
See Also
--------
statsmodels.graphics.gofplots.qqplot
statsmodels.graphics.tsaplots.plot_acf | plot_diagnostics | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def summary(self, alpha=0.05):
"""
Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals.
Returns
-------
smry : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
model = self.model
title = model.__class__.__name__ + " Model Results"
method = "Conditional MLE"
# get sample
start = self._hold_back
if self.data.dates is not None:
dates = self.data.dates
sample = [dates[start].strftime("%m-%d-%Y")]
sample += ["- " + dates[-1].strftime("%m-%d-%Y")]
else:
sample = [str(start), str(len(self.data.orig_endog))]
model = model.__class__.__name__
if self.model.seasonal:
model = "Seas. " + model
if self.ar_lags is not None and len(self.ar_lags) < self._max_lag:
model = "Restr. " + model
if self.model.exog is not None:
model += "-X"
order = f"({self._max_lag})"
dep_name = str(self.model.endog_names)
top_left = [
("Dep. Variable:", [dep_name]),
("Model:", [model + order]),
("Method:", [method]),
("Date:", None),
("Time:", None),
("Sample:", [sample[0]]),
("", [sample[1]]),
]
top_right = [
("No. Observations:", [str(len(self.model.endog))]),
("Log Likelihood", ["%#5.3f" % self.llf]),
("S.D. of innovations", ["%#5.3f" % self.sigma2**0.5]),
("AIC", ["%#5.3f" % self.aic]),
("BIC", ["%#5.3f" % self.bic]),
("HQIC", ["%#5.3f" % self.hqic]),
]
smry = Summary()
smry.add_table_2cols(
self, gleft=top_left, gright=top_right, title=title
)
smry.add_table_params(self, alpha=alpha, use_t=False)
# Make the roots table
from statsmodels.iolib.table import SimpleTable
if self._max_lag:
arstubs = ["AR.%d" % i for i in range(1, self._max_lag + 1)]
stubs = arstubs
roots = self.roots
freq = self.arfreq
modulus = np.abs(roots)
data = np.column_stack((roots.real, roots.imag, modulus, freq))
roots_table = SimpleTable(
[
(
"%17.4f" % row[0],
"%+17.4fj" % row[1],
"%17.4f" % row[2],
"%17.4f" % row[3],
)
for row in data
],
headers=[
" Real",
" Imaginary",
" Modulus",
" Frequency",
],
title="Roots",
stubs=stubs,
)
smry.tables.append(roots_table)
if self._summary_text:
extra_txt = smry.extra_txt if smry.extra_txt is not None else []
smry.add_extra_txt(extra_txt + [self._summary_text])
return smry | Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals.
Returns
-------
smry : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary | summary | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def apply(self, endog, exog=None, refit=False, fit_kwargs=None):
"""
Apply the fitted parameters to new data unrelated to the original data
Creates a new result object using the current fitted parameters,
applied to a completely new dataset that is assumed to be unrelated to
the model's original data. The new results can then be used for
analysis or forecasting.
Parameters
----------
endog : array_like
New observations from the modeled time-series process.
exog : array_like, optional
New observations of exogenous regressors, if applicable.
refit : bool, optional
Whether to re-fit the parameters, using the new dataset.
Default is False (so parameters from the current results object
are used to create the new results object).
fit_kwargs : dict, optional
Keyword arguments to pass to `fit` (if `refit=True`).
Returns
-------
AutoRegResults
Updated results object containing results for the new dataset.
See Also
--------
AutoRegResults.append
statsmodels.tsa.statespace.mlemodel.MLEResults.apply
Notes
-----
The `endog` argument to this method should consist of new observations
that are not necessarily related to the original model's `endog`
dataset.
Care is needed when using deterministic processes with cyclical
components such as seasonal dummies or Fourier series. These
deterministic components will align to the first observation
in the data and so it is essential that any new data have the
same initial period.
Examples
--------
>>> import pandas as pd
>>> from statsmodels.tsa.ar_model import AutoReg
>>> index = pd.period_range(start='2000', periods=3, freq='Y')
>>> original_observations = pd.Series([1.2, 1.5, 1.8], index=index)
>>> mod = AutoReg(original_observations, lags=1, trend="n")
>>> res = mod.fit()
>>> print(res.params)
y.L1 1.219512
dtype: float64
>>> print(res.fittedvalues)
2001 1.463415
2002 1.829268
Freq: A-DEC, dtype: float64
>>> print(res.forecast(1))
2003 2.195122
Freq: A-DEC, dtype: float64
>>> new_index = pd.period_range(start='1980', periods=3, freq='Y')
>>> new_observations = pd.Series([1.4, 0.3, 1.2], index=new_index)
>>> new_res = res.apply(new_observations)
>>> print(new_res.params)
y.L1 1.219512
dtype: float64
>>> print(new_res.fittedvalues)
1981 1.707317
1982 0.365854
Freq: A-DEC, dtype: float64
>>> print(new_res.forecast(1))
1983 1.463415
Freq: A-DEC, dtype: float64
"""
existing = self.model
try:
deterministic = existing.deterministic
if deterministic is not None:
if isinstance(endog, (pd.Series, pd.DataFrame)):
index = endog.index
else:
index = np.arange(endog.shape[0])
deterministic = deterministic.apply(index)
mod = AutoReg(
endog,
lags=existing.ar_lags,
trend=existing.trend,
seasonal=existing.seasonal,
exog=exog,
hold_back=existing.hold_back,
period=existing.period,
deterministic=deterministic,
old_names=False,
)
except Exception as exc:
error = (
"An exception occured during the creation of the cloned "
"AutoReg instance when applying the existing model "
"specification to the new data. The original traceback "
"appears below."
)
exc.args = (error,) + exc.args
raise exc.with_traceback(exc.__traceback__)
if (mod.exog is None) != (existing.exog is None):
if existing.exog is not None:
raise ValueError(
"exog must be provided when the original model contained "
"exog variables"
)
raise ValueError(
"exog must be None when the original model did not contain "
"exog variables"
)
if (
existing.exog is not None
and existing.exog.shape[1] != mod.exog.shape[1]
):
raise ValueError(
"The number of exog variables passed must match the original "
f"number of exog values ({existing.exog.shape[1]})"
)
if refit:
fit_kwargs = {} if fit_kwargs is None else fit_kwargs
return mod.fit(**fit_kwargs)
smry_txt = (
"Parameters and standard errors were estimated using a different "
"dataset and were then applied to this dataset."
)
res = AutoRegResults(
mod,
self.params,
self.cov_params_default,
self.normalized_cov_params,
use_t=self.use_t,
summary_text=smry_txt,
)
return AutoRegResultsWrapper(res) | Apply the fitted parameters to new data unrelated to the original data
Creates a new result object using the current fitted parameters,
applied to a completely new dataset that is assumed to be unrelated to
the model's original data. The new results can then be used for
analysis or forecasting.
Parameters
----------
endog : array_like
New observations from the modeled time-series process.
exog : array_like, optional
New observations of exogenous regressors, if applicable.
refit : bool, optional
Whether to re-fit the parameters, using the new dataset.
Default is False (so parameters from the current results object
are used to create the new results object).
fit_kwargs : dict, optional
Keyword arguments to pass to `fit` (if `refit=True`).
Returns
-------
AutoRegResults
Updated results object containing results for the new dataset.
See Also
--------
AutoRegResults.append
statsmodels.tsa.statespace.mlemodel.MLEResults.apply
Notes
-----
The `endog` argument to this method should consist of new observations
that are not necessarily related to the original model's `endog`
dataset.
Care is needed when using deterministic processes with cyclical
components such as seasonal dummies or Fourier series. These
deterministic components will align to the first observation
in the data and so it is essential that any new data have the
same initial period.
Examples
--------
>>> import pandas as pd
>>> from statsmodels.tsa.ar_model import AutoReg
>>> index = pd.period_range(start='2000', periods=3, freq='Y')
>>> original_observations = pd.Series([1.2, 1.5, 1.8], index=index)
>>> mod = AutoReg(original_observations, lags=1, trend="n")
>>> res = mod.fit()
>>> print(res.params)
y.L1 1.219512
dtype: float64
>>> print(res.fittedvalues)
2001 1.463415
2002 1.829268
Freq: A-DEC, dtype: float64
>>> print(res.forecast(1))
2003 2.195122
Freq: A-DEC, dtype: float64
>>> new_index = pd.period_range(start='1980', periods=3, freq='Y')
>>> new_observations = pd.Series([1.4, 0.3, 1.2], index=new_index)
>>> new_res = res.apply(new_observations)
>>> print(new_res.params)
y.L1 1.219512
dtype: float64
>>> print(new_res.fittedvalues)
1981 1.707317
1982 0.365854
Freq: A-DEC, dtype: float64
>>> print(new_res.forecast(1))
1983 1.463415
Freq: A-DEC, dtype: float64 | apply | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def append(self, endog, exog=None, refit=False, fit_kwargs=None):
"""
Append observations to the ones used to fit the model
Creates a new result object using the current fitted parameters
where additional observations are appended to the data used
to fit the model. The new results can then be used for
analysis or forecasting.
Parameters
----------
endog : array_like
New observations from the modeled time-series process.
exog : array_like, optional
New observations of exogenous regressors, if applicable.
refit : bool, optional
Whether to re-fit the parameters, using the new dataset.
Default is False (so parameters from the current results object
are used to create the new results object).
fit_kwargs : dict, optional
Keyword arguments to pass to `fit` (if `refit=True`).
Returns
-------
AutoRegResults
Updated results object containing results for the new dataset.
See Also
--------
AutoRegResults.apply
statsmodels.tsa.statespace.mlemodel.MLEResults.append
Notes
-----
The endog and exog arguments to this method must be formatted in the
same way (e.g. Pandas Series versus Numpy array) as were the endog
and exog arrays passed to the original model.
The endog argument to this method should consist of new observations
that occurred directly after the last element of endog. For any other
kind of dataset, see the apply method.
Examples
--------
>>> import pandas as pd
>>> from statsmodels.tsa.ar_model import AutoReg
>>> index = pd.period_range(start='2000', periods=3, freq='Y')
>>> original_observations = pd.Series([1.2, 1.4, 1.8], index=index)
>>> mod = AutoReg(original_observations, lags=1, trend="n")
>>> res = mod.fit()
>>> print(res.params)
y.L1 1.235294
dtype: float64
>>> print(res.fittedvalues)
2001 1.482353
2002 1.729412
Freq: A-DEC, dtype: float64
>>> print(res.forecast(1))
2003 2.223529
Freq: A-DEC, dtype: float64
>>> new_index = pd.period_range(start='2003', periods=3, freq='Y')
>>> new_observations = pd.Series([2.1, 2.4, 2.7], index=new_index)
>>> updated_res = res.append(new_observations)
>>> print(updated_res.params)
y.L1 1.235294
dtype: float64
>>> print(updated_res.fittedvalues)
dtype: float64
2001 1.482353
2002 1.729412
2003 2.223529
2004 2.594118
2005 2.964706
Freq: A-DEC, dtype: float64
>>> print(updated_res.forecast(1))
2006 3.335294
Freq: A-DEC, dtype: float64
"""
def _check(orig, new, name, use_pandas=True):
from statsmodels.tsa.statespace.mlemodel import _check_index
typ = type(orig)
if not isinstance(new, typ):
raise TypeError(
f"{name} must have the same type as the {name} used to "
f"originally create the model ({typ.__name__})."
)
if not use_pandas:
return np.concatenate([orig, new])
start = len(orig)
end = start + len(new) - 1
_, _, _, append_ix = self.model._get_prediction_index(start, end)
_check_index(append_ix, new, title=name)
return pd.concat([orig, new], axis=0)
existing = self.model
no_exog = existing.exog is None
if no_exog != (exog is None):
if no_exog:
err = (
"Original model does not contain exog data but exog data "
"passed"
)
else:
err = "Original model has exog data but not exog data passed"
raise ValueError(err)
if isinstance(existing.data.orig_endog, (pd.Series, pd.DataFrame)):
endog = _check(existing.data.orig_endog, endog, "endog")
else:
endog = _check(
existing.endog, np.asarray(endog), "endog", use_pandas=False
)
if isinstance(existing.data.orig_exog, (pd.Series, pd.DataFrame)):
exog = _check(existing.data.orig_exog, exog, "exog")
elif exog is not None:
exog = _check(
existing.exog, np.asarray(exog), "endog", use_pandas=False
)
return self.apply(endog, exog, refit=refit, fit_kwargs=fit_kwargs) | Append observations to the ones used to fit the model
Creates a new result object using the current fitted parameters
where additional observations are appended to the data used
to fit the model. The new results can then be used for
analysis or forecasting.
Parameters
----------
endog : array_like
New observations from the modeled time-series process.
exog : array_like, optional
New observations of exogenous regressors, if applicable.
refit : bool, optional
Whether to re-fit the parameters, using the new dataset.
Default is False (so parameters from the current results object
are used to create the new results object).
fit_kwargs : dict, optional
Keyword arguments to pass to `fit` (if `refit=True`).
Returns
-------
AutoRegResults
Updated results object containing results for the new dataset.
See Also
--------
AutoRegResults.apply
statsmodels.tsa.statespace.mlemodel.MLEResults.append
Notes
-----
The endog and exog arguments to this method must be formatted in the
same way (e.g. Pandas Series versus Numpy array) as were the endog
and exog arrays passed to the original model.
The endog argument to this method should consist of new observations
that occurred directly after the last element of endog. For any other
kind of dataset, see the apply method.
Examples
--------
>>> import pandas as pd
>>> from statsmodels.tsa.ar_model import AutoReg
>>> index = pd.period_range(start='2000', periods=3, freq='Y')
>>> original_observations = pd.Series([1.2, 1.4, 1.8], index=index)
>>> mod = AutoReg(original_observations, lags=1, trend="n")
>>> res = mod.fit()
>>> print(res.params)
y.L1 1.235294
dtype: float64
>>> print(res.fittedvalues)
2001 1.482353
2002 1.729412
Freq: A-DEC, dtype: float64
>>> print(res.forecast(1))
2003 2.223529
Freq: A-DEC, dtype: float64
>>> new_index = pd.period_range(start='2003', periods=3, freq='Y')
>>> new_observations = pd.Series([2.1, 2.4, 2.7], index=new_index)
>>> updated_res = res.append(new_observations)
>>> print(updated_res.params)
y.L1 1.235294
dtype: float64
>>> print(updated_res.fittedvalues)
dtype: float64
2001 1.482353
2002 1.729412
2003 2.223529
2004 2.594118
2005 2.964706
Freq: A-DEC, dtype: float64
>>> print(updated_res.forecast(1))
2006 3.335294
Freq: A-DEC, dtype: float64 | append | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def ic_no_data():
"""Fake mod and results to handle no regressor case"""
mod = SimpleNamespace(
nobs=y.shape[0], endog=y, exog=np.empty((y.shape[0], 0))
)
llf = OLS.loglike(mod, np.empty(0))
res = SimpleNamespace(
resid=y, nobs=y.shape[0], llf=llf, df_model=0, k_constant=0
)
return compute_ics(res) | Fake mod and results to handle no regressor case | ar_select_order.ic_no_data | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def ar_select_order(
endog,
maxlag,
ic="bic",
glob=False,
trend: Literal["n", "c", "ct", "ctt"] = "c",
seasonal=False,
exog=None,
hold_back=None,
period=None,
missing="none",
old_names=False,
):
"""
Autoregressive AR-X(p) model order selection.
Parameters
----------
endog : array_like
A 1-d endogenous response variable. The independent variable.
maxlag : int
The maximum lag to consider.
ic : {'aic', 'hqic', 'bic'}
The information criterion to use in the selection.
glob : bool
Flag indicating where to use a global search across all combinations
of lags. In practice, this option is not computational feasible when
maxlag is larger than 15 (or perhaps 20) since the global search
requires fitting 2**maxlag models.\n%(auto_reg_params)s
Returns
-------
AROrderSelectionResults
A results holder containing the model and the complete set of
information criteria for all models fit.
Examples
--------
>>> from statsmodels.tsa.ar_model import ar_select_order
>>> data = sm.datasets.sunspots.load_pandas().data['SUNACTIVITY']
Determine the optimal lag structure
>>> mod = ar_select_order(data, maxlag=13)
>>> mod.ar_lags
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
Determine the optimal lag structure with seasonal terms
>>> mod = ar_select_order(data, maxlag=13, seasonal=True, period=12)
>>> mod.ar_lags
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
Globally determine the optimal lag structure
>>> mod = ar_select_order(data, maxlag=13, glob=True)
>>> mod.ar_lags
array([1, 2, 9])
"""
full_mod = AutoReg(
endog,
maxlag,
trend=trend,
seasonal=seasonal,
exog=exog,
hold_back=hold_back,
period=period,
missing=missing,
old_names=old_names,
)
nexog = full_mod.exog.shape[1] if full_mod.exog is not None else 0
y, x = full_mod._y, full_mod._x
base_col = x.shape[1] - nexog - maxlag
sel = np.ones(x.shape[1], dtype=bool)
ics: list[tuple[int | tuple[int, ...], tuple[float, float, float]]] = []
def compute_ics(res):
nobs = res.nobs
df_model = res.df_model
sigma2 = 1.0 / nobs * sumofsq(res.resid)
llf = -nobs * (np.log(2 * np.pi * sigma2) + 1) / 2
res = SimpleNamespace(
nobs=nobs, df_model=df_model, sigma2=sigma2, llf=llf
)
aic = call_cached_func(AutoRegResults.aic, res)
bic = call_cached_func(AutoRegResults.bic, res)
hqic = call_cached_func(AutoRegResults.hqic, res)
return aic, bic, hqic
def ic_no_data():
"""Fake mod and results to handle no regressor case"""
mod = SimpleNamespace(
nobs=y.shape[0], endog=y, exog=np.empty((y.shape[0], 0))
)
llf = OLS.loglike(mod, np.empty(0))
res = SimpleNamespace(
resid=y, nobs=y.shape[0], llf=llf, df_model=0, k_constant=0
)
return compute_ics(res)
if not glob:
sel[base_col : base_col + maxlag] = False
for i in range(maxlag + 1):
sel[base_col : base_col + i] = True
if not np.any(sel):
ics.append((0, ic_no_data()))
continue
res = OLS(y, x[:, sel]).fit()
lags = tuple(j for j in range(1, i + 1))
lags = 0 if not lags else lags
ics.append((lags, compute_ics(res)))
else:
bits = np.arange(2**maxlag, dtype=np.int32)[:, None]
bits = bits.view(np.uint8)
bits = np.unpackbits(bits).reshape(-1, 32)
for i in range(4):
bits[:, 8 * i : 8 * (i + 1)] = bits[:, 8 * i : 8 * (i + 1)][
:, ::-1
]
masks = bits[:, :maxlag]
for mask in masks:
sel[base_col : base_col + maxlag] = mask
if not np.any(sel):
ics.append((0, ic_no_data()))
continue
res = OLS(y, x[:, sel]).fit()
lags = tuple(np.where(mask)[0] + 1)
lags = 0 if not lags else lags
ics.append((lags, compute_ics(res)))
key_loc = {"aic": 0, "bic": 1, "hqic": 2}[ic]
ics = sorted(ics, key=lambda x: x[1][key_loc])
selected_model = ics[0][0]
mod = AutoReg(
endog,
selected_model,
trend=trend,
seasonal=seasonal,
exog=exog,
hold_back=hold_back,
period=period,
missing=missing,
old_names=old_names,
)
return AROrderSelectionResults(mod, ics, trend, seasonal, period) | Autoregressive AR-X(p) model order selection.
Parameters
----------
endog : array_like
A 1-d endogenous response variable. The independent variable.
maxlag : int
The maximum lag to consider.
ic : {'aic', 'hqic', 'bic'}
The information criterion to use in the selection.
glob : bool
Flag indicating where to use a global search across all combinations
of lags. In practice, this option is not computational feasible when
maxlag is larger than 15 (or perhaps 20) since the global search
requires fitting 2**maxlag models.\n%(auto_reg_params)s
Returns
-------
AROrderSelectionResults
A results holder containing the model and the complete set of
information criteria for all models fit.
Examples
--------
>>> from statsmodels.tsa.ar_model import ar_select_order
>>> data = sm.datasets.sunspots.load_pandas().data['SUNACTIVITY']
Determine the optimal lag structure
>>> mod = ar_select_order(data, maxlag=13)
>>> mod.ar_lags
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
Determine the optimal lag structure with seasonal terms
>>> mod = ar_select_order(data, maxlag=13, seasonal=True, period=12)
>>> mod.ar_lags
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
Globally determine the optimal lag structure
>>> mod = ar_select_order(data, maxlag=13, glob=True)
>>> mod.ar_lags
array([1, 2, 9]) | ar_select_order | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def model(self) -> AutoReg:
"""The model selected using the chosen information criterion."""
return self._model | The model selected using the chosen information criterion. | model | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def seasonal(self) -> bool:
"""Flag indicating if a seasonal component is included."""
return self._seasonal | Flag indicating if a seasonal component is included. | seasonal | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def trend(self) -> Literal["n", "c", "ct", "ctt"]:
"""The trend included in the model selection."""
return self._trend | The trend included in the model selection. | trend | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def aic(self) -> dict[int | tuple[int, ...], float]:
"""
The Akaike information criterion for the models fit.
Returns
-------
dict[tuple, float]
"""
return self._aic | The Akaike information criterion for the models fit.
Returns
-------
dict[tuple, float] | aic | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def bic(self) -> dict[int | tuple[int, ...], float]:
"""
The Bayesian (Schwarz) information criteria for the models fit.
Returns
-------
dict[tuple, float]
"""
return self._bic | The Bayesian (Schwarz) information criteria for the models fit.
Returns
-------
dict[tuple, float] | bic | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def hqic(self) -> dict[int | tuple[int, ...], float]:
"""
The Hannan-Quinn information criteria for the models fit.
Returns
-------
dict[tuple, float]
"""
return self._hqic | The Hannan-Quinn information criteria for the models fit.
Returns
-------
dict[tuple, float] | hqic | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def ar_lags(self) -> list[int] | None:
"""The lags included in the selected model."""
return self._model.ar_lags | The lags included in the selected model. | ar_lags | python | statsmodels/statsmodels | statsmodels/tsa/ar_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/ar_model.py | BSD-3-Clause |
def add_trend(x, trend="c", prepend=False, has_constant="skip"):
"""
Add a trend and/or constant to an array.
Parameters
----------
x : array_like
Original array of data.
trend : str {'n', 'c', 't', 'ct', 'ctt'}
The trend to add.
* 'n' add no trend.
* 'c' add constant only.
* 't' add trend only.
* 'ct' add constant and linear trend.
* 'ctt' add constant and linear and quadratic trend.
prepend : bool
If True, prepends the new data to the columns of X.
has_constant : str {'raise', 'add', 'skip'}
Controls what happens when trend is 'c' and a constant column already
exists in x. 'raise' will raise an error. 'add' will add a column of
1s. 'skip' will return the data without change. 'skip' is the default.
Returns
-------
array_like
The original data with the additional trend columns. If x is a
pandas Series or DataFrame, then the trend column names are 'const',
'trend' and 'trend_squared'.
See Also
--------
statsmodels.tools.tools.add_constant
Add a constant column to an array.
Notes
-----
Returns columns as ['ctt','ct','c'] whenever applicable. There is currently
no checking for an existing trend.
"""
prepend = bool_like(prepend, "prepend")
trend = string_like(trend, "trend", options=("n", "c", "t", "ct", "ctt"))
has_constant = string_like(
has_constant, "has_constant", options=("raise", "add", "skip")
)
# TODO: could be generalized for trend of aribitrary order
columns = ["const", "trend", "trend_squared"]
if trend == "n":
return x.copy()
elif trend == "c": # handles structured arrays
columns = columns[:1]
trendorder = 0
elif trend == "ct" or trend == "t":
columns = columns[:2]
if trend == "t":
columns = columns[1:2]
trendorder = 1
elif trend == "ctt":
trendorder = 2
if _is_recarray(x):
from statsmodels.tools.sm_exceptions import recarray_exception
raise NotImplementedError(recarray_exception)
is_pandas = _is_using_pandas(x, None)
if is_pandas:
if isinstance(x, pd.Series):
x = pd.DataFrame(x)
else:
x = x.copy()
else:
x = np.asanyarray(x)
nobs = len(x)
trendarr = np.vander(
np.arange(1, nobs + 1, dtype=np.float64), trendorder + 1
)
# put in order ctt
trendarr = np.fliplr(trendarr)
if trend == "t":
trendarr = trendarr[:, 1]
if "c" in trend:
if is_pandas:
# Mixed type protection
def safe_is_const(s):
try:
return np.ptp(s) == 0.0 and np.any(s != 0.0)
except Exception:
return False
col_const = x.apply(safe_is_const, 0)
else:
ptp0 = np.ptp(np.asanyarray(x), axis=0)
col_is_const = ptp0 == 0
nz_const = col_is_const & (x[0] != 0)
col_const = nz_const
if np.any(col_const):
if has_constant == "raise":
if x.ndim == 1:
base_err = "x is constant."
else:
columns = np.arange(x.shape[1])[col_const]
if isinstance(x, pd.DataFrame):
columns = x.columns
const_cols = ", ".join([str(c) for c in columns])
base_err = (
"x contains one or more constant columns. Column(s) "
f"{const_cols} are constant."
)
raise ValueError(
f"{base_err} Adding a constant with trend='{trend}' is "
"not allowed."
)
elif has_constant == "skip":
columns = columns[1:]
trendarr = trendarr[:, 1:]
order = 1 if prepend else -1
if is_pandas:
trendarr = pd.DataFrame(trendarr, index=x.index, columns=columns)
x = [trendarr, x]
x = pd.concat(x[::order], axis=1)
else:
x = [trendarr, x]
x = np.column_stack(x[::order])
return x | Add a trend and/or constant to an array.
Parameters
----------
x : array_like
Original array of data.
trend : str {'n', 'c', 't', 'ct', 'ctt'}
The trend to add.
* 'n' add no trend.
* 'c' add constant only.
* 't' add trend only.
* 'ct' add constant and linear trend.
* 'ctt' add constant and linear and quadratic trend.
prepend : bool
If True, prepends the new data to the columns of X.
has_constant : str {'raise', 'add', 'skip'}
Controls what happens when trend is 'c' and a constant column already
exists in x. 'raise' will raise an error. 'add' will add a column of
1s. 'skip' will return the data without change. 'skip' is the default.
Returns
-------
array_like
The original data with the additional trend columns. If x is a
pandas Series or DataFrame, then the trend column names are 'const',
'trend' and 'trend_squared'.
See Also
--------
statsmodels.tools.tools.add_constant
Add a constant column to an array.
Notes
-----
Returns columns as ['ctt','ct','c'] whenever applicable. There is currently
no checking for an existing trend. | add_trend | python | statsmodels/statsmodels | statsmodels/tsa/tsatools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/tsatools.py | BSD-3-Clause |
def add_lag(x, col=None, lags=1, drop=False, insert=True):
"""
Returns an array with lags included given an array.
Parameters
----------
x : array_like
An array or NumPy ndarray subclass. Can be either a 1d or 2d array with
observations in columns.
col : int or None
`col` can be an int of the zero-based column index. If it's a
1d array `col` can be None.
lags : int
The number of lags desired.
drop : bool
Whether to keep the contemporaneous variable for the data.
insert : bool or int
If True, inserts the lagged values after `col`. If False, appends
the data. If int inserts the lags at int.
Returns
-------
array : ndarray
Array with lags
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.macrodata.load()
>>> data = data.data[['year','quarter','realgdp','cpi']]
>>> data = sm.tsa.add_lag(data, 'realgdp', lags=2)
Notes
-----
Trims the array both forward and backward, so that the array returned
so that the length of the returned array is len(`X`) - lags. The lags are
returned in increasing order, ie., t-1,t-2,...,t-lags
"""
lags = int_like(lags, "lags")
drop = bool_like(drop, "drop")
x = array_like(x, "x", ndim=2)
if col is None:
col = 0
# handle negative index
if col < 0:
col = x.shape[1] + col
if x.ndim == 1:
x = x[:, None]
contemp = x[:, col]
if insert is True:
ins_idx = col + 1
elif insert is False:
ins_idx = x.shape[1]
else:
if insert < 0: # handle negative index
insert = x.shape[1] + insert + 1
if insert > x.shape[1]:
insert = x.shape[1]
warnings.warn(
"insert > number of variables, inserting at the"
" last position",
ValueWarning,
)
ins_idx = insert
ndlags = lagmat(contemp, lags, trim="Both")
first_cols = lrange(ins_idx)
last_cols = lrange(ins_idx, x.shape[1])
if drop:
if col in first_cols:
first_cols.pop(first_cols.index(col))
else:
last_cols.pop(last_cols.index(col))
return np.column_stack((x[lags:, first_cols], ndlags, x[lags:, last_cols])) | Returns an array with lags included given an array.
Parameters
----------
x : array_like
An array or NumPy ndarray subclass. Can be either a 1d or 2d array with
observations in columns.
col : int or None
`col` can be an int of the zero-based column index. If it's a
1d array `col` can be None.
lags : int
The number of lags desired.
drop : bool
Whether to keep the contemporaneous variable for the data.
insert : bool or int
If True, inserts the lagged values after `col`. If False, appends
the data. If int inserts the lags at int.
Returns
-------
array : ndarray
Array with lags
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.macrodata.load()
>>> data = data.data[['year','quarter','realgdp','cpi']]
>>> data = sm.tsa.add_lag(data, 'realgdp', lags=2)
Notes
-----
Trims the array both forward and backward, so that the array returned
so that the length of the returned array is len(`X`) - lags. The lags are
returned in increasing order, ie., t-1,t-2,...,t-lags | add_lag | python | statsmodels/statsmodels | statsmodels/tsa/tsatools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/tsatools.py | BSD-3-Clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.