Dataset schema (per-row fields):

  code        string, length 26 to 870k
  docstring   string, length 1 to 65.6k
  func_name   string, length 1 to 194
  language    string, 1 distinct value
  repo        string, length 8 to 68
  path        string, length 5 to 194
  url         string, length 46 to 254
  license     string, 4 distinct values
def _process_quantiles(self, x, dim):
    """
    Adjust quantiles array so that last axis labels the components of
    each data point.
    """
    x = np.asarray(x, dtype=float)

    if x.ndim == 0:
        x = x[np.newaxis]
    elif x.ndim == 1:
        if dim == 1:
            x = x[:, np.newaxis]
        else:
            x = x[np.newaxis, :]

    return x
Adjust quantiles array so that last axis labels the components of each data point.
_process_quantiles
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
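A minimal standalone sketch of the reshaping rule above; the mirror function and demo shapes are illustrative, not part of the dataset row.

import numpy as np

def process_quantiles(x, dim):
    # Mirror of the helper above: make the last axis index the components.
    x = np.asarray(x, dtype=float)
    if x.ndim == 0:
        x = x[np.newaxis]
    elif x.ndim == 1:
        x = x[:, np.newaxis] if dim == 1 else x[np.newaxis, :]
    return x

print(process_quantiles(3.0, dim=2).shape)         # (1,): scalar promoted to 1-d
print(process_quantiles([1.0, 2.0], dim=1).shape)  # (2, 1): two univariate points
print(process_quantiles([1.0, 2.0], dim=2).shape)  # (1, 2): one bivariate point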
def _process_parameters(self, loc, shape, df):
    """
    Infer dimensionality from location array and shape matrix, handle
    defaults, and ensure compatible dimensions.
    """
    if loc is None and shape is None:
        loc = np.asarray(0, dtype=float)
        shape = np.asarray(1, dtype=float)
        dim = 1
    elif loc is None:
        shape = np.asarray(shape, dtype=float)
        if shape.ndim < 2:
            dim = 1
        else:
            dim = shape.shape[0]
        loc = np.zeros(dim)
    elif shape is None:
        loc = np.asarray(loc, dtype=float)
        dim = loc.size
        shape = np.eye(dim)
    else:
        shape = np.asarray(shape, dtype=float)
        loc = np.asarray(loc, dtype=float)
        dim = loc.size

    if dim == 1:
        loc.shape = (1,)
        shape.shape = (1, 1)

    if loc.ndim != 1 or loc.shape[0] != dim:
        raise ValueError("Array 'loc' must be a vector of length %d." % dim)
    if shape.ndim == 0:
        shape = shape * np.eye(dim)
    elif shape.ndim == 1:
        shape = np.diag(shape)
    elif shape.ndim == 2 and shape.shape != (dim, dim):
        rows, cols = shape.shape
        if rows != cols:
            msg = ("Array 'cov' must be square if it is two dimensional,"
                   " but cov.shape = %s." % str(shape.shape))
        else:
            msg = ("Dimension mismatch: array 'cov' is of shape %s,"
                   " but 'loc' is a vector of length %d.")
            msg = msg % (str(shape.shape), len(loc))
        raise ValueError(msg)
    elif shape.ndim > 2:
        raise ValueError("Array 'cov' must be at most two-dimensional,"
                         " but cov.ndim = %d" % shape.ndim)

    # Process degrees of freedom.
    if df is None:
        df = 1
    elif df <= 0:
        raise ValueError("'df' must be greater than zero.")
    elif np.isnan(df):
        raise ValueError("'df' is 'nan' but must be greater than zero or 'np.inf'.")

    return dim, loc, shape, df
Infer dimensionality from location array and shape matrix, handle defaults, and ensure compatible dimensions.
_process_parameters
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
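The defaulting in `_process_parameters` can be traced with plain NumPy; a sketch of the three branches (variable names mirror the row, values are illustrative):

import numpy as np

# loc and shape both omitted: scalar defaults, dim == 1
loc, shape, dim = np.asarray(0.0), np.asarray(1.0), 1

# shape only: dim comes from the matrix, loc defaults to zeros
shape2 = np.eye(3)
dim2 = shape2.shape[0]      # 3
loc2 = np.zeros(dim2)

# loc only: dim comes from the vector, shape defaults to the identity
loc3 = np.array([1.0, 2.0])
dim3 = loc3.size            # 2
shape3 = np.eye(dim3)

# A 0-d or 1-d shape is later promoted to a full matrix:
print(2.0 * np.eye(2))      # scalar shape -> 2.0 * I
print(np.diag([1.0, 4.0]))  # 1-d shape -> diagonal matrix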
def __init__(self, loc=None, shape=1, df=1, allow_singular=False, seed=None):
    """
    Create a frozen multivariate t distribution.

    Parameters
    ----------
    %(_mvt_doc_default_callparams)s

    Examples
    --------
    >>> loc = np.zeros(3)
    >>> shape = np.eye(3)
    >>> df = 10
    >>> dist = multivariate_t(loc, shape, df)
    >>> dist.rvs()
    array([[ 0.81412036, -1.53612361,  0.42199647]])
    >>> dist.pdf([1, 1, 1])
    array([0.01237803])
    """
    self._dist = multivariate_t_gen(seed)
    dim, loc, shape, df = self._dist._process_parameters(loc, shape, df)
    self.dim, self.loc, self.shape, self.df = dim, loc, shape, df
    self.shape_info = _PSD(shape, allow_singular=allow_singular)
Create a frozen multivariate t distribution. Parameters ---------- %(_mvt_doc_default_callparams)s Examples -------- >>> loc = np.zeros(3) >>> shape = np.eye(3) >>> df = 10 >>> dist = multivariate_t(loc, shape, df) >>> dist.rvs() array([[ 0.81412036, -1.53612361, 0.42199647]]) >>> dist.pdf([1, 1, 1]) array([0.01237803])
__init__
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def lstsq(a, b, rcond=None):
    """
    Shim that allows modern rcond setting with backward compat for NumPy
    earlier than 1.14.
    """
    if NP_LT_114 and rcond is None:
        rcond = -1
    return np.linalg.lstsq(a, b, rcond=rcond)
Shim that allows modern rcond setting with backward compat for NumPy earlier than 1.14
lstsq
python
statsmodels/statsmodels
statsmodels/compat/numpy.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/numpy.py
BSD-3-Clause
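A usage sketch for the shim: on NumPy 1.14+ it is equivalent to calling np.linalg.lstsq with rcond=None directly (the data below is illustrative).

import numpy as np

# Fit y = a*x + b to four points by least squares.
x = np.array([0.0, 1.0, 2.0, 3.0])
A = np.column_stack([x, np.ones_like(x)])
y = np.array([1.0, 3.0, 5.0, 7.0])

coef, residuals, rank, sv = np.linalg.lstsq(A, y, rcond=None)
print(coef)  # [2., 1.]: slope 2, intercept 1

The shim only matters on NumPy older than 1.14, where it maps rcond=None to the legacy rcond=-1 instead of triggering NumPy's deprecation warning.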
def is_int_index(index: pd.Index) -> bool:
    """
    Check if an index is integral

    Parameters
    ----------
    index : pd.Index
        Any numeric index

    Returns
    -------
    bool
        True if is an index with a standard integral type
    """
    return (
        isinstance(index, pd.Index)
        and isinstance(index.dtype, np.dtype)
        and np.issubdtype(index.dtype, np.integer)
    )
Check if an index is integral Parameters ---------- index : pd.Index Any numeric index Returns ------- bool True if is an index with a standard integral type
is_int_index
python
statsmodels/statsmodels
statsmodels/compat/pandas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/pandas.py
BSD-3-Clause
def is_float_index(index: pd.Index) -> bool:
    """
    Check if an index is floating

    Parameters
    ----------
    index : pd.Index
        Any numeric index

    Returns
    -------
    bool
        True if an index with a standard numpy floating dtype
    """
    return (
        isinstance(index, pd.Index)
        and isinstance(index.dtype, np.dtype)
        and np.issubdtype(index.dtype, np.floating)
    )
Check if an index is floating Parameters ---------- index : pd.Index Any numeric index Returns ------- bool True if an index with a standard numpy floating dtype
is_float_index
python
statsmodels/statsmodels
statsmodels/compat/pandas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/pandas.py
BSD-3-Clause
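A quick check of the two index predicates; the import path is taken from the rows above, so availability depends on the installed statsmodels version.

import pandas as pd
from statsmodels.compat.pandas import is_float_index, is_int_index

print(is_int_index(pd.Index([1, 2, 3])))     # True: int64 dtype
print(is_int_index(pd.RangeIndex(5)))        # True: RangeIndex is integer-dtyped
print(is_int_index(pd.Index([1.0, 2.0])))    # False: float64 dtype
print(is_float_index(pd.Index([1.0, 2.0])))  # True
print(is_float_index(pd.Index(["a", "b"])))  # False: object dtype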
def rands_array(nchars, size, dtype="O"):
    """
    Generate an array of random strings.
    """
    rands_chars = np.array(
        list(string.ascii_letters + string.digits), dtype=(np.str_, 1)
    )
    retval = (
        np.random.choice(rands_chars, size=nchars * np.prod(size))
        .view((np.str_, nchars))
        .reshape(size)
    )
    if dtype is None:
        return retval
    else:
        return retval.astype(dtype)
Generate an array of random strings.
rands_array
python
statsmodels/statsmodels
statsmodels/compat/pandas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/pandas.py
BSD-3-Clause
def make_dataframe():
    """
    Simple version of pandas._testing.makeDataFrame
    """
    n = 30
    k = 4
    index = pd.Index(rands_array(nchars=10, size=n), name=None)
    data = {
        c: pd.Series(np.random.randn(n), index=index)
        for c in string.ascii_uppercase[:k]
    }
    return pd.DataFrame(data)
Simple version of pandas._testing.makeDataFrame
make_dataframe
python
statsmodels/statsmodels
statsmodels/compat/pandas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/pandas.py
BSD-3-Clause
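A usage sketch for the testing helper; the import path is taken from the row, so it assumes a statsmodels version that ships this function.

from statsmodels.compat.pandas import make_dataframe

df = make_dataframe()
print(df.shape)          # (30, 4)
print(list(df.columns))  # ['A', 'B', 'C', 'D']
print(df.index[:2])      # two random 10-character string labels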
def to_numpy(po: pd.DataFrame) -> np.ndarray:
    """
    Workaround legacy pandas lacking to_numpy

    Parameters
    ----------
    po : pandas object

    Returns
    -------
    ndarray
        A numpy array
    """
    try:
        return po.to_numpy()
    except AttributeError:
        return po.values
Workaround legacy pandas lacking to_numpy Parameters ---------- po : pandas object Returns ------- ndarray A numpy array
to_numpy
python
statsmodels/statsmodels
statsmodels/compat/pandas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/pandas.py
BSD-3-Clause
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)

    return type.__new__(metaclass, "temporary_class", (), {})
Create a base class with a metaclass.
with_metaclass
python
statsmodels/statsmodels
statsmodels/compat/python.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/python.py
BSD-3-Clause
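A sketch of how the helper is typically used: attaching a custom metaclass to a base class in code that had to run on both Python 2 and 3 (the toy metaclass here is illustrative).

from statsmodels.compat.python import with_metaclass

class RegisterMeta(type):
    # Toy metaclass that records the name of every class it creates.
    registry = []

    def __new__(mcls, name, bases, ns):
        cls = super().__new__(mcls, name, bases, ns)
        RegisterMeta.registry.append(name)
        return cls

class Base(with_metaclass(RegisterMeta)):
    pass

class Child(Base):
    pass

print(RegisterMeta.registry)  # ['Base', 'Child']; temporary_class never leaks

On Python 3 alone, `class Base(metaclass=RegisterMeta)` is the direct equivalent; the dummy-metaclass trick exists only for cross-version source compatibility.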
def get_all_sorted_knots(
    x, n_inner_knots=None, inner_knots=None, lower_bound=None, upper_bound=None
):
    """Gets all knot locations with lower and upper exterior knots included.

    If needed, inner knots are computed as equally spaced quantiles of the
    input data falling between given lower and upper bounds.

    :param x: The 1-d array data values.
    :param n_inner_knots: Number of inner knots to compute.
    :param inner_knots: Provided inner knots if any.
    :param lower_bound: The lower exterior knot location. If unspecified, the
     minimum of ``x`` values is used.
    :param upper_bound: The upper exterior knot location. If unspecified, the
     maximum of ``x`` values is used.
    :return: The array of ``n_inner_knots + 2`` distinct knots.

    :raise ValueError: for various invalid parameter sets or if unable to
     compute ``n_inner_knots + 2`` distinct knots.
    """
    if lower_bound is None and x.size == 0:
        raise ValueError(
            "Cannot set lower exterior knot location: empty "
            "input data and lower_bound not specified."
        )
    elif lower_bound is None and x.size != 0:
        lower_bound = np.min(x)

    if upper_bound is None and x.size == 0:
        raise ValueError(
            "Cannot set upper exterior knot location: empty "
            "input data and upper_bound not specified."
        )
    elif upper_bound is None and x.size != 0:
        upper_bound = np.max(x)

    if upper_bound < lower_bound:
        raise ValueError(
            "lower_bound > upper_bound (%r > %r)" % (lower_bound, upper_bound)
        )

    if inner_knots is None and n_inner_knots is not None:
        if n_inner_knots < 0:
            raise ValueError(
                "Invalid requested number of inner knots: %r" % (n_inner_knots,)
            )

        x = x[(lower_bound <= x) & (x <= upper_bound)]
        x = np.unique(x)

        if x.size != 0:
            inner_knots_q = np.linspace(0, 100, n_inner_knots + 2)[1:-1]
            # .tolist() is necessary to work around a bug in numpy 1.8
            inner_knots = np.asarray(np.percentile(x, inner_knots_q.tolist()))
        elif n_inner_knots == 0:
            inner_knots = np.array([])
        else:
            raise ValueError(
                "No data values between lower_bound(=%r) and "
                "upper_bound(=%r): cannot compute requested "
                "%r inner knot(s)." % (lower_bound, upper_bound, n_inner_knots)
            )
    elif inner_knots is not None:
        inner_knots = np.unique(inner_knots)
        if n_inner_knots is not None and n_inner_knots != inner_knots.size:
            raise ValueError(
                "Needed number of inner knots=%r does not match "
                "provided number of inner knots=%r."
                % (n_inner_knots, inner_knots.size)
            )
        n_inner_knots = inner_knots.size
        if np.any(inner_knots < lower_bound):
            raise ValueError(
                "Some knot values (%s) fall below lower bound "
                "(%r)." % (inner_knots[inner_knots < lower_bound], lower_bound)
            )
        if np.any(inner_knots > upper_bound):
            raise ValueError(
                "Some knot values (%s) fall above upper bound "
                "(%r)." % (inner_knots[inner_knots > upper_bound], upper_bound)
            )
    else:
        raise ValueError("Must specify either 'n_inner_knots' or 'inner_knots'.")

    all_knots = np.concatenate(([lower_bound, upper_bound], inner_knots))
    all_knots = np.unique(all_knots)
    if all_knots.size != n_inner_knots + 2:
        raise ValueError(
            "Unable to compute n_inner_knots(=%r) + 2 distinct "
            "knots: %r data value(s) found between "
            "lower_bound(=%r) and upper_bound(=%r)."
            % (n_inner_knots, x.size, lower_bound, upper_bound)
        )

    return all_knots
Gets all knot locations with lower and upper exterior knots included. If needed, inner knots are computed as equally spaced quantiles of the input data falling between given lower and upper bounds. :param x: The 1-d array data values. :param n_inner_knots: Number of inner knots to compute. :param inner_knots: Provided inner knots if any. :param lower_bound: The lower exterior knot location. If unspecified, the minimum of ``x`` values is used. :param upper_bound: The upper exterior knot location. If unspecified, the maximum of ``x`` values is used. :return: The array of ``n_inner_knots + 2`` distinct knots. :raise ValueError: for various invalid parameter sets or if unable to compute ``n_inner_knots + 2`` distinct knots.
get_all_sorted_knots
python
statsmodels/statsmodels
statsmodels/compat/patsy.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/patsy.py
BSD-3-Clause
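A small worked example of the knot selection; the import path comes from the row, and the expected output follows from linear-interpolation percentiles of the data.

import numpy as np
from statsmodels.compat.patsy import get_all_sorted_knots

x = np.arange(11, dtype=float)  # 0.0, 1.0, ..., 10.0

# Three inner knots at the 25th/50th/75th percentiles of x, bracketed by
# the data minimum and maximum as exterior knots.
print(get_all_sorted_knots(x, n_inner_knots=3))
# [ 0.   2.5  5.   7.5 10. ]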
def _get_init_kwds(self):
    """return dictionary with extra keys used in model.__init__
    """
    kwds = {key: getattr(self, key, None) for key in self._init_keys}
    return kwds
return dictionary with extra keys used in model.__init__
_get_init_kwds
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def from_formula(cls, formula, data, subset=None, drop_cols=None, *args, **kwargs):
    """
    Create a Model from a formula and dataframe.

    Parameters
    ----------
    formula : str or generic Formula object
        The formula specifying the model.
    data : array_like
        The data for the model. See Notes.
    subset : array_like
        An array-like object of booleans, integers, or index values that
        indicate the subset of df to use in the model. Assumes df is a
        `pandas.DataFrame`.
    drop_cols : array_like
        Columns to drop from the design matrix. Cannot be used to drop
        terms involving categoricals.
    *args
        Additional positional arguments that are passed to the model.
    **kwargs
        These are passed to the model with one exception. The ``eval_env``
        keyword is passed to patsy. It can be either a
        :class:`patsy:patsy.EvalEnvironment` object or an integer
        indicating the depth of the namespace to use. For example, the
        default ``eval_env=0`` uses the calling namespace. If you wish to
        use a "clean" environment set ``eval_env=-1``.

    Returns
    -------
    model
        The model instance.

    Notes
    -----
    data must define __getitem__ with the keys in the formula terms args
    and kwargs are passed on to the model instantiation. E.g., a numpy
    structured or rec array, a dictionary, or a pandas DataFrame.
    """
    # TODO: provide a docs template for args/kwargs from child models
    # TODO: subset could use syntax. issue #469.
    mgr = FormulaManager()
    if subset is not None:
        data = data.loc[subset]
    eval_env = kwargs.pop('eval_env', None)
    if eval_env is None:
        eval_env = 2
    elif eval_env == -1:
        eval_env = mgr.get_empty_eval_env()
    elif isinstance(eval_env, int):
        eval_env += 1  # we're going down the stack again
    missing = kwargs.get('missing', 'drop')
    if missing == 'none':  # with patsy it's drop or raise. let's raise.
        missing = 'raise'

    tmp = handle_formula_data(data, None, formula, depth=eval_env,
                              missing=missing)
    ((endog, exog), missing_idx, model_spec) = tmp
    max_endog = cls._formula_max_endog
    if (max_endog is not None and
            endog.ndim > 1 and endog.shape[1] > max_endog):
        raise ValueError('endog has evaluated to an array with multiple '
                         'columns that has shape {}. This occurs when '
                         'the variable converted to endog is non-numeric'
                         ' (e.g., bool or str).'.format(endog.shape))

    if drop_cols is not None and len(drop_cols) > 0:
        cols = [x for x in exog.columns if x not in drop_cols]
        if len(cols) < len(exog.columns):
            exog = exog[cols]
            spec_cols = list(mgr.get_term_names(model_spec))
            for col in drop_cols:
                try:
                    if mgr.engine == "formulaic" and col == "Intercept":
                        col = "1"
                    spec_cols.remove(col)
                except ValueError:
                    pass  # OK if not present
            # TODO: Patsy migration, need to add method to handle
            model_spec = model_spec.subset(spec_cols)

    kwargs.update({'missing_idx': missing_idx,
                   'missing': missing,
                   'formula': formula,  # attach formula for unpickling
                   'model_spec': model_spec})
    mod = cls(endog, exog, *args, **kwargs)
    mod.formula = formula
    # since we got a dataframe, attach the original
    mod.data.frame = data
    return mod
Create a Model from a formula and dataframe. Parameters ---------- formula : str or generic Formula object The formula specifying the model. data : array_like The data for the model. See Notes. subset : array_like An array-like object of booleans, integers, or index values that indicate the subset of df to use in the model. Assumes df is a `pandas.DataFrame`. drop_cols : array_like Columns to drop from the design matrix. Cannot be used to drop terms involving categoricals. *args Additional positional arguments that are passed to the model. **kwargs These are passed to the model with one exception. The ``eval_env`` keyword is passed to patsy. It can be either a :class:`patsy:patsy.EvalEnvironment` object or an integer indicating the depth of the namespace to use. For example, the default ``eval_env=0`` uses the calling namespace. If you wish to use a "clean" environment set ``eval_env=-1``. Returns ------- model The model instance. Notes ----- data must define __getitem__ with the keys in the formula terms args and kwargs are passed on to the model instantiation. E.g., a numpy structured or rec array, a dictionary, or a pandas DataFrame.
from_formula
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
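An end-to-end sketch of the formula entry point using OLS (the data is synthetic and illustrative). Because formula terms are evaluated against the frame plus the calling namespace, transformations such as np.log can appear directly in the formula string.

import numpy as np
import pandas as pd
import statsmodels.api as sm

rng = np.random.default_rng(0)
df = pd.DataFrame({"x1": rng.uniform(1.0, 10.0, 50),
                   "x2": rng.uniform(1.0, 10.0, 50)})
df["y"] = 2.0 * np.log(df["x1"]) + 0.5 * df["x2"] + rng.normal(size=50)

# from_formula builds endog/exog from the frame and attaches the formula.
res = sm.OLS.from_formula("y ~ np.log(x1) + x2", data=df).fit()
print(res.params)  # intercept near 0, np.log(x1) near 2, x2 near 0.5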
def endog_names(self):
    """
    Names of endogenous variables.
    """
    return self.data.ynames
Names of endogenous variables.
endog_names
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def exog_names(self) -> list[str] | None:
    """
    Names of exogenous variables.
    """
    return self.data.xnames
Names of exogenous variables.
exog_names
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def fit(self):
    """
    Fit a model to data.
    """
    raise NotImplementedError
Fit a model to data.
fit
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def predict(self, params, exog=None, *args, **kwargs):
    """
    After a model has been fit, predict returns the fitted values.

    This is a placeholder intended to be overwritten by individual models.
    """
    raise NotImplementedError
After a model has been fit, predict returns the fitted values. This is a placeholder intended to be overwritten by individual models.
predict
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def initialize(self):
    """
    Initialize (possibly re-initialize) a Model instance.

    For example, if the design matrix of a linear model changes then
    initialize can be used to recompute values using the modified design
    matrix.
    """
    pass
Initialize (possibly re-initialize) a Model instance. For example, if the design matrix of a linear model changes then initialize can be used to recompute values using the modified design matrix.
initialize
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def loglike(self, params):
    """
    Log-likelihood of model.

    Parameters
    ----------
    params : ndarray
        The model parameters used to compute the log-likelihood.

    Notes
    -----
    Must be overridden by subclasses.
    """
    raise NotImplementedError
Log-likelihood of model. Parameters ---------- params : ndarray The model parameters used to compute the log-likelihood. Notes ----- Must be overridden by subclasses.
loglike
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def score(self, params):
    """
    Score vector of model.

    The gradient of logL with respect to each parameter.

    Parameters
    ----------
    params : ndarray
        The parameters to use when evaluating the score.

    Returns
    -------
    ndarray
        The score vector evaluated at the parameters.
    """
    raise NotImplementedError
Score vector of model. The gradient of logL with respect to each parameter. Parameters ---------- params : ndarray The parameters to use when evaluating the score. Returns ------- ndarray The score vector evaluated at the parameters.
score
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def information(self, params):
    """
    Fisher information matrix of model.

    Returns -1 * Hessian of the log-likelihood evaluated at params.

    Parameters
    ----------
    params : ndarray
        The model parameters.
    """
    raise NotImplementedError
Fisher information matrix of model. Returns -1 * Hessian of the log-likelihood evaluated at params. Parameters ---------- params : ndarray The model parameters.
information
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def hessian(self, params):
    """
    The Hessian matrix of the model.

    Parameters
    ----------
    params : ndarray
        The parameters to use when evaluating the Hessian.

    Returns
    -------
    ndarray
        The hessian evaluated at the parameters.
    """
    raise NotImplementedError
The Hessian matrix of the model. Parameters ---------- params : ndarray The parameters to use when evaluating the Hessian. Returns ------- ndarray The hessian evaluated at the parameters.
hessian
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def _fit_zeros(self, keep_index=None, start_params=None,
               return_auxiliary=False, k_params=None, **fit_kwds):
    """experimental, fit the model subject to zero constraints

    Intended for internal use cases until we know what we need.
    API will need to change to handle models with two exog.
    This is not yet supported by all model subclasses.

    This is essentially a simplified version of `fit_constrained`, and
    does not need to use `offset`.

    The estimation creates a new model with transformed design matrix,
    exog, and converts the results back to the original parameterization.

    Some subclasses could use a more efficient calculation than using a new
    model.

    Parameters
    ----------
    keep_index : array_like (int or bool) or slice
        variables that should be kept.
    start_params : None or array_like
        starting values for the optimization. `start_params` needs to be
        given in the original parameter space and are internally
        transformed.
    k_params : int or None
        If None, then we try to infer from start_params or model.
    **fit_kwds : keyword arguments
        fit_kwds are used in the optimization of the transformed model.

    Returns
    -------
    results : Results instance
    """
    # we need to append index of extra params to keep_index as in
    # NegativeBinomial
    if hasattr(self, 'k_extra') and self.k_extra > 0:
        # we cannot change the original, TODO: should we add keep_index_params?
        keep_index = np.array(keep_index, copy=True)
        k = self.exog.shape[1]
        extra_index = np.arange(k, k + self.k_extra)
        keep_index_p = np.concatenate((keep_index, extra_index))
    else:
        keep_index_p = keep_index

    # not all models support start_params, drop if None, hide them in fit_kwds
    if start_params is not None:
        fit_kwds['start_params'] = start_params[keep_index_p]
        k_params = len(start_params)
        # ignore k_params in this case, or verify consistency?

    # build auxiliary model and fit
    init_kwds = self._get_init_kwds()
    mod_constr = self.__class__(self.endog, self.exog[:, keep_index],
                                **init_kwds)
    res_constr = mod_constr.fit(**fit_kwds)
    # switch name, only need keep_index for params below
    keep_index = keep_index_p

    if k_params is None:
        k_params = self.exog.shape[1]
        k_params += getattr(self, 'k_extra', 0)

    params_full = np.zeros(k_params)
    params_full[keep_index] = res_constr.params

    # create dummy results Instance, TODO: wire up properly
    # TODO: this could be moved into separate private method if needed
    # discrete L1 fit_regularized doesn't reestimate AFAICS
    # RLM does not have method, disp nor warn_convergence keywords
    # OLS, WLS swallows extra kwds with **kwargs, but does not have method='nm'
    try:
        # Note: adding full_output=False causes exceptions
        res = self.fit(maxiter=0, disp=0, method='nm', skip_hessian=True,
                       warn_convergence=False, start_params=params_full)
        # we get a wrapper back
    except (TypeError, ValueError):
        res = self.fit()

    # Warning: make sure we are not just changing the wrapper instead of
    # results #2400
    # TODO: do we need to change res._results.scale in some models?
    if hasattr(res_constr.model, 'scale'):
        # Note: res.model is self
        # GLM problem, see #2399,
        # TODO: remove from model if not needed anymore
        res.model.scale = res._results.scale = res_constr.model.scale

    if hasattr(res_constr, 'mle_retvals'):
        res._results.mle_retvals = res_constr.mle_retvals
        # not available for not scipy optimization, e.g. glm irls
        # TODO: what retvals should be required?
        # res.mle_retvals['fcall'] = res_constr.mle_retvals.get('fcall', np.nan)
        # res.mle_retvals['iterations'] = res_constr.mle_retvals.get(
        #     'iterations', np.nan)
        # res.mle_retvals['converged'] = res_constr.mle_retvals['converged']
    # overwrite all mle_settings
    if hasattr(res_constr, 'mle_settings'):
        res._results.mle_settings = res_constr.mle_settings

    res._results.params = params_full
    if (not hasattr(res._results, 'normalized_cov_params') or
            res._results.normalized_cov_params is None):
        res._results.normalized_cov_params = np.zeros((k_params, k_params))
    else:
        res._results.normalized_cov_params[...] = 0

    # fancy indexing requires integer array
    keep_index = np.array(keep_index)
    res._results.normalized_cov_params[keep_index[:, None], keep_index] = \
        res_constr.normalized_cov_params
    k_constr = res_constr.df_resid - res._results.df_resid
    if hasattr(res_constr, 'cov_params_default'):
        res._results.cov_params_default = np.zeros((k_params, k_params))
        res._results.cov_params_default[keep_index[:, None], keep_index] = \
            res_constr.cov_params_default
    if hasattr(res_constr, 'cov_type'):
        res._results.cov_type = res_constr.cov_type
        res._results.cov_kwds = res_constr.cov_kwds

    res._results.keep_index = keep_index
    res._results.df_resid = res_constr.df_resid
    res._results.df_model = res_constr.df_model

    res._results.k_constr = k_constr
    res._results.results_constrained = res_constr

    # special temporary workaround for RLM
    # need to be able to override robust covariances
    if hasattr(res.model, 'M'):
        del res._results._cache['resid']
        del res._results._cache['fittedvalues']
        del res._results._cache['sresid']
        cov = res._results._cache['bcov_scaled']
        # inplace adjustment
        cov[...] = 0
        cov[keep_index[:, None], keep_index] = res_constr.bcov_scaled
        res._results.cov_params_default = cov

    return res
experimental, fit the model subject to zero constraints Intended for internal use cases until we know what we need. API will need to change to handle models with two exog. This is not yet supported by all model subclasses. This is essentially a simplified version of `fit_constrained`, and does not need to use `offset`. The estimation creates a new model with transformed design matrix, exog, and converts the results back to the original parameterization. Some subclasses could use a more efficient calculation than using a new model. Parameters ---------- keep_index : array_like (int or bool) or slice variables that should be kept. start_params : None or array_like starting values for the optimization. `start_params` needs to be given in the original parameter space and are internally transformed. k_params : int or None If None, then we try to infer from start_params or model. **fit_kwds : keyword arguments fit_kwds are used in the optimization of the transformed model. Returns ------- results : Results instance
_fit_zeros
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def _fit_collinear(self, atol=1e-14, rtol=1e-13, **kwds):
    """experimental, fit of the model without collinear variables

    This currently uses QR to drop variables based on the given
    sequence.

    Options will be added in future, when the supporting functions
    to identify collinear variables become available.
    """
    # ------ copied from PR #2380 remove when merged
    x = self.exog
    tol = atol + rtol * x.var(0)
    r = np.linalg.qr(x, mode='r')
    mask = np.abs(r.diagonal()) < np.sqrt(tol)
    # TODO add to results instance
    # idx_collinear = np.where(mask)[0]
    idx_keep = np.where(~mask)[0]
    return self._fit_zeros(keep_index=idx_keep, **kwds)
experimental, fit of the model without collinear variables This currently uses QR to drop variables based on the given sequence. Options will be added in future, when the supporting functions to identify collinear variables become available.
_fit_collinear
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def initialize(self):
    """
    Initialize (possibly re-initialize) a Model instance.

    For instance, the design matrix of a linear model may change and some
    things must be recomputed.
    """
    if not self.score:  # right now score is not optional
        self.score = lambda x: approx_fprime(x, self.loglike)
        if not self.hessian:
            pass
    else:  # can use approx_hess_p if we have a gradient
        if not self.hessian:
            pass
    # Initialize is called by
    # statsmodels.model.LikelihoodModel.__init__
    # and should contain any preprocessing that needs to be done for a model
    if self.exog is not None:
        # assume constant
        er = np.linalg.matrix_rank(self.exog)
        self.df_model = float(er - 1)
        self.df_resid = float(self.exog.shape[0] - er)
    else:
        self.df_model = np.nan
        self.df_resid = np.nan
    super().initialize()
Initialize (possibly re-initialize) a Model instance. For instance, the design matrix of a linear model may change and some things must be recomputed.
initialize
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def expandparams(self, params):
    """
    expand to full parameter array when some parameters are fixed

    Parameters
    ----------
    params : ndarray
        reduced parameter array

    Returns
    -------
    paramsfull : ndarray
        expanded parameter array where fixed parameters are included

    Notes
    -----
    Calling this requires that self.fixed_params and
    self.fixed_paramsmask are defined.

    *developer notes:*

    This can be used in the log-likelihood to ...

    this could also be replaced by a more general parameter
    transformation.
    """
    paramsfull = self.fixed_params.copy()
    paramsfull[self.fixed_paramsmask] = params
    return paramsfull
expand to full parameter array when some parameters are fixed Parameters ---------- params : ndarray reduced parameter array Returns ------- paramsfull : ndarray expanded parameter array where fixed parameters are included Notes ----- Calling this requires that self.fixed_params and self.fixed_paramsmask are defined. *developer notes:* This can be used in the log-likelihood to ... this could also be replaced by a more general parameter transformation.
expandparams
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
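A standalone sketch of the fixed-parameter bookkeeping that expandparams and reduceparams rely on, assuming the usual statsmodels convention that self.fixed_params holds nan in the free slots and self.fixed_paramsmask marks those slots.

import numpy as np

fixed_params = np.array([0.5, np.nan, np.nan, 2.0])  # nan marks free slots
fixed_paramsmask = np.isnan(fixed_params)

reduced = np.array([1.0, -1.0])  # optimizer works in this reduced space

full = fixed_params.copy()
full[fixed_paramsmask] = reduced  # expandparams
print(full)                       # [ 0.5  1.  -1.   2. ]
print(full[fixed_paramsmask])     # reduceparams: back to [ 1. -1.]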
def reduceparams(self, params):
    """Reduce parameters"""
    return params[self.fixed_paramsmask]
Reduce parameters
reduceparams
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def loglike(self, params):
    """Log-likelihood of model at params"""
    return self.loglikeobs(params).sum(0)
Log-likelihood of model at params
loglike
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def nloglike(self, params):
    """Negative log-likelihood of model at params"""
    return -self.loglikeobs(params).sum(0)
Negative log-likelihood of model at params
nloglike
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def loglikeobs(self, params):
    """
    Log-likelihood of the model for all observations at params.

    Parameters
    ----------
    params : array_like
        The parameters of the model.

    Returns
    -------
    loglike : array_like
        The log likelihood of the model evaluated at `params`.
    """
    return -self.nloglikeobs(params)
Log-likelihood of the model for all observations at params. Parameters ---------- params : array_like The parameters of the model. Returns ------- loglike : array_like The log likelihood of the model evaluated at `params`.
loglikeobs
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def score(self, params):
    """
    Gradient of log-likelihood evaluated at params
    """
    kwds = {}
    kwds.setdefault('centered', True)
    return approx_fprime(params, self.loglike, **kwds).ravel()
Gradient of log-likelihood evaluated at params
score
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def score_obs(self, params, **kwds):
    """
    Jacobian/Gradient of log-likelihood evaluated at params for each
    observation.
    """
    # kwds.setdefault('epsilon', 1e-4)
    kwds.setdefault('centered', True)
    return approx_fprime(params, self.loglikeobs, **kwds)
Jacobian/Gradient of log-likelihood evaluated at params for each observation.
score_obs
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def hessian(self, params):
    """
    Hessian of log-likelihood evaluated at params
    """
    from statsmodels.tools.numdiff import approx_hess
    # need options for hess (epsilon)
    return approx_hess(params, self.loglike)
Hessian of log-likelihood evaluated at params
hessian
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
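The numeric-differentiation fallbacks above come from statsmodels.tools.numdiff; a quick sanity check on a function with a known gradient and Hessian (the test function is illustrative).

import numpy as np
from statsmodels.tools.numdiff import approx_fprime, approx_hess

def f(x):
    # f(x) = x0^2 + 3*x0*x1, so grad = [2*x0 + 3*x1, 3*x0]
    # and Hessian = [[2, 3], [3, 0]].
    return x[0] ** 2 + 3 * x[0] * x[1]

x0 = np.array([1.0, 2.0])
print(approx_fprime(x0, f, centered=True))  # approximately [8., 3.]
print(approx_hess(x0, f))                   # approximately [[2., 3.], [3., 0.]]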
def hessian_factor(self, params, scale=None, observed=True):
    """Weights for calculating Hessian

    Parameters
    ----------
    params : ndarray
        parameter at which Hessian is evaluated
    scale : None or float
        If scale is None, then the default scale will be calculated.
        Default scale is defined by `self.scaletype` and set in fit.
        If scale is not None, then it is used as a fixed scale.
    observed : bool
        If True, then the observed Hessian is returned. If False, then the
        expected information matrix is returned.

    Returns
    -------
    hessian_factor : ndarray, 1d
        A 1d weight vector used in the calculation of the Hessian.
        The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`
    """
    raise NotImplementedError
Weights for calculating Hessian Parameters ---------- params : ndarray parameter at which Hessian is evaluated scale : None or float If scale is None, then the default scale will be calculated. Default scale is defined by `self.scaletype` and set in fit. If scale is not None, then it is used as a fixed scale. observed : bool If True, then the observed Hessian is returned. If false then the expected information matrix is returned. Returns ------- hessian_factor : ndarray, 1d A 1d weight vector used in the calculation of the Hessian. The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`
hessian_factor
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def initialize(self, model, params, **kwargs):
    """
    Initialize (possibly re-initialize) a Results instance.

    Parameters
    ----------
    model : Model
        The model instance.
    params : ndarray
        The model parameters.
    **kwargs
        Any additional keyword arguments required to initialize the model.
    """
    self.params = params
    self.model = model
    if hasattr(model, 'k_constant'):
        self.k_constant = model.k_constant
Initialize (possibly re-initialize) a Results instance. Parameters ---------- model : Model The model instance. params : ndarray The model parameters. **kwargs Any additional keyword arguments required to initialize the model.
initialize
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def predict(self, exog=None, transform=True, *args, **kwargs):
    """
    Call self.model.predict with self.params as the first argument.

    Parameters
    ----------
    exog : array_like, optional
        The values for which you want to predict. see Notes below.
    transform : bool, optional
        If the model was fit via a formula, do you want to pass
        exog through the formula. Default is True. E.g., if you fit
        a model y ~ log(x1) + log(x2), and transform is True, then
        you can pass a data structure that contains x1 and x2 in
        their original form. Otherwise, you'd need to log the data
        first.
    *args
        Additional arguments to pass to the model, see the
        predict method of the model for the details.
    **kwargs
        Additional keywords arguments to pass to the model, see the
        predict method of the model for the details.

    Returns
    -------
    array_like
        See self.model.predict.

    Notes
    -----
    The types of exog that are supported depends on whether a formula
    was used in the specification of the model. If a formula was used,
    then exog is processed in the same way as the original data. This
    transformation needs to have key access to the same variable names,
    and can be a pandas DataFrame or a dict like object that contains
    numpy arrays.

    If no formula was used, then the provided exog needs to have the
    same number of columns as the original exog in the model. No
    transformation of the data is performed except converting it to
    a numpy array.

    Row indices as in pandas data frames are supported, and added to the
    returned prediction.
    """
    exog, exog_index = self._transform_predict_exog(exog,
                                                    transform=transform)

    predict_results = self.model.predict(self.params, exog, *args,
                                         **kwargs)

    if exog_index is not None and not hasattr(predict_results,
                                              'predicted_values'):
        if predict_results.ndim == 1:
            return pd.Series(predict_results, index=exog_index)
        else:
            return pd.DataFrame(predict_results, index=exog_index)
    else:
        return predict_results
Call self.model.predict with self.params as the first argument. Parameters ---------- exog : array_like, optional The values for which you want to predict. see Notes below. transform : bool, optional If the model was fit via a formula, do you want to pass exog through the formula. Default is True. E.g., if you fit a model y ~ log(x1) + log(x2), and transform is True, then you can pass a data structure that contains x1 and x2 in their original form. Otherwise, you'd need to log the data first. *args Additional arguments to pass to the model, see the predict method of the model for the details. **kwargs Additional keywords arguments to pass to the model, see the predict method of the model for the details. Returns ------- array_like See self.model.predict. Notes ----- The types of exog that are supported depends on whether a formula was used in the specification of the model. If a formula was used, then exog is processed in the same way as the original data. This transformation needs to have key access to the same variable names, and can be a pandas DataFrame or a dict like object that contains numpy arrays. If no formula was used, then the provided exog needs to have the same number of columns as the original exog in the model. No transformation of the data is performed except converting it to a numpy array. Row indices as in pandas data frames are supported, and added to the returned prediction.
predict
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def summary(self):
    """
    Summary

    Not implemented
    """
    raise NotImplementedError
Summary Not implemented
summary
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def normalized_cov_params(self):
    """See specific model class docstring"""
    raise NotImplementedError
See specific model class docstring
normalized_cov_params
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def use_t(self):
    """Flag indicating to use the Student's t distribution in inference."""
    return self._use_t
Flag indicating to use the Student's t distribution in inference.
use_t
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def llf(self):
    """Log-likelihood of model"""
    return self.model.loglike(self.params)
Log-likelihood of model
llf
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def bse(self):
    """The standard errors of the parameter estimates."""
    # Issue 3299
    if ((not hasattr(self, 'cov_params_default')) and
            (self.normalized_cov_params is None)):
        bse_ = np.empty(len(self.params))
        bse_[:] = np.nan
    else:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            bse_ = np.sqrt(np.diag(self.cov_params()))
    return bse_
The standard errors of the parameter estimates.
bse
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def tvalues(self):
    """
    Return the t-statistic for a given parameter estimate.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)
        return self.params / self.bse
Return the t-statistic for a given parameter estimate.
tvalues
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def pvalues(self):
    """The two-tailed p values for the t-stats of the params."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)
        if self.use_t:
            df_resid = getattr(self, 'df_resid_inference', self.df_resid)
            return stats.t.sf(np.abs(self.tvalues), df_resid) * 2
        else:
            return stats.norm.sf(np.abs(self.tvalues)) * 2
The two-tailed p values for the t-stats of the params.
pvalues
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
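A direct sketch of the two p-value branches above with scipy.stats (the t-values and degrees of freedom are illustrative):

import numpy as np
from scipy import stats

tvalues = np.array([-2.5, 0.8, 3.1])

p_norm = stats.norm.sf(np.abs(tvalues)) * 2  # use_t=False branch
p_t = stats.t.sf(np.abs(tvalues), 20) * 2    # use_t=True branch, df_resid=20

print(np.round(p_norm, 4))  # [0.0124 0.4237 0.0019]
print(np.round(p_t, 4))     # heavier tails -> slightly larger p-values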
def cov_params(self, r_matrix=None, column=None, scale=None, cov_p=None,
               other=None):
    """
    Compute the variance/covariance matrix.

    The variance/covariance matrix can be of a linear contrast of the
    estimated parameters or all params multiplied by scale which will
    usually be an estimate of sigma^2.  Scale is assumed to be a scalar.

    Parameters
    ----------
    r_matrix : array_like
        Can be 1d, or 2d.  Can be used alone or with other.
    column : array_like, optional
        Must be used on its own.  Can be 0d or 1d see below.
    scale : float, optional
        Can be specified or not.  Default is None, which means that
        the scale argument is taken from the model.
    cov_p : ndarray, optional
        The covariance of the parameters. If not provided, this value is
        read from `self.normalized_cov_params` or
        `self.cov_params_default`.
    other : array_like, optional
        Can be used when r_matrix is specified.

    Returns
    -------
    ndarray
        The covariance matrix of the parameter estimates or of linear
        combination of parameter estimates. See Notes.

    Notes
    -----
    (The below are assumed to be in matrix notation.)

    If no argument is specified returns the covariance matrix of a model
    ``(scale)*(X.T X)^(-1)``

    If contrast is specified it pre and post-multiplies as follows
    ``(scale) * r_matrix (X.T X)^(-1) r_matrix.T``

    If contrast and other are specified returns
    ``(scale) * r_matrix (X.T X)^(-1) other.T``

    If column is specified returns
    ``(scale) * (X.T X)^(-1)[column,column]`` if column is 0d

    OR

    ``(scale) * (X.T X)^(-1)[column][:,column]`` if column is 1d
    """
    if (hasattr(self, 'mle_settings') and
            self.mle_settings['optimizer'] in ['l1', 'l1_cvxopt_cp']):
        dot_fun = nan_dot
    else:
        dot_fun = np.dot

    if (cov_p is None and self.normalized_cov_params is None and
            not hasattr(self, 'cov_params_default')):
        raise ValueError('need covariance of parameters for computing '
                         '(unnormalized) covariances')
    if column is not None and (r_matrix is not None or other is not None):
        raise ValueError('Column should be specified without other '
                         'arguments.')
    if other is not None and r_matrix is None:
        raise ValueError('other can only be specified with r_matrix')

    if cov_p is None:
        if hasattr(self, 'cov_params_default'):
            cov_p = self.cov_params_default
        else:
            if scale is None:
                scale = self.scale
            cov_p = self.normalized_cov_params * scale

    if column is not None:
        column = np.asarray(column)
        if column.shape == ():
            return cov_p[column, column]
        else:
            return cov_p[column[:, None], column]
    elif r_matrix is not None:
        r_matrix = np.asarray(r_matrix)
        if r_matrix.shape == ():
            raise ValueError("r_matrix should be 1d or 2d")
        if other is None:
            other = r_matrix
        else:
            other = np.asarray(other)
        tmp = dot_fun(r_matrix, dot_fun(cov_p, np.transpose(other)))
        return tmp
    else:  # if r_matrix is None and column is None:
        return cov_p
Compute the variance/covariance matrix. The variance/covariance matrix can be of a linear contrast of the estimated parameters or all params multiplied by scale which will usually be an estimate of sigma^2. Scale is assumed to be a scalar. Parameters ---------- r_matrix : array_like Can be 1d, or 2d. Can be used alone or with other. column : array_like, optional Must be used on its own. Can be 0d or 1d see below. scale : float, optional Can be specified or not. Default is None, which means that the scale argument is taken from the model. cov_p : ndarray, optional The covariance of the parameters. If not provided, this value is read from `self.normalized_cov_params` or `self.cov_params_default`. other : array_like, optional Can be used when r_matrix is specified. Returns ------- ndarray The covariance matrix of the parameter estimates or of linear combination of parameter estimates. See Notes. Notes ----- (The below are assumed to be in matrix notation.) If no argument is specified returns the covariance matrix of a model ``(scale)*(X.T X)^(-1)`` If contrast is specified it pre and post-multiplies as follows ``(scale) * r_matrix (X.T X)^(-1) r_matrix.T`` If contrast and other are specified returns ``(scale) * r_matrix (X.T X)^(-1) other.T`` If column is specified returns ``(scale) * (X.T X)^(-1)[column,column]`` if column is 0d OR ``(scale) * (X.T X)^(-1)[column][:,column]`` if column is 1d
cov_params
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def t_test(self, r_matrix, cov_p=None, use_t=None):
    """
    Compute a t-test for each linear hypothesis of the form Rb = q.

    Parameters
    ----------
    r_matrix : {array_like, str, tuple}
        One of:

        - array : If an array is given, a p x k 2d array or length k 1d
          array specifying the linear restrictions. It is assumed that the
          linear combination is equal to zero.
        - str : The full hypotheses to test can be given as a string. See
          the examples.
        - tuple : A tuple of arrays in the form (R, q). If q is given,
          can be either a scalar or a length p row vector.

    cov_p : array_like, optional
        An alternative estimate for the parameter covariance matrix. If
        None is given, self.normalized_cov_params is used.
    use_t : bool, optional
        If use_t is None, then the default of the model is used. If use_t
        is True, then the p-values are based on the t distribution. If
        use_t is False, then the p-values are based on the normal
        distribution.

    Returns
    -------
    ContrastResults
        The results for the test are attributes of this results instance.
        The available results have the same elements as the parameter
        table in `summary()`.

    See Also
    --------
    tvalues : Individual t statistics for the estimated parameters.
    f_test : Perform an F test on model parameters.
    patsy.DesignInfo.linear_constraint : Specify a linear constraint.

    Examples
    --------
    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> data = sm.datasets.longley.load()
    >>> data.exog = sm.add_constant(data.exog)
    >>> results = sm.OLS(data.endog, data.exog).fit()
    >>> r = np.zeros_like(results.params)
    >>> r[5:] = [1,-1]
    >>> print(r)
    [ 0.  0.  0.  0.  0.  1. -1.]

    r tests that the coefficients on the 5th and 6th independent
    variable are the same.

    >>> T_test = results.t_test(r)
    >>> print(T_test)
                                 Test for Constraints
    ==============================================================================
                     coef    std err          t      P>|t|      [0.025      0.975]
    ------------------------------------------------------------------------------
    c0         -1829.2026    455.391     -4.017      0.003   -2859.368    -799.037
    ==============================================================================
    >>> T_test.effect
    -1829.2025687192481
    >>> T_test.sd
    455.39079425193762
    >>> T_test.tvalue
    -4.0167754636411717
    >>> T_test.pvalue
    0.0015163772380899498

    Alternatively, you can specify the hypothesis tests using a string

    >>> from statsmodels.formula.api import ols
    >>> dta = sm.datasets.longley.load_pandas().data
    >>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
    >>> results = ols(formula, dta).fit()
    >>> hypotheses = 'GNPDEFL = GNP, UNEMP = 2, YEAR/1829 = 1'
    >>> t_test = results.t_test(hypotheses)
    >>> print(t_test)
                                 Test for Constraints
    ==============================================================================
                     coef    std err          t      P>|t|      [0.025      0.975]
    ------------------------------------------------------------------------------
    c0            15.0977     84.937      0.178      0.863    -177.042     207.238
    c1            -2.0202      0.488     -8.231      0.000      -3.125      -0.915
    c2             1.0001      0.249      0.000      1.000       0.437       1.563
    ==============================================================================
    """
    use_t = bool_like(use_t, "use_t", strict=True, optional=True)
    if self.params.ndim == 2:
        names = [f'y{i[0]}_{i[1]}' for i in self.model.data.cov_names]
    else:
        names = self.model.data.cov_names
    mgr = FormulaManager()
    lc = mgr.get_linear_constraints(r_matrix, names)
    r_matrix, q_matrix = lc.constraint_matrix, lc.constraint_values
    num_ttests = r_matrix.shape[0]
    num_params = r_matrix.shape[1]

    if (cov_p is None and self.normalized_cov_params is None and
            not hasattr(self, 'cov_params_default')):
        raise ValueError('Need covariance of parameters for computing '
                         'T statistics')
    params = self.params.ravel(order="F")
    if num_params != params.shape[0]:
        raise ValueError('r_matrix and params are not aligned')
    if q_matrix is None:
        q_matrix = np.zeros(num_ttests)
    else:
        q_matrix = np.asarray(q_matrix)
        q_matrix = q_matrix.squeeze()
    if q_matrix.size > 1:
        if q_matrix.shape[0] != num_ttests:
            raise ValueError("r_matrix and q_matrix must have the same "
                             "number of rows")

    if use_t is None:
        # switch to use_t false if undefined
        use_t = (hasattr(self, 'use_t') and self.use_t)

    _effect = np.dot(r_matrix, params)

    # Perform the test
    if num_ttests > 1:
        _sd = np.sqrt(np.diag(self.cov_params(
            r_matrix=r_matrix, cov_p=cov_p)))
    else:
        _sd = np.sqrt(self.cov_params(r_matrix=r_matrix, cov_p=cov_p))
    _t = (_effect - q_matrix) * recipr(_sd)

    df_resid = getattr(self, 'df_resid_inference', self.df_resid)

    if use_t:
        return ContrastResults(effect=_effect, t=_t, sd=_sd,
                               df_denom=df_resid)
    else:
        return ContrastResults(effect=_effect, statistic=_t, sd=_sd,
                               df_denom=df_resid, distribution='norm')
Compute a t-test for each linear hypothesis of the form Rb = q. Parameters ---------- r_matrix : {array_like, str, tuple} One of: - array : If an array is given, a p x k 2d array or length k 1d array specifying the linear restrictions. It is assumed that the linear combination is equal to zero. - str : The full hypotheses to test can be given as a string. See the examples. - tuple : A tuple of arrays in the form (R, q). If q is given, can be either a scalar or a length p row vector. cov_p : array_like, optional An alternative estimate for the parameter covariance matrix. If None is given, self.normalized_cov_params is used. use_t : bool, optional If use_t is None, then the default of the model is used. If use_t is True, then the p-values are based on the t distribution. If use_t is False, then the p-values are based on the normal distribution. Returns ------- ContrastResults The results for the test are attributes of this results instance. The available results have the same elements as the parameter table in `summary()`. See Also -------- tvalues : Individual t statistics for the estimated parameters. f_test : Perform an F test on model parameters. patsy.DesignInfo.linear_constraint : Specify a linear constraint. Examples -------- >>> import numpy as np >>> import statsmodels.api as sm >>> data = sm.datasets.longley.load() >>> data.exog = sm.add_constant(data.exog) >>> results = sm.OLS(data.endog, data.exog).fit() >>> r = np.zeros_like(results.params) >>> r[5:] = [1,-1] >>> print(r) [ 0. 0. 0. 0. 0. 1. -1.] r tests that the coefficients on the 5th and 6th independent variable are the same. >>> T_test = results.t_test(r) >>> print(T_test) Test for Constraints ============================================================================== coef std err t P>|t| [0.025 0.975] ------------------------------------------------------------------------------ c0 -1829.2026 455.391 -4.017 0.003 -2859.368 -799.037 ============================================================================== >>> T_test.effect -1829.2025687192481 >>> T_test.sd 455.39079425193762 >>> T_test.tvalue -4.0167754636411717 >>> T_test.pvalue 0.0015163772380899498 Alternatively, you can specify the hypothesis tests using a string >>> from statsmodels.formula.api import ols >>> dta = sm.datasets.longley.load_pandas().data >>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR' >>> results = ols(formula, dta).fit() >>> hypotheses = 'GNPDEFL = GNP, UNEMP = 2, YEAR/1829 = 1' >>> t_test = results.t_test(hypotheses) >>> print(t_test) Test for Constraints ============================================================================== coef std err t P>|t| [0.025 0.975] ------------------------------------------------------------------------------ c0 15.0977 84.937 0.178 0.863 -177.042 207.238 c1 -2.0202 0.488 -8.231 0.000 -3.125 -0.915 c2 1.0001 0.249 0.000 1.000 0.437 1.563 ==============================================================================
t_test
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def f_test(self, r_matrix, cov_p=None, invcov=None):
    """
    Compute the F-test for a joint linear hypothesis.

    This is a special case of `wald_test` that always uses the F
    distribution.

    Parameters
    ----------
    r_matrix : {array_like, str, tuple}
        One of:

        - array : An r x k array where r is the number of restrictions to
          test and k is the number of regressors. It is assumed that the
          linear combination is equal to zero.
        - str : The full hypotheses to test can be given as a string. See
          the examples.
        - tuple : A tuple of arrays in the form (R, q), ``q`` can be
          either a scalar or a length k row vector.

    cov_p : array_like, optional
        An alternative estimate for the parameter covariance matrix. If
        None is given, self.normalized_cov_params is used.
    invcov : array_like, optional
        A q x q array to specify an inverse covariance matrix based on a
        restrictions matrix.

    Returns
    -------
    ContrastResults
        The results for the test are attributes of this results instance.

    See Also
    --------
    t_test : Perform a single hypothesis test.
    wald_test : Perform a Wald-test using a quadratic form.
    statsmodels.stats.contrast.ContrastResults : Test results.
    patsy.DesignInfo.linear_constraint : Specify a linear constraint.

    Notes
    -----
    The matrix `r_matrix` is assumed to be non-singular. More precisely,

    r_matrix (pX pX.T) r_matrix.T

    is assumed invertible. Here, pX is the generalized inverse of the
    design matrix of the model. There can be problems in non-OLS models
    where the rank of the covariance of the noise is not full.

    Examples
    --------
    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> data = sm.datasets.longley.load()
    >>> data.exog = sm.add_constant(data.exog)
    >>> results = sm.OLS(data.endog, data.exog).fit()
    >>> A = np.identity(len(results.params))
    >>> A = A[1:,:]

    This tests that each coefficient is jointly statistically
    significantly different from zero.

    >>> print(results.f_test(A))
    <F test: F=array([[ 330.28533923]]), p=4.984030528700946e-10,
     df_denom=9, df_num=6>

    Compare this to

    >>> results.fvalue
    330.2853392346658
    >>> results.f_pvalue
    4.98403096572e-10

    >>> B = np.array(([0,0,1,-1,0,0,0],[0,0,0,0,0,1,-1]))

    This tests that the coefficient on the 2nd and 3rd regressors are
    equal and jointly that the coefficient on the 5th and 6th regressors
    are equal.

    >>> print(results.f_test(B))
    <F test: F=array([[ 9.74046187]]), p=0.005605288531708235,
     df_denom=9, df_num=2>

    Alternatively, you can specify the hypothesis tests using a string

    >>> from statsmodels.datasets import longley
    >>> from statsmodels.formula.api import ols
    >>> dta = longley.load_pandas().data
    >>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
    >>> results = ols(formula, dta).fit()
    >>> hypotheses = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
    >>> f_test = results.f_test(hypotheses)
    >>> print(f_test)
    <F test: F=array([[ 144.17976065]]), p=6.322026217355609e-08,
     df_denom=9, df_num=3>
    """
    res = self.wald_test(r_matrix, cov_p=cov_p, invcov=invcov, use_f=True,
                         scalar=True)
    return res
Compute the F-test for a joint linear hypothesis. This is a special case of `wald_test` that always uses the F distribution. Parameters ---------- r_matrix : {array_like, str, tuple} One of: - array : An r x k array where r is the number of restrictions to test and k is the number of regressors. It is assumed that the linear combination is equal to zero. - str : The full hypotheses to test can be given as a string. See the examples. - tuple : A tuple of arrays in the form (R, q), ``q`` can be either a scalar or a length k row vector. cov_p : array_like, optional An alternative estimate for the parameter covariance matrix. If None is given, self.normalized_cov_params is used. invcov : array_like, optional A q x q array to specify an inverse covariance matrix based on a restrictions matrix. Returns ------- ContrastResults The results for the test are attributes of this results instance. See Also -------- t_test : Perform a single hypothesis test. wald_test : Perform a Wald-test using a quadratic form. statsmodels.stats.contrast.ContrastResults : Test results. patsy.DesignInfo.linear_constraint : Specify a linear constraint. Notes ----- The matrix `r_matrix` is assumed to be non-singular. More precisely, r_matrix (pX pX.T) r_matrix.T is assumed invertible. Here, pX is the generalized inverse of the design matrix of the model. There can be problems in non-OLS models where the rank of the covariance of the noise is not full. Examples -------- >>> import numpy as np >>> import statsmodels.api as sm >>> data = sm.datasets.longley.load() >>> data.exog = sm.add_constant(data.exog) >>> results = sm.OLS(data.endog, data.exog).fit() >>> A = np.identity(len(results.params)) >>> A = A[1:,:] This tests that each coefficient is jointly statistically significantly different from zero. >>> print(results.f_test(A)) <F test: F=array([[ 330.28533923]]), p=4.984030528700946e-10, df_denom=9, df_num=6> Compare this to >>> results.fvalue 330.2853392346658 >>> results.f_pvalue 4.98403096572e-10 >>> B = np.array(([0,0,1,-1,0,0,0],[0,0,0,0,0,1,-1])) This tests that the coefficient on the 2nd and 3rd regressors are equal and jointly that the coefficient on the 5th and 6th regressors are equal. >>> print(results.f_test(B)) <F test: F=array([[ 9.74046187]]), p=0.005605288531708235, df_denom=9, df_num=2> Alternatively, you can specify the hypothesis tests using a string >>> from statsmodels.datasets import longley >>> from statsmodels.formula.api import ols >>> dta = longley.load_pandas().data >>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR' >>> results = ols(formula, dta).fit() >>> hypotheses = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)' >>> f_test = results.f_test(hypotheses) >>> print(f_test) <F test: F=array([[ 144.17976065]]), p=6.322026217355609e-08, df_denom=9, df_num=3>
f_test
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def wald_test(self, r_matrix, cov_p=None, invcov=None, use_f=None,
              df_constraints=None, scalar=None):
    """
    Compute a Wald-test for a joint linear hypothesis.

    Parameters
    ----------
    r_matrix : {array_like, str, tuple}
        One of:

        - array : An r x k array where r is the number of restrictions to
          test and k is the number of regressors. It is assumed that the
          linear combination is equal to zero.
        - str : The full hypotheses to test can be given as a string. See
          the examples.
        - tuple : A tuple of arrays in the form (R, q), ``q`` can be
          either a scalar or a length p row vector.

    cov_p : array_like, optional
        An alternative estimate for the parameter covariance matrix.
        If None is given, self.normalized_cov_params is used.
    invcov : array_like, optional
        A q x q array to specify an inverse covariance matrix based on a
        restrictions matrix.
    use_f : bool
        If True, then the F-distribution is used. If False, then the
        asymptotic chi-square distribution is used. If use_f is None, then
        the F distribution is used if the model specifies that use_t is True.
        The test statistic is proportionally adjusted for the distribution
        by the number of constraints in the hypothesis.
    df_constraints : int, optional
        The number of constraints. If not provided the number of
        constraints is determined from r_matrix.
    scalar : bool, optional
        Flag indicating whether the Wald test statistic should be returned
        as a scalar float. The current behavior is to return an array.
        This will switch to a scalar float after 0.14 is released. To
        get the future behavior now, set scalar to True. To silence
        the warning and retain the legacy behavior, set scalar to
        False.

    Returns
    -------
    ContrastResults
        The results for the test are attributes of this results instance.

    See Also
    --------
    f_test : Perform F tests on model parameters.
    t_test : Perform a single hypothesis test.
    statsmodels.stats.contrast.ContrastResults : Test results.
    patsy.DesignInfo.linear_constraint : Specify a linear constraint.

    Notes
    -----
    The matrix `r_matrix` is assumed to be non-singular. More precisely,

    r_matrix (pX pX.T) r_matrix.T

    is assumed invertible. Here, pX is the generalized inverse of the
    design matrix of the model. There can be problems in non-OLS models
    where the rank of the covariance of the noise is not full.
""" use_f = bool_like(use_f, "use_f", strict=True, optional=True) scalar = bool_like(scalar, "scalar", strict=True, optional=True) if use_f is None: # switch to use_t false if undefined use_f = (hasattr(self, 'use_t') and self.use_t) if self.params.ndim == 2: names = [f'y{i[0]}_{i[1]}' for i in self.model.data.cov_names] else: names = self.model.data.cov_names params = self.params.ravel(order="F") mgr = FormulaManager() lc = mgr.get_linear_constraints(r_matrix, names) r_matrix, q_matrix = lc.constraint_matrix, lc.constraint_values if (self.normalized_cov_params is None and cov_p is None and invcov is None and not hasattr(self, 'cov_params_default')): raise ValueError('need covariance of parameters for computing ' 'F statistics') cparams = np.dot(r_matrix, params[:, None]) J = float(r_matrix.shape[0]) # number of restrictions if q_matrix is None: q_matrix = np.zeros(J) else: q_matrix = np.asarray(q_matrix) if q_matrix.ndim == 1: q_matrix = q_matrix[:, None] if q_matrix.shape[0] != J: raise ValueError("r_matrix and q_matrix must have the same " "number of rows") Rbq = cparams - q_matrix if invcov is None: cov_p = self.cov_params(r_matrix=r_matrix, cov_p=cov_p) if np.isnan(cov_p).max(): raise ValueError("r_matrix performs f_test for using " "dimensions that are asymptotically " "non-normal") invcov = np.linalg.pinv(cov_p) J_ = np.linalg.matrix_rank(cov_p) if J_ < J: warnings.warn('covariance of constraints does not have full ' 'rank. The number of constraints is %d, but ' 'rank is %d' % (J, J_), ValueWarning) J = J_ # TODO streamline computation, we do not need to compute J if given if df_constraints is not None: # let caller override J by df_constraint J = df_constraints if (hasattr(self, 'mle_settings') and self.mle_settings['optimizer'] in ['l1', 'l1_cvxopt_cp']): F = nan_dot(nan_dot(Rbq.T, invcov), Rbq) else: F = np.dot(np.dot(Rbq.T, invcov), Rbq) df_resid = getattr(self, 'df_resid_inference', self.df_resid) if scalar is None: warnings.warn( "The behavior of wald_test will change after 0.14 to returning " "scalar test statistic values. To get the future behavior now, " "set scalar to True. To silence this message while retaining " "the legacy behavior, set scalar to False.", FutureWarning ) scalar = False if scalar and F.size == 1: F = float(np.squeeze(F)) if use_f: F /= J return ContrastResults(F=F, df_denom=df_resid, df_num=J) #invcov.shape[0]) else: return ContrastResults(chi2=F, df_denom=J, statistic=F, distribution='chi2', distargs=(J,))
Compute a Wald-test for a joint linear hypothesis.

Parameters
----------
r_matrix : {array_like, str, tuple}
    One of:

    - array : An r x k array where r is the number of restrictions to
      test and k is the number of regressors. It is assumed that the
      linear combination is equal to zero.
    - str : The full hypotheses to test can be given as a string. See
      the examples.
    - tuple : A tuple of arrays in the form (R, q), ``q`` can be
      either a scalar or a length p row vector.

cov_p : array_like, optional
    An alternative estimate for the parameter covariance matrix.
    If None is given, self.normalized_cov_params is used.
invcov : array_like, optional
    A q x q array to specify an inverse covariance matrix based on a
    restrictions matrix.
use_f : bool
    If True, then the F-distribution is used. If False, then the
    asymptotic chi-square distribution is used. If use_f is None, then
    the F distribution is used if the model specifies that use_t is True.
    The test statistic is proportionally adjusted for the distribution
    by the number of constraints in the hypothesis.
df_constraints : int, optional
    The number of constraints. If not provided the number of
    constraints is determined from r_matrix.
scalar : bool, optional
    Flag indicating whether the Wald test statistic should be returned
    as a scalar float. The current behavior is to return an array.
    This will switch to a scalar float after 0.14 is released. To
    get the future behavior now, set scalar to True. To silence
    the warning and retain the legacy behavior, set scalar to
    False.

Returns
-------
ContrastResults
    The results for the test are attributes of this results instance.

See Also
--------
f_test : Perform F tests on model parameters.
t_test : Perform a single hypothesis test.
statsmodels.stats.contrast.ContrastResults : Test results.
patsy.DesignInfo.linear_constraint : Specify a linear constraint.

Notes
-----
The matrix `r_matrix` is assumed to be non-singular. More precisely,

r_matrix (pX pX.T) r_matrix.T

is assumed invertible. Here, pX is the generalized inverse of the
design matrix of the model. There can be problems in non-OLS models
where the rank of the covariance of the noise is not full.
wald_test
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
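A minimal usage sketch for wald_test; the longley setup mirrors the f_test examples above, and scalar=True opts into the post-0.14 scalar return behavior:

# Sketch only: joint test that all slope coefficients are zero.
import numpy as np
import statsmodels.api as sm

data = sm.datasets.longley.load()
exog = sm.add_constant(data.exog)
results = sm.OLS(data.endog, exog).fit()

R = np.identity(len(results.params))[1:, :]
# use_f=False gives the chi-square form; use_f=True reproduces f_test
wt = results.wald_test(R, use_f=False, scalar=True)
print(wt.statistic, wt.pvalue)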
def wald_test_terms(self, skip_single=False, extra_constraints=None, combine_terms=None, scalar=None): """ Compute a sequence of Wald tests for terms over multiple columns. This computes joined Wald tests for the hypothesis that all coefficients corresponding to a `term` are zero. `Terms` are defined by the underlying formula or by string matching. Parameters ---------- skip_single : bool If true, then terms that consist only of a single column and, therefore, refers only to a single parameter is skipped. If false, then all terms are included. extra_constraints : ndarray Additional constraints to test. Note that this input has not been tested. combine_terms : {list[str], None} Each string in this list is matched to the name of the terms or the name of the exogenous variables. All columns whose name includes that string are combined in one joint test. scalar : bool, optional Flag indicating whether the Wald test statistic should be returned as a sclar float. The current behavior is to return an array. This will switch to a scalar float after 0.14 is released. To get the future behavior now, set scalar to True. To silence the warning and retain the legacy behavior, set scalar to False. Returns ------- WaldTestResults The result instance contains `table` which is a pandas DataFrame with the test results: test statistic, degrees of freedom and pvalues. Examples -------- >>> res_ols = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", data).fit() >>> res_ols.wald_test_terms() <class 'statsmodels.stats.contrast.WaldTestResults'> F P>F df constraint df denom Intercept 279.754525 2.37985521351e-22 1 51 C(Duration, Sum) 5.367071 0.0245738436636 1 51 C(Weight, Sum) 12.432445 3.99943118767e-05 2 51 C(Duration, Sum):C(Weight, Sum) 0.176002 0.83912310946 2 51 >>> res_poi = Poisson.from_formula("Days ~ C(Weight) * C(Duration)", \ data).fit(cov_type='HC0') >>> wt = res_poi.wald_test_terms(skip_single=False, \ combine_terms=['Duration', 'Weight']) >>> print(wt) chi2 P>chi2 df constraint Intercept 15.695625 7.43960374424e-05 1 C(Weight) 16.132616 0.000313940174705 2 C(Duration) 1.009147 0.315107378931 1 C(Weight):C(Duration) 0.216694 0.897315972824 2 Duration 11.187849 0.010752286833 3 Weight 30.263368 4.32586407145e-06 4 """ # lazy import mgr = FormulaManager() result = self if extra_constraints is None: extra_constraints = [] if combine_terms is None: combine_terms = [] model_spec = getattr(result.model.data, 'model_spec', None) if model_spec is None and extra_constraints is None: raise ValueError('no constraints, nothing to do') identity = np.eye(len(result.params)) constraints = [] combined = defaultdict(list) if model_spec is not None: for term in model_spec.terms: cols = mgr.get_slice(model_spec, term) name = mgr.get_term_name(term) constraint_matrix = identity[cols] # check if in combined for cname in combine_terms: if cname in name: combined[cname].append(constraint_matrix) k_constraint = constraint_matrix.shape[0] if skip_single: if k_constraint == 1: continue constraints.append((name, constraint_matrix)) combined_constraints = [] for cname in combine_terms: combined_constraints.append((cname, np.vstack(combined[cname]))) else: # check by exog/params names if there is no formula info for col, name in enumerate(result.model.exog_names): constraint_matrix = np.atleast_2d(identity[col]) # check if in combined for cname in combine_terms: if cname in name: combined[cname].append(constraint_matrix) if skip_single: continue constraints.append((name, constraint_matrix)) combined_constraints = [] for 
cname in combine_terms:
            combined_constraints.append((cname, np.vstack(combined[cname])))

    use_t = result.use_t
    distribution = ['chi2', 'F'][use_t]

    res_wald = []
    index = []
    for name, constraint in constraints + combined_constraints + extra_constraints:
        wt = result.wald_test(constraint, scalar=scalar)
        row = [wt.statistic, wt.pvalue, constraint.shape[0]]
        if use_t:
            row.append(wt.df_denom)
        res_wald.append(row)
        index.append(name)

    # distribution neutral names
    col_names = ['statistic', 'pvalue', 'df_constraint']
    if use_t:
        col_names.append('df_denom')
    # TODO: maybe move DataFrame creation to results class
    from pandas import DataFrame
    table = DataFrame(res_wald, index=index, columns=col_names)
    res = WaldTestResults(None, distribution, None, table=table)
    # TODO: remove temp again, added for testing
    res.temp = constraints + combined_constraints + extra_constraints
    return res
Compute a sequence of Wald tests for terms over multiple columns.

This computes joint Wald tests for the hypothesis that all
coefficients corresponding to a `term` are zero.
`Terms` are defined by the underlying formula or by string matching.

Parameters
----------
skip_single : bool
    If true, then terms that consist only of a single column and,
    therefore, refer only to a single parameter are skipped.
    If false, then all terms are included.
extra_constraints : ndarray
    Additional constraints to test. Note that this input has not been
    tested.
combine_terms : {list[str], None}
    Each string in this list is matched to the name of the terms or
    the name of the exogenous variables. All columns whose name includes
    that string are combined in one joint test.
scalar : bool, optional
    Flag indicating whether the Wald test statistic should be returned
    as a scalar float. The current behavior is to return an array.
    This will switch to a scalar float after 0.14 is released. To
    get the future behavior now, set scalar to True. To silence
    the warning and retain the legacy behavior, set scalar to
    False.

Returns
-------
WaldTestResults
    The result instance contains `table` which is a pandas DataFrame
    with the test results: test statistic, degrees of freedom and
    pvalues.

Examples
--------
>>> res_ols = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", data).fit()
>>> res_ols.wald_test_terms()
<class 'statsmodels.stats.contrast.WaldTestResults'>
                                           F                P>F  df constraint  df denom
Intercept                         279.754525  2.37985521351e-22              1        51
C(Duration, Sum)                    5.367071    0.0245738436636              1        51
C(Weight, Sum)                     12.432445  3.99943118767e-05              2        51
C(Duration, Sum):C(Weight, Sum)     0.176002      0.83912310946              2        51

>>> res_poi = Poisson.from_formula("Days ~ C(Weight) * C(Duration)", \
                                   data).fit(cov_type='HC0')
>>> wt = res_poi.wald_test_terms(skip_single=False, \
                                 combine_terms=['Duration', 'Weight'])
>>> print(wt)
                            chi2             P>chi2  df constraint
Intercept              15.695625  7.43960374424e-05              1
C(Weight)              16.132616  0.000313940174705              2
C(Duration)             1.009147     0.315107378931              1
C(Weight):C(Duration)   0.216694     0.897315972824              2
Duration               11.187849     0.010752286833              3
Weight                 30.263368  4.32586407145e-06              4
wald_test_terms
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def t_test_pairwise(self, term_name, method='hs', alpha=0.05, factor_labels=None): """ Perform pairwise t_test with multiple testing corrected p-values. This uses the formula's model_spec encoding contrast matrix and should work for all encodings of a main effect. Parameters ---------- term_name : str The name of the term for which pairwise comparisons are computed. Term names for categorical effects are created by patsy and correspond to the main part of the exog names. method : {str, list[str]} The multiple testing p-value correction to apply. The default is 'hs'. See stats.multipletesting. alpha : float The significance level for multiple testing reject decision. factor_labels : {list[str], None} Labels for the factor levels used for pairwise labels. If not provided, then the labels from the formula's model_spec are used. Returns ------- MultiCompResult The results are stored as attributes, the main attributes are the following two. Other attributes are added for debugging purposes or as background information. - result_frame : pandas DataFrame with t_test results and multiple testing corrected p-values. - contrasts : matrix of constraints of the null hypothesis in the t_test. Notes ----- Status: experimental. Currently only checked for treatment coding with and without specified reference level. Currently there are no multiple testing corrected confidence intervals available. Examples -------- >>> res = ols("np.log(Days+1) ~ C(Weight) + C(Duration)", data).fit() >>> pw = res.t_test_pairwise("C(Weight)") >>> pw.result_frame coef std err t P>|t| Conf. Int. Low 2-1 0.632315 0.230003 2.749157 8.028083e-03 0.171563 3-1 1.302555 0.230003 5.663201 5.331513e-07 0.841803 3-2 0.670240 0.230003 2.914044 5.119126e-03 0.209488 Conf. Int. Upp. pvalue-hs reject-hs 2-1 1.093067 0.010212 True 3-1 1.763307 0.000002 True 3-2 1.130992 0.010212 True """ res = t_test_pairwise(self, term_name, method=method, alpha=alpha, factor_labels=factor_labels) return res
Perform pairwise t_test with multiple testing corrected p-values. This uses the formula's model_spec encoding contrast matrix and should work for all encodings of a main effect. Parameters ---------- term_name : str The name of the term for which pairwise comparisons are computed. Term names for categorical effects are created by patsy and correspond to the main part of the exog names. method : {str, list[str]} The multiple testing p-value correction to apply. The default is 'hs'. See stats.multipletesting. alpha : float The significance level for multiple testing reject decision. factor_labels : {list[str], None} Labels for the factor levels used for pairwise labels. If not provided, then the labels from the formula's model_spec are used. Returns ------- MultiCompResult The results are stored as attributes, the main attributes are the following two. Other attributes are added for debugging purposes or as background information. - result_frame : pandas DataFrame with t_test results and multiple testing corrected p-values. - contrasts : matrix of constraints of the null hypothesis in the t_test. Notes ----- Status: experimental. Currently only checked for treatment coding with and without specified reference level. Currently there are no multiple testing corrected confidence intervals available. Examples -------- >>> res = ols("np.log(Days+1) ~ C(Weight) + C(Duration)", data).fit() >>> pw = res.t_test_pairwise("C(Weight)") >>> pw.result_frame coef std err t P>|t| Conf. Int. Low 2-1 0.632315 0.230003 2.749157 8.028083e-03 0.171563 3-1 1.302555 0.230003 5.663201 5.331513e-07 0.841803 3-2 0.670240 0.230003 2.914044 5.119126e-03 0.209488 Conf. Int. Upp. pvalue-hs reject-hs 2-1 1.093067 0.010212 True 3-1 1.763307 0.000002 True 3-2 1.130992 0.010212 True
t_test_pairwise
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def _get_wald_nonlinear(self, func, deriv=None): """Experimental method for nonlinear prediction and tests Parameters ---------- func : callable, f(params) nonlinear function of the estimation parameters. The return of the function can be vector valued, i.e. a 1-D array deriv : function or None first derivative or Jacobian of func. If deriv is None, then a numerical derivative will be used. If func returns a 1-D array, then the `deriv` should have rows corresponding to the elements of the return of func. Returns ------- nl : instance of `NonlinearDeltaCov` with attributes and methods to calculate the results for the prediction or tests """ from statsmodels.stats._delta_method import NonlinearDeltaCov func_args = None # TODO: not yet implemented, maybe skip - use partial nl = NonlinearDeltaCov(func, self.params, self.cov_params(), deriv=deriv, func_args=func_args) return nl
Experimental method for nonlinear prediction and tests Parameters ---------- func : callable, f(params) nonlinear function of the estimation parameters. The return of the function can be vector valued, i.e. a 1-D array deriv : function or None first derivative or Jacobian of func. If deriv is None, then a numerical derivative will be used. If func returns a 1-D array, then the `deriv` should have rows corresponding to the elements of the return of func. Returns ------- nl : instance of `NonlinearDeltaCov` with attributes and methods to calculate the results for the prediction or tests
_get_wald_nonlinear
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def conf_int(self, alpha=.05, cols=None): """ Construct confidence interval for the fitted parameters. Parameters ---------- alpha : float, optional The significance level for the confidence interval. The default `alpha` = .05 returns a 95% confidence interval. cols : array_like, optional Specifies which confidence intervals to return. .. deprecated: 0.13 cols is deprecated and will be removed after 0.14 is released. cols only works when inputs are NumPy arrays and will fail when using pandas Series or DataFrames as input. You can subset the confidence intervals using slices. Returns ------- array_like Each row contains [lower, upper] limits of the confidence interval for the corresponding parameter. The first column contains all lower, the second column contains all upper limits. Notes ----- The confidence interval is based on the standard normal distribution if self.use_t is False. If self.use_t is True, then uses a Student's t with self.df_resid_inference (or self.df_resid if df_resid_inference is not defined) degrees of freedom. Examples -------- >>> import statsmodels.api as sm >>> data = sm.datasets.longley.load() >>> data.exog = sm.add_constant(data.exog) >>> results = sm.OLS(data.endog, data.exog).fit() >>> results.conf_int() array([[-5496529.48322745, -1467987.78596704], [ -177.02903529, 207.15277984], [ -0.1115811 , 0.03994274], [ -3.12506664, -0.91539297], [ -1.5179487 , -0.54850503], [ -0.56251721, 0.460309 ], [ 798.7875153 , 2859.51541392]]) >>> results.conf_int(cols=(2,3)) array([[-0.1115811 , 0.03994274], [-3.12506664, -0.91539297]]) """ bse = self.bse if self.use_t: dist = stats.t df_resid = getattr(self, 'df_resid_inference', self.df_resid) q = dist.ppf(1 - alpha / 2, df_resid) else: dist = stats.norm q = dist.ppf(1 - alpha / 2) params = self.params lower = params - q * bse upper = params + q * bse if cols is not None: warnings.warn( "cols is deprecated and will be removed after 0.14 is " "released. cols only works when inputs are NumPy arrays and " "will fail when using pandas Series or DataFrames as input. " "Subsets of confidence intervals can be selected using slices " "of the full confidence interval array.", FutureWarning ) cols = np.asarray(cols) lower = lower[cols] upper = upper[cols] return np.asarray(lzip(lower, upper))
Construct confidence interval for the fitted parameters. Parameters ---------- alpha : float, optional The significance level for the confidence interval. The default `alpha` = .05 returns a 95% confidence interval. cols : array_like, optional Specifies which confidence intervals to return. .. deprecated: 0.13 cols is deprecated and will be removed after 0.14 is released. cols only works when inputs are NumPy arrays and will fail when using pandas Series or DataFrames as input. You can subset the confidence intervals using slices. Returns ------- array_like Each row contains [lower, upper] limits of the confidence interval for the corresponding parameter. The first column contains all lower, the second column contains all upper limits. Notes ----- The confidence interval is based on the standard normal distribution if self.use_t is False. If self.use_t is True, then uses a Student's t with self.df_resid_inference (or self.df_resid if df_resid_inference is not defined) degrees of freedom. Examples -------- >>> import statsmodels.api as sm >>> data = sm.datasets.longley.load() >>> data.exog = sm.add_constant(data.exog) >>> results = sm.OLS(data.endog, data.exog).fit() >>> results.conf_int() array([[-5496529.48322745, -1467987.78596704], [ -177.02903529, 207.15277984], [ -0.1115811 , 0.03994274], [ -3.12506664, -0.91539297], [ -1.5179487 , -0.54850503], [ -0.56251721, 0.460309 ], [ 798.7875153 , 2859.51541392]]) >>> results.conf_int(cols=(2,3)) array([[-0.1115811 , 0.03994274], [-3.12506664, -0.91539297]])
conf_int
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def save(self, fname, remove_data=False): """ Save a pickle of this instance. Parameters ---------- fname : {str, handle} A string filename or a file handle. remove_data : bool If False (default), then the instance is pickled without changes. If True, then all arrays with length nobs are set to None before pickling. See the remove_data method. In some cases not all arrays will be set to None. Notes ----- If remove_data is true and the model result does not implement a remove_data method then this will raise an exception. """ from statsmodels.iolib.smpickle import save_pickle if remove_data: self.remove_data() save_pickle(self, fname)
Save a pickle of this instance. Parameters ---------- fname : {str, handle} A string filename or a file handle. remove_data : bool If False (default), then the instance is pickled without changes. If True, then all arrays with length nobs are set to None before pickling. See the remove_data method. In some cases not all arrays will be set to None. Notes ----- If remove_data is true and the model result does not implement a remove_data method then this will raise an exception.
save
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def load(cls, fname): """ Load a pickled results instance .. warning:: Loading pickled models is not secure against erroneous or maliciously constructed data. Never unpickle data received from an untrusted or unauthenticated source. Parameters ---------- fname : {str, handle, pathlib.Path} A string filename or a file handle. Returns ------- Results The unpickled results instance. """ from statsmodels.iolib.smpickle import load_pickle return load_pickle(fname)
Load a pickled results instance .. warning:: Loading pickled models is not secure against erroneous or maliciously constructed data. Never unpickle data received from an untrusted or unauthenticated source. Parameters ---------- fname : {str, handle, pathlib.Path} A string filename or a file handle. Returns ------- Results The unpickled results instance.
load
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
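A hedged round-trip sketch for save and load; the filename is arbitrary and sm.load is the module-level convenience wrapper around the classmethod:

# Sketch only: pickle round trip with nobs-length arrays stripped.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = sm.add_constant(rng.standard_normal((100, 2)))
y = x @ np.array([1.0, 0.5, -0.2]) + rng.standard_normal(100)

res = sm.OLS(y, x).fit()
res.save("ols_results.pickle", remove_data=True)  # calls remove_data first
res2 = sm.load("ols_results.pickle")
print(res2.params)  # parameters survive; nobs-length arrays are now None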
def remove_data(self): """ Remove data arrays, all nobs arrays from result and model. This reduces the size of the instance, so it can be pickled with less memory. Currently tested for use with predict from an unpickled results and model instance. .. warning:: Since data and some intermediate results have been removed calculating new statistics that require them will raise exceptions. The exception will occur the first time an attribute is accessed that has been set to None. Not fully tested for time series models, tsa, and might delete too much for prediction or not all that would be possible. The lists of arrays to delete are maintained as attributes of the result and model instance, except for cached values. These lists could be changed before calling remove_data. The attributes to remove are named in: model._data_attr : arrays attached to both the model instance and the results instance with the same attribute name. result._data_in_cache : arrays that may exist as values in result._cache result._data_attr_model : arrays attached to the model instance but not to the results instance """ cls = self.__class__ # Note: we cannot just use `getattr(cls, x)` or `getattr(self, x)` # because of redirection involved with property-like accessors cls_attrs = {} for name in dir(cls): try: attr = object.__getattribute__(cls, name) except AttributeError: pass else: cls_attrs[name] = attr data_attrs = [x for x in cls_attrs if isinstance(cls_attrs[x], cached_data)] for name in data_attrs: self._cache[name] = None def wipe(obj, att): # get to last element in attribute path p = att.split('.') att_ = p.pop(-1) try: obj_ = reduce(getattr, [obj] + p) if hasattr(obj_, att_): setattr(obj_, att_, None) except AttributeError: pass model_only = ['model.' + i for i in getattr(self, "_data_attr_model", [])] model_attr = ['model.' + i for i in self.model._data_attr] for att in self._data_attr + model_attr + model_only: if att in data_attrs: # these have been handled above, and trying to call wipe # would raise an Exception anyway, so skip these continue wipe(self, att) for key in self._data_in_cache: try: self._cache[key] = None except (AttributeError, KeyError): pass
Remove data arrays, all nobs arrays from result and model. This reduces the size of the instance, so it can be pickled with less memory. Currently tested for use with predict from an unpickled results and model instance. .. warning:: Since data and some intermediate results have been removed calculating new statistics that require them will raise exceptions. The exception will occur the first time an attribute is accessed that has been set to None. Not fully tested for time series models, tsa, and might delete too much for prediction or not all that would be possible. The lists of arrays to delete are maintained as attributes of the result and model instance, except for cached values. These lists could be changed before calling remove_data. The attributes to remove are named in: model._data_attr : arrays attached to both the model instance and the results instance with the same attribute name. result._data_in_cache : arrays that may exist as values in result._cache result._data_attr_model : arrays attached to the model instance but not to the results instance
remove_data
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def df_modelwc(self):
    """Model degrees of freedom including the constant and any extra parameters"""
    # collect different ways of defining the number of parameters, used for
    # aic, bic
    k_extra = getattr(self.model, "k_extra", 0)
    if hasattr(self, 'df_model'):
        if hasattr(self, 'k_constant'):
            hasconst = self.k_constant
        elif hasattr(self, 'hasconst'):
            hasconst = self.hasconst
        else:
            # default assumption
            hasconst = 1
        return self.df_model + hasconst + k_extra
    else:
        return self.params.size
Model degrees of freedom including the constant and any extra parameters
df_modelwc
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def aic(self): """Akaike information criterion""" return -2 * self.llf + 2 * (self.df_modelwc)
Akaike information criterion
aic
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def bic(self): """Bayesian information criterion""" return -2 * self.llf + np.log(self.nobs) * (self.df_modelwc)
Bayesian information criterion
bic
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
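Both criteria can be checked directly against llf and the parameter count; a small sketch (OLS, where df_modelwc reduces to df_model plus the constant since k_extra is zero):

# Sketch: verify aic/bic against the formulas above.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = sm.add_constant(rng.standard_normal(50))
y = 2.0 * x[:, 1] + rng.standard_normal(50)
res = sm.OLS(y, x).fit()

k = res.df_model + res.model.k_constant  # df_modelwc when k_extra == 0
assert np.isclose(res.aic, -2 * res.llf + 2 * k)
assert np.isclose(res.bic, -2 * res.llf + np.log(res.nobs) * k)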
def score_obsv(self): """cached Jacobian of log-likelihood """ return self.model.score_obs(self.params)
cached Jacobian of log-likelihood
score_obsv
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def hessv(self): """cached Hessian of log-likelihood """ return self.model.hessian(self.params)
cached Hessian of log-likelihood
hessv
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def covjac(self):
    """
    Covariance of parameters based on the outer product of the Jacobian
    of the log-likelihood.
    """
    jacv = self.score_obsv
    return np.linalg.inv(np.dot(jacv.T, jacv))
Covariance of parameters based on the outer product of the Jacobian of the log-likelihood.
covjac
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def covjhj(self):
    """Covariance of parameters based on the HJJH sandwich.

    Dot product of the inverse Hessian, the outer product of the Jacobian,
    and the inverse Hessian of the log-likelihood.

    Note: the name should be covhjh.
    """
    jacv = self.score_obsv
    hessv = self.hessv
    hessinv = np.linalg.inv(hessv)
    return np.dot(hessinv, np.dot(np.dot(jacv.T, jacv), hessinv))
Covariance of parameters based on the HJJH sandwich.

Dot product of the inverse Hessian, the outer product of the Jacobian,
and the inverse Hessian of the log-likelihood.

Note: the name should be covhjh.
covjhj
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def bsejhj(self): """standard deviation of parameter estimates based on covHJH """ return np.sqrt(np.diag(self.covjhj))
standard deviation of parameter estimates based on covHJH
bsejhj
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def bsejac(self): """standard deviation of parameter estimates based on covjac """ return np.sqrt(np.diag(self.covjac))
standard deviation of parameter estimates based on covjac
bsejac
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def bootstrap(self, nrep=100, method='nm', disp=0, store=1): """simple bootstrap to get mean and variance of estimator see notes Parameters ---------- nrep : int number of bootstrap replications method : str optimization method to use disp : bool If true, then optimization prints results store : bool If true, then parameter estimates for all bootstrap iterations are attached in self.bootstrap_results Returns ------- mean : ndarray mean of parameter estimates over bootstrap replications std : ndarray standard deviation of parameter estimates over bootstrap replications Notes ----- This was mainly written to compare estimators of the standard errors of the parameter estimates. It uses independent random sampling from the original endog and exog, and therefore is only correct if observations are independently distributed. This will be moved to apply only to models with independently distributed observations. """ results = [] hascloneattr = True if hasattr(self.model, 'cloneattr') else False for i in range(nrep): rvsind = np.random.randint(self.nobs, size=self.nobs) # this needs to set startparam and get other defining attributes # need a clone method on model if self.exog is not None: exog_resamp = self.exog[rvsind, :] else: exog_resamp = None # build auxiliary model and fit init_kwds = self.model._get_init_kwds() fitmod = self.model.__class__(self.endog[rvsind], exog=exog_resamp, **init_kwds) if hascloneattr: for attr in self.model.cloneattr: setattr(fitmod, attr, getattr(self.model, attr)) fitres = fitmod.fit(method=method, disp=disp) results.append(fitres.params) results = np.array(results) if store: self.bootstrap_results = results return results.mean(0), results.std(0), results
Simple bootstrap to get mean and variance of estimator.

See Notes.

Parameters
----------
nrep : int
    number of bootstrap replications
method : str
    optimization method to use
disp : bool
    If true, then optimization prints results
store : bool
    If true, then parameter estimates for all bootstrap iterations
    are attached in self.bootstrap_results

Returns
-------
mean : ndarray
    mean of parameter estimates over bootstrap replications
std : ndarray
    standard deviation of parameter estimates over bootstrap
    replications
results : ndarray
    parameter estimates for all bootstrap replications, one row per
    replication

Notes
-----
This was mainly written to compare estimators of the standard errors of
the parameter estimates.  It uses independent random sampling from the
original endog and exog, and therefore is only correct if observations
are independently distributed.

This will be moved to apply only to models with independently
distributed observations.
bootstrap
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
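A hedged sketch of bootstrap on a GenericLikelihoodModel-style results instance; PoissonGMLE from statsmodels.miscmodels.count is used purely for illustration, and the observations are i.i.d. as the Notes require:

# Sketch only: compare the bootstrap std with the asymptotic bse.
import numpy as np
from statsmodels.miscmodels.count import PoissonGMLE

rng = np.random.default_rng(0)
x = np.column_stack([np.ones(200), rng.standard_normal(200)])
y = rng.poisson(np.exp(x @ np.array([0.5, 0.3])))

res = PoissonGMLE(y, x).fit(disp=0)
mean, std, all_params = res.bootstrap(nrep=50, disp=0)
print(std)      # bootstrap standard errors
print(res.bse)  # asymptotic standard errors for comparison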
def get_nlfun(self, fun):
    """
    Get a nonlinear transformation of the parameters.

    This is not implemented.
    """
    # I think this is supposed to get the delta method that is currently
    # in miscmodels count (as part of Poisson example)
    raise NotImplementedError
Get a nonlinear transformation of the parameters. This is not implemented.
get_nlfun
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def pseudo_rsquared(self, kind="mcf"):
    """
    Pseudo-R-squared of the model.

    If `kind` starts with "mcf", McFadden's pseudo-R-squared,
    `1 - (llf / llnull)`, is returned. If `kind` starts with "cox" or
    is "cs" or "lr", the Cox-Snell (likelihood-ratio) pseudo-R-squared,
    `1 - exp((llnull - llf) * (2 / nobs))`, is returned.
    """
    kind = kind.lower()
    if kind.startswith("mcf"):
        prsq = 1 - self.llf / self.llnull
    elif kind.startswith("cox") or kind in ["cs", "lr"]:
        prsq = 1 - np.exp((self.llnull - self.llf) * (2 / self.nobs))
    else:
        raise ValueError("only McFadden and Cox-Snell are available")
    return prsq
Pseudo-R-squared of the model.

If `kind` starts with "mcf", McFadden's pseudo-R-squared,
`1 - (llf / llnull)`, is returned. If `kind` starts with "cox" or
is "cs" or "lr", the Cox-Snell (likelihood-ratio) pseudo-R-squared,
`1 - exp((llnull - llf) * (2 / nobs))`, is returned.
pseudo_rsquared
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def llr(self): """ Likelihood ratio chi-squared statistic; `-2*(llnull - llf)` """ return -2*(self.llnull - self.llf)
Likelihood ratio chi-squared statistic; `-2*(llnull - llf)`
llr
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
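Both quantities are simple functions of llf and llnull; a sketch checking them on a Logit fit (the spector data set is an arbitrary example, and the sketch assumes a statsmodels version in which pseudo_rsquared is available):

# Sketch: McFadden and Cox-Snell pseudo-R-squared, and the LR statistic.
import numpy as np
import statsmodels.api as sm

spector = sm.datasets.spector.load_pandas()
res = sm.Logit(spector.endog, sm.add_constant(spector.exog)).fit(disp=0)

assert np.isclose(res.pseudo_rsquared("mcf"), 1 - res.llf / res.llnull)
cox_snell = 1 - np.exp((res.llnull - res.llf) * (2 / res.nobs))
assert np.isclose(res.pseudo_rsquared("cox"), cox_snell)
assert np.isclose(res.llr, -2 * (res.llnull - res.llf))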
def llr_pvalue(self):
    """
    The chi-squared probability of getting a log-likelihood ratio
    statistic greater than llr. llr has a chi-squared distribution
    with degrees of freedom equal to the difference in residual
    degrees of freedom between the null and the full model.
    """
    # see also RegressionModel compare_lr_test
    llr = self.llr
    df_full = self.df_resid
    df_restr = self.df_resid_null
    lrdf = (df_restr - df_full)
    self.df_lr_null = lrdf
    return stats.distributions.chi2.sf(llr, lrdf)
The chi-squared probability of getting a log-likelihood ratio
statistic greater than llr. llr has a chi-squared distribution
with degrees of freedom equal to the difference in residual
degrees of freedom between the null and the full model.
llr_pvalue
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def set_null_options(self, llnull=None, attach_results=True, **kwargs):
    """
    Set the fit options for the Null (constant-only) model.

    This resets the cache for related attributes which is potentially
    fragile. This only sets the option, the null model is estimated
    when llnull is accessed, if llnull is not yet in cache.

    Parameters
    ----------
    llnull : {None, float}
        If llnull is not None, then the value will be directly assigned
        to the cached attribute "llnull".
    attach_results : bool
        Sets an internal flag whether the results instance of the null
        model should be attached. By default without calling this method,
        the null model results are not attached and only the
        loglikelihood value llnull is stored.
    **kwargs
        Additional keyword arguments used as fit keyword arguments for
        the null model. They override model default values.

    Notes
    -----
    Modifies attributes of this instance, and so has no return.
    """
    # reset cache, note we need to add here anything that depends on
    # llnull or the null model. If something is missing, then the
    # attribute might be incorrect.
    self._cache.pop('llnull', None)
    self._cache.pop('llr', None)
    self._cache.pop('llr_pvalue', None)
    self._cache.pop('prsquared', None)
    if hasattr(self, 'res_null'):
        del self.res_null

    if llnull is not None:
        self._cache['llnull'] = llnull
    self._attach_nullmodel = attach_results
    self._optim_kwds_null = kwargs
Set the fit options for the Null (constant-only) model.

This resets the cache for related attributes which is potentially
fragile. This only sets the option, the null model is estimated
when llnull is accessed, if llnull is not yet in cache.

Parameters
----------
llnull : {None, float}
    If llnull is not None, then the value will be directly assigned to
    the cached attribute "llnull".
attach_results : bool
    Sets an internal flag whether the results instance of the null
    model should be attached. By default without calling this method,
    the null model results are not attached and only the loglikelihood
    value llnull is stored.
**kwargs
    Additional keyword arguments used as fit keyword arguments for the
    null model. They override model default values.

Notes
-----
Modifies attributes of this instance, and so has no return.
set_null_options
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
def llnull(self):
    """
    Value of the constant-only loglikelihood
    """
    model = self.model
    kwds = model._get_init_kwds().copy()
    for key in getattr(model, '_null_drop_keys', []):
        del kwds[key]
    # TODO: what parameters to pass to fit?
    mod_null = model.__class__(model.endog, np.ones(self.nobs), **kwds)
    # TODO: consider catching and warning on convergence failure?
    # in the meantime, try hard to converge. see
    # TestPoissonConstrained1a.test_smoke

    optim_kwds = getattr(self, '_optim_kwds_null', {}).copy()

    if 'start_params' in optim_kwds:
        # user provided
        sp_null = optim_kwds.pop('start_params')
    elif hasattr(model, '_get_start_params_null'):
        # get moment estimates if available
        sp_null = model._get_start_params_null()
    else:
        sp_null = None

    opt_kwds = dict(method='bfgs', warn_convergence=False, maxiter=10000,
                    disp=0)
    opt_kwds.update(optim_kwds)

    if optim_kwds:
        res_null = mod_null.fit(start_params=sp_null, **opt_kwds)
    else:
        # this should be a reasonably robust default across versions
        res_null = mod_null.fit(start_params=sp_null, method='nm',
                                warn_convergence=False,
                                maxiter=10000, disp=0)
        res_null = mod_null.fit(start_params=res_null.params, method='bfgs',
                                warn_convergence=False,
                                maxiter=10000, disp=0)

    if getattr(self, '_attach_nullmodel', False) is not False:
        self.res_null = res_null

    self.k_null = len(res_null.params)
    self.df_resid_null = res_null.df_resid
    return res_null.llf
Value of the constant-only loglikelihood
llnull
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
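A sketch of the lazy workflow: set the options first, then trigger estimation of the constant-only model by accessing llnull (Logit/spector again purely as an example):

# Sketch: attach the null-model results and pass fit options through.
import statsmodels.api as sm

spector = sm.datasets.spector.load_pandas()
res = sm.Logit(spector.endog, sm.add_constant(spector.exog)).fit(disp=0)

res.set_null_options(attach_results=True, method="bfgs", maxiter=5000)
print(res.llnull)           # the null model is estimated here, lazily
print(res.res_null.params)  # attached because attach_results=True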
def get_prediction( self, exog=None, which="mean", transform=True, row_labels=None, average=False, agg_weights=None, **kwargs ): """ Compute prediction results when endpoint transformation is valid. Parameters ---------- exog : array_like, optional The values for which you want to predict. transform : bool, optional If the model was fit via a formula, do you want to pass exog through the formula. Default is True. E.g., if you fit a model y ~ log(x1) + log(x2), and transform is True, then you can pass a data structure that contains x1 and x2 in their original form. Otherwise, you'd need to log the data first. which : str Which statistic is to be predicted. Default is "mean". The available statistics and options depend on the model. see the model.predict docstring row_labels : list of str or None If row_lables are provided, then they will replace the generated labels. average : bool If average is True, then the mean prediction is computed, that is, predictions are computed for individual exog and then the average over observation is used. If average is False, then the results are the predictions for all observations, i.e. same length as ``exog``. agg_weights : ndarray, optional Aggregation weights, only used if average is True. The weights are not normalized. **kwargs : Some models can take additional keyword arguments, such as offset, exposure or additional exog in multi-part models like zero inflated models. See the predict method of the model for the details. Returns ------- prediction_results : PredictionResults The prediction results instance contains prediction and prediction variance and can on demand calculate confidence intervals and summary dataframe for the prediction. Notes ----- Status: new in 0.14, experimental """ from statsmodels.base._prediction_inference import get_prediction pred_kwds = kwargs res = get_prediction( self, exog=exog, which=which, transform=transform, row_labels=row_labels, average=average, agg_weights=agg_weights, pred_kwds=pred_kwds ) return res
Compute prediction results when endpoint transformation is valid.

Parameters
----------
exog : array_like, optional
    The values for which you want to predict.
transform : bool, optional
    If the model was fit via a formula, do you want to pass
    exog through the formula. Default is True. E.g., if you fit
    a model y ~ log(x1) + log(x2), and transform is True, then
    you can pass a data structure that contains x1 and x2 in
    their original form. Otherwise, you'd need to log the data
    first.
which : str
    Which statistic is to be predicted. Default is "mean".
    The available statistics and options depend on the model.
    See the model.predict docstring.
row_labels : list of str or None
    If row_labels are provided, then they will replace the generated
    labels.
average : bool
    If average is True, then the mean prediction is computed, that is,
    predictions are computed for individual exog and then the average
    over observations is used.
    If average is False, then the results are the predictions for all
    observations, i.e. same length as ``exog``.
agg_weights : ndarray, optional
    Aggregation weights, only used if average is True.
    The weights are not normalized.
**kwargs :
    Some models can take additional keyword arguments, such as offset,
    exposure or additional exog in multi-part models like zero inflated
    models.
    See the predict method of the model for the details.

Returns
-------
prediction_results : PredictionResults
    The prediction results instance contains prediction and prediction
    variance and can on demand calculate confidence intervals and
    summary dataframe for the prediction.

Notes
-----
Status: new in 0.14, experimental
get_prediction
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
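An illustrative call; which and average follow the 0.14-era interface described above, and GLM with a Binomial family is an arbitrary model choice for the sketch:

# Sketch: average predicted probability with a confidence interval.
import statsmodels.api as sm

spector = sm.datasets.spector.load_pandas()
res = sm.GLM(spector.endog, sm.add_constant(spector.exog),
             family=sm.families.Binomial()).fit()

pred = res.get_prediction(which="mean", average=True)
print(pred.predicted, pred.se)
print(pred.summary_frame(alpha=0.05))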
def summary(self, yname=None, xname=None, title=None, alpha=.05): """Summarize the Regression Results Parameters ---------- yname : str, optional Default is `y` xname : list[str], optional Names for the exogenous variables, default is "var_xx". Must match the number of parameters in the model title : str, optional Title for the top table. If not None, then this replaces the default title alpha : float significance level for the confidence intervals Returns ------- smry : Summary instance this holds the summary tables and text, which can be printed or converted to various output formats. See Also -------- statsmodels.iolib.summary.Summary : class to hold summary results """ top_left = [('Dep. Variable:', None), ('Model:', None), ('Method:', ['Maximum Likelihood']), ('Date:', None), ('Time:', None), ('No. Observations:', None), ('Df Residuals:', None), ('Df Model:', None), ] top_right = [('Log-Likelihood:', None), ('AIC:', ["%#8.4g" % self.aic]), ('BIC:', ["%#8.4g" % self.bic]) ] if title is None: title = self.model.__class__.__name__ + ' ' + "Results" # create summary table instance from statsmodels.iolib.summary import Summary smry = Summary() smry.add_table_2cols(self, gleft=top_left, gright=top_right, yname=yname, xname=xname, title=title) smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha, use_t=self.use_t) return smry
Summarize the Regression Results Parameters ---------- yname : str, optional Default is `y` xname : list[str], optional Names for the exogenous variables, default is "var_xx". Must match the number of parameters in the model title : str, optional Title for the top table. If not None, then this replaces the default title alpha : float significance level for the confidence intervals Returns ------- smry : Summary instance this holds the summary tables and text, which can be printed or converted to various output formats. See Also -------- statsmodels.iolib.summary.Summary : class to hold summary results
summary
python
statsmodels/statsmodels
statsmodels/base/model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py
BSD-3-Clause
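A short usage sketch (Probit on spector; the label and significance level are illustrative):

# Sketch: customized summary table of an MLE fit.
import statsmodels.api as sm

spector = sm.datasets.spector.load_pandas()
res = sm.Probit(spector.endog, sm.add_constant(spector.exog)).fit(disp=0)
print(res.summary(yname="GRADE", alpha=0.10))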
def func(self, params):
    """
    A penalty function on a vector of parameters.

    Parameters
    ----------
    params : array_like
        A vector of parameters.

    Returns
    -------
    A scalar penalty value; greater values imply greater penalization.
    """
    raise NotImplementedError
A penalty function on a vector of parameters.

Parameters
----------
params : array_like
    A vector of parameters.

Returns
-------
A scalar penalty value; greater values imply greater penalization.
func
python
statsmodels/statsmodels
statsmodels/base/_penalties.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py
BSD-3-Clause
def deriv(self, params): """ The gradient of a penalty function. Parameters ---------- params : array_like A vector of parameters Returns ------- The gradient of the penalty with respect to each element in `params`. """ raise NotImplementedError
The gradient of a penalty function. Parameters ---------- params : array_like A vector of parameters Returns ------- The gradient of the penalty with respect to each element in `params`.
deriv
python
statsmodels/statsmodels
statsmodels/base/_penalties.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py
BSD-3-Clause
def _null_weights(self, params):
    """Workaround for the null model

    This will not be needed anymore when we can use `self._null_drop_keys`
    as in DiscreteModels.
    TODO: check other models
    """
    if np.size(self.weights) > 1:
        if len(params) == 1:
            raise  # raise to identify models where this would be needed
        return 0.

    return self.weights
Workaround for the null model

This will not be needed anymore when we can use `self._null_drop_keys`
as in DiscreteModels.
TODO: check other models
_null_weights
python
statsmodels/statsmodels
statsmodels/base/_penalties.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py
BSD-3-Clause
def deriv2(self, params):
    """Second derivative of function

    This returns a scalar or a vector with the same shape as params,
    not a square Hessian. If the return is 1-dimensional, then it is
    the diagonal of the Hessian.
    """
    # 3 segments in absolute value
    tau = self.tau
    p = np.atleast_1d(params)
    p_abs = np.abs(p)
    res = np.zeros(p_abs.shape)
    mask1 = p_abs < tau
    mask3 = p_abs >= self.c * tau
    mask2 = ~mask1 & ~mask3

    res[mask2] = -1 / (self.c - 1)

    return self.weights * res
Second derivative of function

This returns a scalar or a vector with the same shape as params, not
a square Hessian. If the return is 1-dimensional, then it is the
diagonal of the Hessian.
deriv2
python
statsmodels/statsmodels
statsmodels/base/_penalties.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py
BSD-3-Clause
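The three-segment structure above is the SCAD penalty; a sketch evaluating it directly (hedged: SCAD lives in the private statsmodels.base._penalties module, so the constructor and its defaults may differ across versions):

# Sketch: value, gradient and Hessian diagonal of the SCAD penalty.
import numpy as np
from statsmodels.base._penalties import SCAD

pen = SCAD(tau=0.5, c=3.7)
params = np.array([0.1, 1.0, 3.0])  # one point in each segment
print(pen.func(params))
print(pen.deriv(params))
print(pen.deriv2(params))  # nonzero only on the middle segment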
def func(self, params):
    """evaluate penalty function at params

    Parameters
    ----------
    params : ndarray
        array of parameters at which the penalty is evaluated

    Returns
    -------
    value : ndarray
        value(s) of penalty function
    """
    # TODO: `and np.size(params) > 1` is hack for llnull, need better solution
    # Is this still needed? it seems to work without
    if self.restriction is not None:
        params = self.restriction.dot(params)

    value = self.penalty.func(params)

    return (self.weights * value.T).T.sum(0)
evaluate penalty function at params

Parameters
----------
params : ndarray
    array of parameters at which the penalty is evaluated

Returns
-------
value : ndarray
    value(s) of penalty function
func
python
statsmodels/statsmodels
statsmodels/base/_penalties.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py
BSD-3-Clause
def deriv(self, params):
    """first derivative of penalty function w.r.t. params

    Parameters
    ----------
    params : ndarray
        array of parameters at which derivative is evaluated

    Returns
    -------
    deriv : ndarray
        array of first partial derivatives
    """
    if self.restriction is not None:
        params = self.restriction.dot(params)

    value = self.penalty.deriv(params)

    if self.restriction is not None:
        return self.weights * value.T.dot(self.restriction)
    else:
        return (self.weights * value.T)
first derivative of penalty function w.r.t. params

Parameters
----------
params : ndarray
    array of parameters at which derivative is evaluated

Returns
-------
deriv : ndarray
    array of first partial derivatives
deriv
python
statsmodels/statsmodels
statsmodels/base/_penalties.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py
BSD-3-Clause
def deriv2(self, params):
    """second derivative of penalty function w.r.t. params

    Parameters
    ----------
    params : ndarray
        array of parameters at which derivative is evaluated

    Returns
    -------
    deriv2 : ndarray, 2-D
        second derivative matrix
    """
    if self.restriction is not None:
        params = self.restriction.dot(params)

    value = self.penalty.deriv2(params)

    if self.restriction is not None:
        # note: univariate penalty returns 1d array for diag,
        # i.e. hessian_diag
        v = (self.restriction.T * value * self.weights)
        value = v.dot(self.restriction)
    else:
        value = np.diag(self.weights * value)

    return value
second derivative of penalty function w.r.t. params

Parameters
----------
params : ndarray
    array of parameters at which derivative is evaluated

Returns
-------
deriv2 : ndarray, 2-D
    second derivative matrix
deriv2
python
statsmodels/statsmodels
statsmodels/base/_penalties.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py
BSD-3-Clause
def func(self, mat, mat_inv): """ Parameters ---------- mat : square matrix The matrix to be penalized. mat_inv : square matrix The inverse of `mat`. Returns ------- A scalar penalty value """ raise NotImplementedError
Parameters ---------- mat : square matrix The matrix to be penalized. mat_inv : square matrix The inverse of `mat`. Returns ------- A scalar penalty value
func
python
statsmodels/statsmodels
statsmodels/base/_penalties.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py
BSD-3-Clause
def deriv(self, mat, mat_inv): """ Parameters ---------- mat : square matrix The matrix to be penalized. mat_inv : square matrix The inverse of `mat`. Returns ------- A vector containing the gradient of the penalty with respect to each element in the lower triangle of `mat`. """ raise NotImplementedError
Parameters ---------- mat : square matrix The matrix to be penalized. mat_inv : square matrix The inverse of `mat`. Returns ------- A vector containing the gradient of the penalty with respect to each element in the lower triangle of `mat`.
deriv
python
statsmodels/statsmodels
statsmodels/base/_penalties.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalties.py
BSD-3-Clause
def qc_results(params, alpha, score, qc_tol, qc_verbose=False): """ Theory dictates that one of two conditions holds: i) abs(score[i]) == alpha[i] and params[i] != 0 ii) abs(score[i]) <= alpha[i] and params[i] == 0 qc_results checks to see that (ii) holds, within qc_tol qc_results also checks for nan or results of the wrong shape. Parameters ---------- params : ndarray model parameters. Not including the added variables x_added. alpha : ndarray regularization coefficients score : function Gradient of unregularized objective function qc_tol : float Tolerance to hold conditions (i) and (ii) to for QC check. qc_verbose : bool If true, print out a full QC report upon failure Returns ------- passed : bool True if QC check passed qc_dict : Dictionary Keys are fprime, alpha, params, passed_array Prints ------ Warning message if QC check fails. """ ## Check for fatal errors assert not np.isnan(params).max() assert (params == params.ravel('F')).min(), \ "params should have already been 1-d" ## Start the theory compliance check fprime = score(params) k_params = len(params) passed_array = np.array([True] * k_params) for i in range(k_params): if alpha[i] > 0: # If |fprime| is too big, then something went wrong if (abs(fprime[i]) - alpha[i]) / alpha[i] > qc_tol: passed_array[i] = False qc_dict = dict( fprime=fprime, alpha=alpha, params=params, passed_array=passed_array) passed = passed_array.min() if not passed: num_failed = (~passed_array).sum() message = 'QC check did not pass for %d out of %d parameters' % ( num_failed, k_params) message += '\nTry increasing solver accuracy or number of iterations'\ ', decreasing alpha, or switch solvers' if qc_verbose: message += _get_verbose_addon(qc_dict) import warnings warnings.warn(message, ConvergenceWarning) return passed
Theory dictates that one of two conditions holds:
    i) abs(score[i]) == alpha[i]  and  params[i] != 0
    ii) abs(score[i]) <= alpha[i]  and  params[i] == 0
qc_results checks to see that (ii) holds, within qc_tol.

qc_results also checks for nan or results of the wrong shape.

Parameters
----------
params : ndarray
    model parameters.  Not including the added variables x_added.
alpha : ndarray
    regularization coefficients
score : function
    Gradient of unregularized objective function
qc_tol : float
    Tolerance to hold conditions (i) and (ii) to for QC check.
qc_verbose : bool
    If true, print out a full QC report upon failure

Returns
-------
passed : bool
    True if QC check passed. (The internal qc_dict with keys fprime,
    alpha, params and passed_array is only used to build the verbose
    report and is not returned.)

Prints
------
Warning message if QC check fails.
qc_results
python
statsmodels/statsmodels
statsmodels/base/l1_solvers_common.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_solvers_common.py
BSD-3-Clause
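qc_results (and do_trim_params below) are exercised through the l1 solvers; a sketch via fit_regularized, with Logit on the spector data as an arbitrary example:

# Sketch: qc_tol and trim_mode map to the checks described above.
import statsmodels.api as sm

spector = sm.datasets.spector.load_pandas()
mod = sm.Logit(spector.endog, sm.add_constant(spector.exog))
res_l1 = mod.fit_regularized(method="l1", alpha=1.0, qc_tol=0.03,
                             trim_mode="auto", disp=0)
print(res_l1.params)  # trimmed coefficients are exactly zero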
def do_trim_params(params, k_params, alpha, score, passed, trim_mode, size_trim_tol, auto_trim_tol): """ Trims (set to zero) params that are zero at the theoretical minimum. Uses heuristics to account for the solver not actually finding the minimum. In all cases, if alpha[i] == 0, then do not trim the ith param. In all cases, do nothing with the added variables. Parameters ---------- params : ndarray model parameters. Not including added variables. k_params : Int Number of parameters alpha : ndarray regularization coefficients score : Function. score(params) should return a 1-d vector of derivatives of the unpenalized objective function. passed : bool True if the QC check passed trim_mode : 'auto, 'size', or 'off' If not 'off', trim (set to zero) parameters that would have been zero if the solver reached the theoretical minimum. If 'auto', trim params using the Theory above. If 'size', trim params if they have very small absolute value size_trim_tol : float or 'auto' (default = 'auto') For use when trim_mode === 'size' auto_trim_tol : float For sue when trim_mode == 'auto'. Use qc_tol : float Print warning and do not allow auto trim when (ii) in "Theory" (above) is violated by this much. Returns ------- params : ndarray Trimmed model parameters trimmed : ndarray of booleans trimmed[i] == True if the ith parameter was trimmed. """ ## Trim the small params trimmed = [False] * k_params if trim_mode == 'off': trimmed = np.array([False] * k_params) elif trim_mode == 'auto' and not passed: import warnings msg = "Could not trim params automatically due to failed QC check. " \ "Trimming using trim_mode == 'size' will still work." warnings.warn(msg, ConvergenceWarning) trimmed = np.array([False] * k_params) elif trim_mode == 'auto' and passed: fprime = score(params) for i in range(k_params): if alpha[i] != 0: if (alpha[i] - abs(fprime[i])) / alpha[i] > auto_trim_tol: params[i] = 0.0 trimmed[i] = True elif trim_mode == 'size': for i in range(k_params): if alpha[i] != 0: if abs(params[i]) < size_trim_tol: params[i] = 0.0 trimmed[i] = True else: raise ValueError( "trim_mode == %s, which is not recognized" % (trim_mode)) return params, np.asarray(trimmed)
Trims (sets to zero) params that are zero at the theoretical minimum.
Uses heuristics to account for the solver not actually finding the minimum.

In all cases, if alpha[i] == 0, then do not trim the ith param.
In all cases, do nothing with the added variables.

Parameters
----------
params : ndarray
    model parameters.  Not including added variables.
k_params : int
    Number of parameters
alpha : ndarray
    regularization coefficients
score : function
    score(params) should return a 1-d vector of derivatives of the
    unpenalized objective function.
passed : bool
    True if the QC check passed
trim_mode : 'auto', 'size', or 'off'
    If not 'off', trim (set to zero) parameters that would have been zero
    if the solver reached the theoretical minimum.
    If 'auto', trim params using the theory above.
    If 'size', trim params if they have very small absolute value.
size_trim_tol : float or 'auto' (default = 'auto')
    For use when trim_mode == 'size'
auto_trim_tol : float
    For use when trim_mode == 'auto'.

Returns
-------
params : ndarray
    Trimmed model parameters
trimmed : ndarray of booleans
    trimmed[i] == True if the ith parameter was trimmed.
do_trim_params
python
statsmodels/statsmodels
statsmodels/base/l1_solvers_common.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/l1_solvers_common.py
BSD-3-Clause
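A minimal sketch of 'auto' trimming on the same kind of hypothetical toy problem: the solver leaves a coefficient slightly off zero, and the gradient-based criterion detects that it should be exactly zero at the L1 optimum.

import numpy as np
from statsmodels.base.l1_solvers_common import do_trim_params

c = np.array([0.3, 2.0])
alpha = np.array([1.0, 1.0])              # L1 penalty weights
score = lambda b: b - c                   # gradient of 0.5*||b - c||^2

params = np.array([0.0004, 1.0])          # first coef should be exactly 0
params, trimmed = do_trim_params(
    params, k_params=2, alpha=alpha, score=score, passed=True,
    trim_mode='auto', size_trim_tol='auto', auto_trim_tol=0.01)
print(params, trimmed)                    # [0. 1.] [ True False]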
def t_test(self, value=0, alternative='two-sided'):
    '''z- or t-test for the hypothesis that the mean is equal to value

    Parameters
    ----------
    value : array_like
        value under the null hypothesis
    alternative : str
        'two-sided', 'larger', 'smaller'

    Returns
    -------
    stat : ndarray
        test statistic
    pvalue : ndarray
        p-value of the hypothesis test, the distribution is given by
        the ``dist`` attribute of the instance, specified in `__init__`.
        Default if not specified is the normal distribution.
    '''
    # assumes symmetric distribution
    stat = (self.predicted - value) / self.se

    if alternative in ['two-sided', '2-sided', '2s']:
        pvalue = self.dist.sf(np.abs(stat), *self.dist_args) * 2
    elif alternative in ['larger', 'l']:
        pvalue = self.dist.sf(stat, *self.dist_args)
    elif alternative in ['smaller', 's']:
        pvalue = self.dist.cdf(stat, *self.dist_args)
    else:
        raise ValueError('invalid alternative')
    return stat, pvalue
z- or t-test for the hypothesis that the mean is equal to value

Parameters
----------
value : array_like
    value under the null hypothesis
alternative : str
    'two-sided', 'larger', 'smaller'

Returns
-------
stat : ndarray
    test statistic
pvalue : ndarray
    p-value of the hypothesis test, the distribution is given by
    the ``dist`` attribute of the instance, specified in `__init__`.
    Default if not specified is the normal distribution.
t_test
python
statsmodels/statsmodels
statsmodels/base/_prediction_inference.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py
BSD-3-Clause
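The same two-sided z-test computed by hand (a sketch; `predicted` and `se` are made-up stand-ins for the instance attributes, and the normal distribution plays the role of `self.dist`):

import numpy as np
from scipy import stats

predicted = np.array([1.2, 0.4])
se = np.array([0.5, 0.5])
value = 0.0

stat = (predicted - value) / se
pvalue = stats.norm.sf(np.abs(stat)) * 2   # two-sided, symmetric distribution
print(stat, pvalue)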
def _conf_int_generic(self, center, se, alpha, dist_args=None): """internal function to avoid code duplication """ if dist_args is None: dist_args = () q = self.dist.ppf(1 - alpha / 2., *dist_args) lower = center - q * se upper = center + q * se ci = np.column_stack((lower, upper)) # if we want to stack at a new last axis, for lower.ndim > 1 # np.concatenate((lower[..., None], upper[..., None]), axis=-1) return ci
internal function to avoid code duplication
_conf_int_generic
python
statsmodels/statsmodels
statsmodels/base/_prediction_inference.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py
BSD-3-Clause
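The interval the helper above produces, written out directly (a sketch with made-up numbers): center +/- q * se with q the 1 - alpha/2 quantile, stacked into an (n, 2) array.

import numpy as np
from scipy import stats

center = np.array([1.2, 0.4])
se = np.array([0.5, 0.5])
alpha = 0.05

q = stats.norm.ppf(1 - alpha / 2)          # ~1.96 for alpha = 0.05
ci = np.column_stack((center - q * se, center + q * se))
print(ci)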
def conf_int(self, *, alpha=0.05, **kwds):
    """Confidence interval for the predicted value.

    Parameters
    ----------
    alpha : float, optional
        The significance level for the confidence interval, i.e., the
        default `alpha` = .05 returns a 95% confidence interval.
    kwds : extra keyword arguments
        Ignored in the base class; only present for compatibility with the
        consistent signature of subclasses.

    Returns
    -------
    ci : ndarray, (k_constraints, 2)
        The array has the lower and the upper limit of the confidence
        interval in the columns.
    """
    ci = self._conf_int_generic(self.predicted, self.se, alpha,
                                dist_args=self.dist_args)
    return ci
Confidence interval for the predicted value.

Parameters
----------
alpha : float, optional
    The significance level for the confidence interval, i.e., the
    default `alpha` = .05 returns a 95% confidence interval.
kwds : extra keyword arguments
    Ignored in the base class; only present for compatibility with the
    consistent signature of subclasses.

Returns
-------
ci : ndarray, (k_constraints, 2)
    The array has the lower and the upper limit of the confidence
    interval in the columns.
conf_int
python
statsmodels/statsmodels
statsmodels/base/_prediction_inference.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py
BSD-3-Clause
def summary_frame(self, alpha=0.05):
    """Summary frame

    Parameters
    ----------
    alpha : float, optional
        The significance level for the confidence interval, i.e., the
        default `alpha` = .05 returns a 95% confidence interval.

    Returns
    -------
    pandas DataFrame with columns
    'predicted', 'se', 'ci_lower', 'ci_upper'.
    """
    ci = self.conf_int(alpha=alpha)
    to_include = {}
    to_include['predicted'] = self.predicted
    to_include['se'] = self.se
    to_include['ci_lower'] = ci[:, 0]
    to_include['ci_upper'] = ci[:, 1]

    self.table = to_include
    # pandas dict does not handle 2d_array
    # data = np.column_stack(list(to_include.values()))
    # names = ....
    res = pd.DataFrame(to_include, index=self.row_labels,
                       columns=to_include.keys())
    return res
Summary frame

Parameters
----------
alpha : float, optional
    The significance level for the confidence interval, i.e., the
    default `alpha` = .05 returns a 95% confidence interval.

Returns
-------
pandas DataFrame with columns
'predicted', 'se', 'ci_lower', 'ci_upper'.
summary_frame
python
statsmodels/statsmodels
statsmodels/base/_prediction_inference.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py
BSD-3-Clause
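Building the same four-column table by hand (a sketch with made-up numbers, to show exactly what the frame contains):

import numpy as np
import pandas as pd
from scipy import stats

predicted = np.array([1.2, 0.4])
se = np.array([0.5, 0.5])
q = stats.norm.ppf(0.975)                  # 95% two-sided quantile
frame = pd.DataFrame({
    'predicted': predicted,
    'se': se,
    'ci_lower': predicted - q * se,
    'ci_upper': predicted + q * se,
})
print(frame)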
def conf_int(self, method='endpoint', alpha=0.05, **kwds):
    """Confidence interval for the predicted value.

    This is currently only available for t and z tests.

    Parameters
    ----------
    method : {"endpoint", "delta"}
        Method for the confidence interval.
        If method is "endpoint", then the confidence interval of the
        linear predictor is transformed by the prediction function.
        If method is "delta", then the delta-method is used. The
        confidence interval in this case might reach outside the range
        of the prediction, for example probabilities larger than one or
        smaller than zero.
    alpha : float, optional
        The significance level for the confidence interval, i.e., the
        default `alpha` = .05 returns a 95% confidence interval.
    kwds : extra keyword arguments
        Currently ignored, only for compatibility, consistent signature.

    Returns
    -------
    ci : ndarray, (k_constraints, 2)
        The array has the lower and the upper limit of the confidence
        interval in the columns.
    """
    tmp = np.linspace(0, 1, 6)
    # TODO: drop check?
    is_linear = (self.func(tmp) == tmp).all()
    if method == 'endpoint' and not is_linear:
        ci_linear = self._conf_int_generic(self.linpred, self.linpred_se,
                                           alpha,
                                           dist_args=self.dist_args)
        ci = self.func(ci_linear)
    elif method == 'delta' or is_linear:
        ci = self._conf_int_generic(self.predicted, self.se, alpha,
                                    dist_args=self.dist_args)
    else:
        raise ValueError('method has to be "endpoint" or "delta"')

    return ci
Confidence interval for the predicted value.

This is currently only available for t and z tests.

Parameters
----------
method : {"endpoint", "delta"}
    Method for the confidence interval.
    If method is "endpoint", then the confidence interval of the
    linear predictor is transformed by the prediction function.
    If method is "delta", then the delta-method is used. The
    confidence interval in this case might reach outside the range
    of the prediction, for example probabilities larger than one or
    smaller than zero.
alpha : float, optional
    The significance level for the confidence interval, i.e., the
    default `alpha` = .05 returns a 95% confidence interval.
kwds : extra keyword arguments
    Currently ignored, only for compatibility, consistent signature.

Returns
-------
ci : ndarray, (k_constraints, 2)
    The array has the lower and the upper limit of the confidence
    interval in the columns.
conf_int
python
statsmodels/statsmodels
statsmodels/base/_prediction_inference.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py
BSD-3-Clause
def conf_int(self, method='endpoint', alpha=0.05, **kwds):
    """Confidence interval for the predicted value.

    This is currently only available for t and z tests.

    Parameters
    ----------
    method : {"endpoint", "delta"}
        Method for the confidence interval.
        If method is "endpoint", then the confidence interval of the
        linear predictor is transformed by the prediction function.
        If method is "delta", then the delta-method is used. The
        confidence interval in this case might reach outside the range
        of the prediction, for example probabilities larger than one or
        smaller than zero.
    alpha : float, optional
        The significance level for the confidence interval, i.e., the
        default `alpha` = .05 returns a 95% confidence interval.
    kwds : extra keyword arguments
        Currently ignored, only for compatibility, consistent signature.

    Returns
    -------
    ci : ndarray, (k_constraints, 2)
        The array has the lower and the upper limit of the confidence
        interval in the columns.
    """
    tmp = np.linspace(0, 1, 6)
    is_linear = (self.link.inverse(tmp) == tmp).all()
    if method == 'endpoint' and not is_linear:
        ci_linear = self.linpred.conf_int(alpha=alpha, obs=False)
        ci = self.link.inverse(ci_linear)
    elif method == 'delta' or is_linear:
        se = self.se_mean
        q = self.dist.ppf(1 - alpha / 2., *self.dist_args)
        lower = self.predicted_mean - q * se
        upper = self.predicted_mean + q * se
        ci = np.column_stack((lower, upper))
        # if we want to stack at a new last axis, for lower.ndim > 1
        # np.concatenate((lower[..., None], upper[..., None]), axis=-1)
    else:
        raise ValueError('method has to be "endpoint" or "delta"')

    return ci
Confidence interval for the predicted value.

This is currently only available for t and z tests.

Parameters
----------
method : {"endpoint", "delta"}
    Method for the confidence interval.
    If method is "endpoint", then the confidence interval of the
    linear predictor is transformed by the prediction function.
    If method is "delta", then the delta-method is used. The
    confidence interval in this case might reach outside the range
    of the prediction, for example probabilities larger than one or
    smaller than zero.
alpha : float, optional
    The significance level for the confidence interval, i.e., the
    default `alpha` = .05 returns a 95% confidence interval.
kwds : extra keyword arguments
    Currently ignored, only for compatibility, consistent signature.

Returns
-------
ci : ndarray, (k_constraints, 2)
    The array has the lower and the upper limit of the confidence
    interval in the columns.
conf_int
python
statsmodels/statsmodels
statsmodels/base/_prediction_inference.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py
BSD-3-Clause
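A hand-computed illustration of the "endpoint" versus "delta" distinction described above (a sketch, assuming a logit link and made-up numbers): the endpoint interval transforms the linear-predictor interval and stays inside (0, 1), while the delta interval can spill outside it.

import numpy as np
from scipy import stats, special

linpred, linpred_se = 2.5, 1.0
q = stats.norm.ppf(0.975)

# endpoint: transform the confidence limits of the linear predictor
ci_endpoint = special.expit(np.array([linpred - q * linpred_se,
                                      linpred + q * linpred_se]))

# delta: mean and standard error on the probability scale
p = special.expit(linpred)
se_p = p * (1 - p) * linpred_se            # derivative of expit is p*(1-p)
ci_delta = np.array([p - q * se_p, p + q * se_p])
print(ci_endpoint)                         # inside (0, 1)
print(ci_delta)                            # upper limit exceeds 1 here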
def summary_frame(self, alpha=0.05):
    """Summary frame

    Parameters
    ----------
    alpha : float, optional
        The significance level for the confidence interval, i.e., the
        default `alpha` = .05 returns a 95% confidence interval.

    Returns
    -------
    pandas DataFrame with columns
    'mean', 'mean_se', 'mean_ci_lower', 'mean_ci_upper'.
    """
    # TODO: finish and cleanup
    ci_mean = self.conf_int(alpha=alpha)
    to_include = {}
    to_include['mean'] = self.predicted_mean
    to_include['mean_se'] = self.se_mean
    to_include['mean_ci_lower'] = ci_mean[:, 0]
    to_include['mean_ci_upper'] = ci_mean[:, 1]

    self.table = to_include
    # pandas dict does not handle 2d_array
    # data = np.column_stack(list(to_include.values()))
    # names = ....
    res = pd.DataFrame(to_include, index=self.row_labels,
                       columns=to_include.keys())
    return res
Summary frame

Parameters
----------
alpha : float, optional
    The significance level for the confidence interval, i.e., the
    default `alpha` = .05 returns a 95% confidence interval.

Returns
-------
pandas DataFrame with columns
'mean', 'mean_se', 'mean_ci_lower', 'mean_ci_upper'.
summary_frame
python
statsmodels/statsmodels
statsmodels/base/_prediction_inference.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py
BSD-3-Clause
def _get_exog_predict(self, exog=None, transform=True, row_labels=None):
    """Prepare or transform exog for prediction

    Parameters
    ----------
    exog : array_like, optional
        The values for which you want to predict.
    transform : bool, optional
        If the model was fit via a formula, do you want to pass
        exog through the formula. Default is True. E.g., if you fit
        a model y ~ log(x1) + log(x2), and transform is True, then
        you can pass a data structure that contains x1 and x2 in
        their original form. Otherwise, you'd need to log the data
        first.
    row_labels : list of str or None
        If row_labels are provided, then they will replace the generated
        labels.

    Returns
    -------
    exog : ndarray
        Prediction exog
    row_labels : list of str
        Labels or pandas index for rows of prediction
    """
    # prepare exog and row_labels, based on base Results.predict
    if transform and hasattr(self.model, 'formula') and exog is not None:
        from statsmodels.formula._manager import FormulaManager
        mgr = FormulaManager()
        if isinstance(exog, pd.Series):
            exog = pd.DataFrame(exog)
        exog = mgr.get_matrices(self.model.data.model_spec, exog)

    if exog is not None:
        if row_labels is None:
            row_labels = getattr(exog, 'index', None)
            if callable(row_labels):
                row_labels = None

        exog = np.asarray(exog)
        if exog.ndim == 1 and (self.model.exog.ndim == 1 or
                               self.model.exog.shape[1] == 1):
            exog = exog[:, None]
        exog = np.atleast_2d(exog)  # needed in count model shape[1]
    else:
        exog = self.model.exog

        if row_labels is None:
            row_labels = getattr(self.model.data, 'row_labels', None)
    return exog, row_labels
Prepare or transform exog for prediction

Parameters
----------
exog : array_like, optional
    The values for which you want to predict.
transform : bool, optional
    If the model was fit via a formula, do you want to pass
    exog through the formula. Default is True. E.g., if you fit
    a model y ~ log(x1) + log(x2), and transform is True, then
    you can pass a data structure that contains x1 and x2 in
    their original form. Otherwise, you'd need to log the data
    first.
row_labels : list of str or None
    If row_labels are provided, then they will replace the generated
    labels.

Returns
-------
exog : ndarray
    Prediction exog
row_labels : list of str
    Labels or pandas index for rows of prediction
_get_exog_predict
python
statsmodels/statsmodels
statsmodels/base/_prediction_inference.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py
BSD-3-Clause
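The shape handling in the helper above, isolated (a sketch; `model_k` is a hypothetical stand-in for the model's number of regressors): a 1-d exog for a single-regressor model becomes a column, anything else is promoted to a single row.

import numpy as np

exog = np.asarray([1.0, 2.0, 3.0])
model_k = 1                                # hypothetical regressor count
if exog.ndim == 1 and model_k == 1:
    exog = exog[:, None]                   # three observations, one column
exog = np.atleast_2d(exog)
print(exog.shape)                          # (3, 1)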
def get_prediction_glm(self, exog=None, transform=True,
                       row_labels=None, linpred=None, link=None,
                       pred_kwds=None):
    """
    Compute prediction results for GLM compatible models.

    Parameters
    ----------
    exog : array_like, optional
        The values for which you want to predict.
    transform : bool, optional
        If the model was fit via a formula, do you want to pass
        exog through the formula. Default is True. E.g., if you fit
        a model y ~ log(x1) + log(x2), and transform is True, then
        you can pass a data structure that contains x1 and x2 in
        their original form. Otherwise, you'd need to log the data
        first.
    row_labels : list of str or None
        If row_labels are provided, then they will replace the generated
        labels.
    linpred : linear prediction instance
        Instance of linear prediction results used for confidence intervals
        based on endpoint transformation.
    link : instance of link function
        If no link function is provided, then the `model.family.link` is
        used.
    pred_kwds : dict
        Some models can take additional keyword arguments, such as offset or
        additional exog in multi-part models.
        See the predict method of the model for the details.

    Returns
    -------
    prediction_results : generalized_linear_model.PredictionResults
        The prediction results instance contains prediction and prediction
        variance and can on demand calculate confidence intervals and summary
        tables for the prediction of the mean and of new observations.
    """
    # prepare exog and row_labels, based on base Results.predict
    exog, row_labels = _get_exog_predict(
        self,
        exog=exog,
        transform=transform,
        row_labels=row_labels,
        )

    if pred_kwds is None:
        pred_kwds = {}

    predicted_mean = self.model.predict(self.params, exog, **pred_kwds)

    covb = self.cov_params()

    link_deriv = self.model.family.link.inverse_deriv(linpred.predicted_mean)
    var_pred_mean = link_deriv**2 * (exog * np.dot(covb, exog.T).T).sum(1)
    var_resid = self.scale  # self.mse_resid / weights

    # TODO: check that we have correct scale, Refactor scale #???
    # special case for now:
    if self.cov_type == 'fixed scale':
        var_resid = self.cov_kwds['scale']

    dist = ['norm', 't'][self.use_t]
    return PredictionResultsMean(
        predicted_mean, var_pred_mean, var_resid,
        df=self.df_resid, dist=dist,
        row_labels=row_labels, linpred=linpred, link=link)
Compute prediction results for GLM compatible models.

Parameters
----------
exog : array_like, optional
    The values for which you want to predict.
transform : bool, optional
    If the model was fit via a formula, do you want to pass
    exog through the formula. Default is True. E.g., if you fit
    a model y ~ log(x1) + log(x2), and transform is True, then
    you can pass a data structure that contains x1 and x2 in
    their original form. Otherwise, you'd need to log the data
    first.
row_labels : list of str or None
    If row_labels are provided, then they will replace the generated
    labels.
linpred : linear prediction instance
    Instance of linear prediction results used for confidence intervals
    based on endpoint transformation.
link : instance of link function
    If no link function is provided, then the `model.family.link` is
    used.
pred_kwds : dict
    Some models can take additional keyword arguments, such as offset or
    additional exog in multi-part models.
    See the predict method of the model for the details.

Returns
-------
prediction_results : generalized_linear_model.PredictionResults
    The prediction results instance contains prediction and prediction
    variance and can on demand calculate confidence intervals and summary
    tables for the prediction of the mean and of new observations.
get_prediction_glm
python
statsmodels/statsmodels
statsmodels/base/_prediction_inference.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py
BSD-3-Clause
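The delta-method variance of the GLM mean used above, written out with made-up numbers (a sketch assuming a logit link): with mu = g^{-1}(X b), var(mu_i) is approximately (g^{-1})'(x_i' b)^2 * x_i' Cov(b) x_i.

import numpy as np
from scipy import special

exog = np.array([[1.0, 0.5], [1.0, -1.0]])
params = np.array([0.2, 0.8])
covb = np.array([[0.04, 0.01], [0.01, 0.09]])

linpred = exog @ params
# inverse-link derivative for the logit link: expit'(z) = p * (1 - p)
p = special.expit(linpred)
link_deriv = p * (1 - p)
# row-wise quadratic form x_i' Cov(b) x_i, as in the function above
var_pred_mean = link_deriv**2 * (exog * np.dot(covb, exog.T).T).sum(1)
print(np.sqrt(var_pred_mean))              # standard errors of predicted means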
def get_prediction_linear(self, exog=None, transform=True,
                          row_labels=None, pred_kwds=None, index=None):
    """
    Compute prediction results for linear prediction.

    Parameters
    ----------
    exog : array_like, optional
        The values for which you want to predict.
    transform : bool, optional
        If the model was fit via a formula, do you want to pass
        exog through the formula. Default is True. E.g., if you fit
        a model y ~ log(x1) + log(x2), and transform is True, then
        you can pass a data structure that contains x1 and x2 in
        their original form. Otherwise, you'd need to log the data
        first.
    row_labels : list of str or None
        If row_labels are provided, then they will replace the generated
        labels.
    pred_kwds : dict
        Some models can take additional keyword arguments, such as offset or
        additional exog in multi-part models.
        See the predict method of the model for the details.
    index : slice or array-index
        Is used to select rows and columns of cov_params, if the prediction
        function only depends on a subset of parameters.

    Returns
    -------
    prediction_results : PredictionResults
        The prediction results instance contains prediction and prediction
        variance and can on demand calculate confidence intervals and summary
        tables for the prediction.
    """
    # prepare exog and row_labels, based on base Results.predict
    exog, row_labels = _get_exog_predict(
        self,
        exog=exog,
        transform=transform,
        row_labels=row_labels,
        )

    if pred_kwds is None:
        pred_kwds = {}

    k1 = exog.shape[1]
    if index is None and len(self.params) > k1:
        # TODO: we allow endpoint transformation only for the first link
        index = np.arange(k1)

    # get linear prediction and standard errors
    covb = self.cov_params(column=index)
    var_pred = (exog * np.dot(covb, exog.T).T).sum(1)
    pred_kwds_linear = pred_kwds.copy()
    pred_kwds_linear["which"] = "linear"
    predicted = self.model.predict(self.params, exog, **pred_kwds_linear)

    dist = ['norm', 't'][self.use_t]
    res = PredictionResultsBase(predicted, var_pred,
                                df=self.df_resid,
                                dist=dist,
                                row_labels=row_labels
                                )
    return res
Compute prediction results for linear prediction.

Parameters
----------
exog : array_like, optional
    The values for which you want to predict.
transform : bool, optional
    If the model was fit via a formula, do you want to pass
    exog through the formula. Default is True. E.g., if you fit
    a model y ~ log(x1) + log(x2), and transform is True, then
    you can pass a data structure that contains x1 and x2 in
    their original form. Otherwise, you'd need to log the data
    first.
row_labels : list of str or None
    If row_labels are provided, then they will replace the generated
    labels.
pred_kwds : dict
    Some models can take additional keyword arguments, such as offset or
    additional exog in multi-part models.
    See the predict method of the model for the details.
index : slice or array-index
    Is used to select rows and columns of cov_params, if the prediction
    function only depends on a subset of parameters.

Returns
-------
prediction_results : PredictionResults
    The prediction results instance contains prediction and prediction
    variance and can on demand calculate confidence intervals and summary
    tables for the prediction.
get_prediction_linear
python
statsmodels/statsmodels
statsmodels/base/_prediction_inference.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py
BSD-3-Clause
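The row-wise quadratic form used above for the linear-prediction variance, next to an equivalent einsum (a sketch with made-up numbers): both compute x_i' Cov(b) x_i for each row.

import numpy as np

exog = np.array([[1.0, 0.5], [1.0, -1.0]])
covb = np.array([[0.04, 0.01], [0.01, 0.09]])

var1 = (exog * np.dot(covb, exog.T).T).sum(1)       # as in the function
var2 = np.einsum('ij,jk,ik->i', exog, covb, exog)   # explicit quadratic form
assert np.allclose(var1, var2)
print(var1)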
def get_prediction_monotonic(self, exog=None, transform=True,
                             row_labels=None, link=None,
                             pred_kwds=None, index=None):
    """
    Compute prediction results when endpoint transformation is valid.

    Parameters
    ----------
    exog : array_like, optional
        The values for which you want to predict.
    transform : bool, optional
        If the model was fit via a formula, do you want to pass
        exog through the formula. Default is True. E.g., if you fit
        a model y ~ log(x1) + log(x2), and transform is True, then
        you can pass a data structure that contains x1 and x2 in
        their original form. Otherwise, you'd need to log the data
        first.
    row_labels : list of str or None
        If row_labels are provided, then they will replace the generated
        labels.
    link : instance of link function
        If no link function is provided, then the ``model.family.link``
        is used.
    pred_kwds : dict
        Some models can take additional keyword arguments, such as offset or
        additional exog in multi-part models.
        See the predict method of the model for the details.
    index : slice or array-index
        Is used to select rows and columns of cov_params, if the prediction
        function only depends on a subset of parameters.

    Returns
    -------
    prediction_results : PredictionResults
        The prediction results instance contains prediction and prediction
        variance and can on demand calculate confidence intervals and summary
        tables for the prediction.
    """
    # prepare exog and row_labels, based on base Results.predict
    exog, row_labels = _get_exog_predict(
        self,
        exog=exog,
        transform=transform,
        row_labels=row_labels,
        )

    if pred_kwds is None:
        pred_kwds = {}

    if link is None:
        link = self.model.family.link

    func_deriv = link.inverse_deriv

    # get linear prediction and standard errors
    covb = self.cov_params(column=index)
    linpred_var = (exog * np.dot(covb, exog.T).T).sum(1)
    pred_kwds_linear = pred_kwds.copy()
    pred_kwds_linear["which"] = "linear"
    linpred = self.model.predict(self.params, exog, **pred_kwds_linear)

    predicted = self.model.predict(self.params, exog, **pred_kwds)
    link_deriv = func_deriv(linpred)
    var_pred = link_deriv**2 * linpred_var

    dist = ['norm', 't'][self.use_t]
    res = PredictionResultsMonotonic(predicted, var_pred,
                                     df=self.df_resid, dist=dist,
                                     row_labels=row_labels, linpred=linpred,
                                     linpred_se=np.sqrt(linpred_var),
                                     func=link.inverse, deriv=func_deriv)
    return res
Compute prediction results when endpoint transformation is valid.

Parameters
----------
exog : array_like, optional
    The values for which you want to predict.
transform : bool, optional
    If the model was fit via a formula, do you want to pass
    exog through the formula. Default is True. E.g., if you fit
    a model y ~ log(x1) + log(x2), and transform is True, then
    you can pass a data structure that contains x1 and x2 in
    their original form. Otherwise, you'd need to log the data
    first.
row_labels : list of str or None
    If row_labels are provided, then they will replace the generated
    labels.
link : instance of link function
    If no link function is provided, then the ``model.family.link``
    is used.
pred_kwds : dict
    Some models can take additional keyword arguments, such as offset or
    additional exog in multi-part models.
    See the predict method of the model for the details.
index : slice or array-index
    Is used to select rows and columns of cov_params, if the prediction
    function only depends on a subset of parameters.

Returns
-------
prediction_results : PredictionResults
    The prediction results instance contains prediction and prediction
    variance and can on demand calculate confidence intervals and summary
    tables for the prediction.
get_prediction_monotonic
python
statsmodels/statsmodels
statsmodels/base/_prediction_inference.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py
BSD-3-Clause
def f_pred(p):
    """Prediction function as a function of params
    """
    pred = self.model.predict(p, exog, which=which, **pred_kwds)
    if average:
        # using `.T` which should work if agg_weights is 1-dim
        pred = (pred.T * agg_weights.T).mean(-1).T
    return pred
Prediction function as a function of params
get_prediction_delta.f_pred
python
statsmodels/statsmodels
statsmodels/base/_prediction_inference.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py
BSD-3-Clause
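What the averaging branch above computes, isolated (a sketch with made-up numbers): for 1-d predictions it is simply the mean of the weighted predictions, with the weights left unnormalized.

import numpy as np

pred = np.array([0.2, 0.5, 0.8])           # hypothetical per-row predictions
agg_weights = np.array([1.0, 1.0, 2.0])    # unnormalized weights
avg = (pred.T * agg_weights.T).mean(-1).T  # == (pred * agg_weights).mean()
print(avg)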
def get_prediction_delta(
        self,
        exog=None,
        which="mean",
        average=False,
        agg_weights=None,
        transform=True,
        row_labels=None,
        pred_kwds=None
        ):
    """
    Compute prediction results.

    Parameters
    ----------
    exog : array_like, optional
        The values for which you want to predict.
    which : str
        The statistic that is predicted. Which statistics are available
        depends on the model.predict method.
    average : bool
        If average is True, then the mean prediction is computed, that is,
        predictions are computed for individual exog and then the mean over
        observations is used.
        If average is False, then the results are the predictions for all
        observations, i.e. same length as ``exog``.
    agg_weights : ndarray, optional
        Aggregation weights, only used if average is True.
        The weights are not normalized.
    transform : bool, optional
        If the model was fit via a formula, do you want to pass
        exog through the formula. Default is True. E.g., if you fit
        a model y ~ log(x1) + log(x2), and transform is True, then
        you can pass a data structure that contains x1 and x2 in
        their original form. Otherwise, you'd need to log the data
        first.
    row_labels : list of str or None
        If row_labels are provided, then they will replace the generated
        labels.
    pred_kwds : dict
        Some models can take additional keyword arguments, such as offset or
        additional exog in multi-part models.
        See the predict method of the model for the details.

    Returns
    -------
    prediction_results : generalized_linear_model.PredictionResults
        The prediction results instance contains prediction and prediction
        variance and can on demand calculate confidence intervals and summary
        tables for the prediction of the mean and of new observations.
    """
    # prepare exog and row_labels, based on base Results.predict
    exog, row_labels = _get_exog_predict(
        self,
        exog=exog,
        transform=transform,
        row_labels=row_labels,
        )
    if pred_kwds is None:
        pred_kwds = {}
    if agg_weights is None:
        agg_weights = np.array(1.)

    def f_pred(p):
        """Prediction function as a function of params
        """
        pred = self.model.predict(p, exog, which=which, **pred_kwds)
        if average:
            # using `.T` which should work if agg_weights is 1-dim
            pred = (pred.T * agg_weights.T).mean(-1).T
        return pred

    nlpm = self._get_wald_nonlinear(f_pred)
    # TODO: currently returns NonlinearDeltaCov
    res = PredictionResultsDelta(nlpm)
    return res
Compute prediction results.

Parameters
----------
exog : array_like, optional
    The values for which you want to predict.
which : str
    The statistic that is predicted. Which statistics are available
    depends on the model.predict method.
average : bool
    If average is True, then the mean prediction is computed, that is,
    predictions are computed for individual exog and then the mean over
    observations is used.
    If average is False, then the results are the predictions for all
    observations, i.e. same length as ``exog``.
agg_weights : ndarray, optional
    Aggregation weights, only used if average is True.
    The weights are not normalized.
transform : bool, optional
    If the model was fit via a formula, do you want to pass
    exog through the formula. Default is True. E.g., if you fit
    a model y ~ log(x1) + log(x2), and transform is True, then
    you can pass a data structure that contains x1 and x2 in
    their original form. Otherwise, you'd need to log the data
    first.
row_labels : list of str or None
    If row_labels are provided, then they will replace the generated
    labels.
pred_kwds : dict
    Some models can take additional keyword arguments, such as offset or
    additional exog in multi-part models.
    See the predict method of the model for the details.

Returns
-------
prediction_results : generalized_linear_model.PredictionResults
    The prediction results instance contains prediction and prediction
    variance and can on demand calculate confidence intervals and summary
    tables for the prediction of the mean and of new observations.
get_prediction_delta
python
statsmodels/statsmodels
statsmodels/base/_prediction_inference.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py
BSD-3-Clause
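The delta method behind the function above, done by hand with a forward finite-difference Jacobian (a sketch; statsmodels uses its own numdiff and NonlinearDeltaCov machinery internally, and `f` here is a hypothetical prediction function):

import numpy as np

def delta_var(f, params, cov_params, eps=1e-6):
    # covariance of f(params) via Cov(f) ~= J Cov(b) J'
    f0 = np.atleast_1d(f(params))
    jac = np.empty((f0.size, params.size))
    for j in range(params.size):
        p = params.copy()
        p[j] += eps
        jac[:, j] = (np.atleast_1d(f(p)) - f0) / eps
    return jac @ cov_params @ jac.T

params = np.array([0.2, 0.8])
covb = np.array([[0.04, 0.01], [0.01, 0.09]])
f = lambda b: np.exp(b[0] + 0.5 * b[1])    # hypothetical prediction function
print(delta_var(f, params, covb))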
def get_prediction(self, exog=None, transform=True, which="mean",
                   row_labels=None, average=False, agg_weights=None,
                   pred_kwds=None):
    """
    Compute prediction results when endpoint transformation is valid.

    Parameters
    ----------
    exog : array_like, optional
        The values for which you want to predict.
    transform : bool, optional
        If the model was fit via a formula, do you want to pass
        exog through the formula. Default is True. E.g., if you fit
        a model y ~ log(x1) + log(x2), and transform is True, then
        you can pass a data structure that contains x1 and x2 in
        their original form. Otherwise, you'd need to log the data
        first.
    which : str
        Which statistic is to be predicted. Default is "mean".
        The available statistics and options depend on the model; see the
        model.predict docstring.
    row_labels : list of str or None
        If row_labels are provided, then they will replace the generated
        labels.
    average : bool
        If average is True, then the mean prediction is computed, that is,
        predictions are computed for individual exog and then the average
        over observations is used.
        If average is False, then the results are the predictions for all
        observations, i.e. same length as ``exog``.
    agg_weights : ndarray, optional
        Aggregation weights, only used if average is True.
        The weights are not normalized.
    pred_kwds : dict
        Some models can take additional keyword arguments, such as offset,
        exposure or additional exog in multi-part models like zero inflated
        models.
        See the predict method of the model for the details.

    Returns
    -------
    prediction_results : PredictionResults
        The prediction results instance contains prediction and prediction
        variance and can on demand calculate confidence intervals and
        summary dataframe for the prediction.

    Notes
    -----
    Status: new in 0.14, experimental
    """
    use_endpoint = getattr(self.model, "_use_endpoint", True)
    if pred_kwds is None:
        pred_kwds = {}

    if which == "linear":
        res = get_prediction_linear(
            self,
            exog=exog,
            transform=transform,
            row_labels=row_labels,
            pred_kwds=pred_kwds,
            )

    elif (which == "mean") and (use_endpoint is True) and (average is False):
        # endpoint transformation
        k1 = self.model.exog.shape[1]
        if len(self.params) > k1:
            # TODO: we allow endpoint transformation only for the first link
            index = np.arange(k1)
        else:
            index = None

        pred_kwds["which"] = which
        # TODO: add link or ilink to all link based models (except zi)
        link = getattr(self.model, "link", None)
        if link is None:
            # GLM
            if hasattr(self.model, "family"):
                link = getattr(self.model.family, "link", None)
        if link is None:
            # defaulting to log link for count models
            import warnings
            warnings.warn("using default log-link in get_prediction")
            from statsmodels.genmod.families import links
            link = links.Log()
        res = get_prediction_monotonic(
            self,
            exog=exog,
            transform=transform,
            row_labels=row_labels,
            link=link,
            pred_kwds=pred_kwds,
            index=index,
            )

    else:
        # which is not mean or linear, or we need averaging
        res = get_prediction_delta(
            self,
            exog=exog,
            which=which,
            average=average,
            agg_weights=agg_weights,
            pred_kwds=pred_kwds,
            )

    return res
Compute prediction results when endpoint transformation is valid.

Parameters
----------
exog : array_like, optional
    The values for which you want to predict.
transform : bool, optional
    If the model was fit via a formula, do you want to pass
    exog through the formula. Default is True. E.g., if you fit
    a model y ~ log(x1) + log(x2), and transform is True, then
    you can pass a data structure that contains x1 and x2 in
    their original form. Otherwise, you'd need to log the data
    first.
which : str
    Which statistic is to be predicted. Default is "mean".
    The available statistics and options depend on the model; see the
    model.predict docstring.
row_labels : list of str or None
    If row_labels are provided, then they will replace the generated
    labels.
average : bool
    If average is True, then the mean prediction is computed, that is,
    predictions are computed for individual exog and then the average
    over observations is used.
    If average is False, then the results are the predictions for all
    observations, i.e. same length as ``exog``.
agg_weights : ndarray, optional
    Aggregation weights, only used if average is True.
    The weights are not normalized.
pred_kwds : dict
    Some models can take additional keyword arguments, such as offset,
    exposure or additional exog in multi-part models like zero inflated
    models.
    See the predict method of the model for the details.

Returns
-------
prediction_results : PredictionResults
    The prediction results instance contains prediction and prediction
    variance and can on demand calculate confidence intervals and
    summary dataframe for the prediction.

Notes
-----
Status: new in 0.14, experimental
get_prediction
python
statsmodels/statsmodels
statsmodels/base/_prediction_inference.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py
BSD-3-Clause
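Typical use through a results instance (a sketch; the Poisson setup and simulated data are assumptions, but `get_prediction` and `summary_frame` are the documented entry points in statsmodels >= 0.14):

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = sm.add_constant(rng.normal(size=(200, 1)))
y = rng.poisson(np.exp(x @ np.array([0.5, 0.3])))
res = sm.Poisson(y, x).fit(disp=0)

pred = res.get_prediction(which="mean")    # dispatches as in the code above
print(pred.summary_frame().head())         # prediction, se, and CI columns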
def params_transform_univariate(params, cov_params, link=None, transform=None,
                                row_labels=None):
    """
    results for univariate, nonlinear, monotonically transformed parameters

    This provides transformed values, standard errors and confidence interval
    for transformations of parameters, for example in calculating rates with
    `exp(params)` in the case of Poisson or other models with exponential
    mean function.
    """

    from statsmodels.genmod.families import links
    if link is None and transform is None:
        link = links.Log()

    if row_labels is None and hasattr(params, 'index'):
        row_labels = params.index

    params = np.asarray(params)

    predicted_mean = link.inverse(params)
    link_deriv = link.inverse_deriv(params)
    var_pred_mean = link_deriv**2 * np.diag(cov_params)
    # TODO: do we want covariance also, or just var/se

    dist = stats.norm

    # TODO: need ci for linear prediction, method of `linpred`
    linpred = PredictionResultsMean(
        params, np.diag(cov_params), dist=dist,
        row_labels=row_labels, link=links.Identity())

    res = PredictionResultsMean(
        predicted_mean, var_pred_mean, dist=dist,
        row_labels=row_labels, linpred=linpred, link=link)

    return res
results for univariate, nonlinear, monotonically transformed parameters

This provides transformed values, standard errors and confidence interval
for transformations of parameters, for example in calculating rates with
`exp(params)` in the case of Poisson or other models with exponential
mean function.
params_transform_univariate
python
statsmodels/statsmodels
statsmodels/base/_prediction_inference.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_prediction_inference.py
BSD-3-Clause
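The default log-link transformation above, done by hand with made-up numbers (a sketch): rates are exp(b) with delta-method standard error exp(b) * se(b), since the inverse of the log link and its derivative are both exp.

import numpy as np
from scipy import stats

params = np.array([0.5, -0.2])
se = np.array([0.1, 0.05])                 # sqrt(diag(cov_params))

rate = np.exp(params)
rate_se = np.exp(params) * se              # |d exp(b)/db| * se(b)
q = stats.norm.ppf(0.975)
print(np.column_stack((rate - q * rate_se, rate + q * rate_se)))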
def loglike(self, params, pen_weight=None, **kwds): """ Log-likelihood of model at params """ if pen_weight is None: pen_weight = self.pen_weight llf = super().loglike(params, **kwds) if pen_weight != 0: scale = self._handle_scale(params, **kwds) llf -= 1/scale * pen_weight * self.penal.func(params) return llf
Log-likelihood of model at params
loglike
python
statsmodels/statsmodels
statsmodels/base/_penalized.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/_penalized.py
BSD-3-Clause
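The penalization pattern above, written out for a quadratic penalty (a sketch; `base_loglike` and `scale` are stand-ins for the superclass log-likelihood and the model's `_handle_scale`, and the L2 penalty stands in for `self.penal.func`):

import numpy as np

def penalized_loglike(params, base_loglike, pen_weight, scale=1.0):
    # mirrors: llf = super().loglike(params); llf -= 1/scale * w * penal(params)
    llf = base_loglike(params)
    if pen_weight != 0:
        penal = np.sum(params**2)          # hypothetical quadratic penalty
        llf -= 1 / scale * pen_weight * penal
    return llf

base = lambda b: -0.5 * np.sum((b - 1.0)**2)   # toy log-likelihood
print(penalized_loglike(np.array([0.5, 1.5]), base, pen_weight=0.1))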