def _helper_fit_partition(self, pnum, endog, exog, fit_kwds,
init_kwds_e={}):
"""handles the model fitting for each machine. NOTE: this
is primarily handled outside of DistributedModel because
joblib cannot handle class methods.
Parameters
----------
self : DistributedModel class instance
An instance of DistributedModel.
pnum : scalar
index of current partition.
endog : array_like
endogenous data for current partition.
exog : array_like
exogenous data for current partition.
fit_kwds : dict-like
Keywords needed for the model fitting.
init_kwds_e : dict-like
Additional init_kwds to add for each partition.
Returns
-------
estimation_method result. For the default,
_est_regularized_debiased, a tuple.
"""
temp_init_kwds = self.init_kwds.copy()
temp_init_kwds.update(init_kwds_e)
model = self.model_class(endog, exog, **temp_init_kwds)
results = self.estimation_method(model, pnum, self.partitions,
fit_kwds=fit_kwds,
**self.estimation_kwds)
return results
| _helper_fit_partition | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def fit(self, data_generator, fit_kwds=None, parallel_method="sequential",
parallel_backend=None, init_kwds_generator=None):
"""Performs the distributed estimation using the corresponding
DistributedModel
Parameters
----------
data_generator : generator
A generator that produces a sequence of tuples where the first
element in the tuple corresponds to an endog array and the
second element corresponds to an exog array.
fit_kwds : dict-like or None
Keywords needed for the model fitting.
parallel_method : str
Type of distributed estimation to be used. Currently
"sequential" and "joblib" are supported; a dask backend can
be used through ``parallel_backend``.
parallel_backend : None or joblib parallel_backend object
used to allow support for more complicated backends,
ex: dask.distributed
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
that may vary based on data partition. The current use case
is for WLS and GLS.
Returns
-------
join_method result. For the default, _join_debiased, it returns a
p length array.
"""
if fit_kwds is None:
fit_kwds = {}
if parallel_method == "sequential":
results_l = self.fit_sequential(data_generator, fit_kwds,
init_kwds_generator)
elif parallel_method == "joblib":
results_l = self.fit_joblib(data_generator, fit_kwds,
parallel_backend,
init_kwds_generator)
else:
raise ValueError("parallel_method: %s is currently not supported"
% parallel_method)
params = self.join_method(results_l, **self.join_kwds)
# NOTE that currently, the dummy result model that is initialized
here does not use any init_kwds from the init_kwds_generator even
# if it is provided. It is possible to imagine an edge case where
# this might be a problem but given that the results model instance
# does not correspond to any data partition this seems reasonable.
res_mod = self.model_class([0], [0], **self.init_kwds)
return self.results_class(res_mod, params, **self.results_kwds)
| fit | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
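For orientation, here is a minimal usage sketch of the sequential path through `fit`; the toy data, the hand-rolled `data_gen` helper, and the `alpha` value are illustrative assumptions, not taken from the source above.

```python
import numpy as np
from statsmodels.base.distributed_estimation import DistributedModel

# Hypothetical toy problem: 1000 observations, 5 regressors, sparse beta.
rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 5))
y = X @ np.array([1.0, 0.0, 0.5, 0.0, -1.0]) + rng.normal(size=1000)

def data_gen(endog, exog, partitions):
    # Yield one (endog, exog) tuple per partition.
    n = exog.shape[0]
    size = n // partitions
    for i in range(partitions):
        lo = i * size
        hi = n if i == partitions - 1 else (i + 1) * size
        yield endog[lo:hi], exog[lo:hi, :]

mod = DistributedModel(4)  # defaults to debiased regularized OLS
res = mod.fit(data_gen(y, X, 4), fit_kwds={"alpha": 0.2})
print(res.params)  # p-length array joined by _join_debiased
```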
def fit_sequential(self, data_generator, fit_kwds,
init_kwds_generator=None):
"""Sequentially performs the distributed estimation using
the corresponding DistributedModel
Parameters
----------
data_generator : generator
A generator that produces a sequence of tuples where the first
element in the tuple corresponds to an endog array and the
second element corresponds to an exog array.
fit_kwds : dict-like
Keywords needed for the model fitting.
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
that may vary based on data partition. The current use case
is for WLS and GLS.
Returns
-------
join_method result. For the default, _join_debiased, it returns a
p length array.
"""
results_l = []
if init_kwds_generator is None:
for pnum, (endog, exog) in enumerate(data_generator):
results = _helper_fit_partition(self, pnum, endog, exog,
fit_kwds)
results_l.append(results)
else:
tup_gen = enumerate(zip(data_generator,
init_kwds_generator))
for pnum, ((endog, exog), init_kwds_e) in tup_gen:
results = _helper_fit_partition(self, pnum, endog, exog,
fit_kwds, init_kwds_e)
results_l.append(results)
return results_l
| fit_sequential | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def fit_joblib(self, data_generator, fit_kwds, parallel_backend,
init_kwds_generator=None):
"""Performs the distributed estimation in parallel using joblib
Parameters
----------
data_generator : generator
A generator that produces a sequence of tuples where the first
element in the tuple corresponds to an endog array and the
second element corresponds to an exog array.
fit_kwds : dict-like
Keywords needed for the model fitting.
parallel_backend : None or joblib parallel_backend object
used to allow support for more complicated backends,
ex: dask.distributed
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
that may vary based on data partition. The current use case
is for WLS and GLS.
Returns
-------
join_method result. For the default, _join_debiased, it returns a
p length array.
"""
from statsmodels.tools.parallel import parallel_func
par, f, n_jobs = parallel_func(_helper_fit_partition, self.partitions)
if parallel_backend is None and init_kwds_generator is None:
results_l = par(f(self, pnum, endog, exog, fit_kwds)
for pnum, (endog, exog)
in enumerate(data_generator))
elif parallel_backend is not None and init_kwds_generator is None:
with parallel_backend:
results_l = par(f(self, pnum, endog, exog, fit_kwds)
for pnum, (endog, exog)
in enumerate(data_generator))
elif parallel_backend is None and init_kwds_generator is not None:
tup_gen = enumerate(zip(data_generator, init_kwds_generator))
results_l = par(f(self, pnum, endog, exog, fit_kwds, init_kwds)
for pnum, ((endog, exog), init_kwds)
in tup_gen)
elif parallel_backend is not None and init_kwds_generator is not None:
tup_gen = enumerate(zip(data_generator, init_kwds_generator))
with parallel_backend:
results_l = par(f(self, pnum, endog, exog, fit_kwds, init_kwds)
for pnum, ((endog, exog), init_kwds)
in tup_gen)
return results_l
| fit_joblib | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
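A sketch of the joblib path, reusing `mod` and `data_gen` from the earlier example; the threading backend and `n_jobs` value are illustrative assumptions (a dask backend registered with joblib could be passed the same way).

```python
from joblib import parallel_backend

pb = parallel_backend("threading", n_jobs=2)
res = mod.fit(data_gen(y, X, 4), fit_kwds={"alpha": 0.2},
              parallel_method="joblib", parallel_backend=pb)
```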
def predict(self, exog, *args, **kwargs):
"""Calls self.model.predict for the provided exog. See
Results.predict.
Parameters
----------
exog : array_like
The values for which we want to predict. Unlike the standard
predict, this argument is NOT optional since the data in
self.model is only a placeholder.
*args :
Some models can take additional arguments. See the
predict method of the model for the details.
**kwargs :
Some models can take additional keywords arguments. See the
predict method of the model for the details.
Returns
-------
prediction : ndarray, pandas.Series or pandas.DataFrame
See self.model.predict
"""
return self.model.predict(self.params, exog, *args, **kwargs)
| predict | python | statsmodels/statsmodels | statsmodels/base/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/distributed_estimation.py | BSD-3-Clause |
def _gen_npfuncs(k, L1_wt, alpha, loglike_kwds, score_kwds, hess_kwds):
"""
Negative penalized log-likelihood functions.
Returns the negative penalized log-likelihood, its derivative, and
its Hessian. The penalty only includes the smooth (L2) term.
All three functions have argument signature (x, model), where
``x`` is a point in the parameter space and ``model`` is an
arbitrary statsmodels regression model.
"""
def nploglike(params, model):
nobs = model.nobs
pen_llf = alpha[k] * (1 - L1_wt) * np.sum(params**2) / 2
llf = model.loglike(np.r_[params], **loglike_kwds)
return - llf / nobs + pen_llf
def npscore(params, model):
nobs = model.nobs
pen_grad = alpha[k] * (1 - L1_wt) * params
gr = -model.score(np.r_[params], **score_kwds)[0] / nobs
return gr + pen_grad
def nphess(params, model):
nobs = model.nobs
pen_hess = alpha[k] * (1 - L1_wt)
h = -model.hessian(np.r_[params], **hess_kwds)[0, 0] / nobs + pen_hess
return h
return nploglike, npscore, nphess
| _gen_npfuncs | python | statsmodels/statsmodels | statsmodels/base/elastic_net.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/elastic_net.py | BSD-3-Clause |
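A quick check of what the generated closures compute, using a one-variable OLS model; the data and penalty settings are made-up illustrations.

```python
import numpy as np
import statsmodels.api as sm
from statsmodels.base.elastic_net import _gen_npfuncs

rng = np.random.default_rng(1)
x = rng.normal(size=(100, 1))
y = 2.0 * x[:, 0] + rng.normal(size=100)
model = sm.OLS(y, x)

f, g, h = _gen_npfuncs(0, L1_wt=0.5, alpha=np.array([0.1]),
                       loglike_kwds={}, score_kwds={}, hess_kwds={})
params = np.array([2.0])
# Objective is -loglike/nobs plus the smooth term alpha*(1-L1_wt)*params**2/2.
print(f(params, model), g(params, model), h(params, model))
```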
def fit_elasticnet(model, method="coord_descent", maxiter=100,
alpha=0., L1_wt=1., start_params=None, cnvrg_tol=1e-7,
zero_tol=1e-8, refit=False, check_step=True,
loglike_kwds=None, score_kwds=None, hess_kwds=None):
"""
Return an elastic net regularized fit to a regression model.
Parameters
----------
model : model object
A statsmodels object implementing ``loglike``, ``score``, and
``hessian``.
method : {'coord_descent'}
Only the coordinate descent algorithm is implemented.
maxiter : int
The maximum number of iteration cycles (an iteration cycle
involves running coordinate descent on all variables).
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is
a ridge fit, if 1 it is a lasso fit.
start_params : array_like
Starting values for `params`.
cnvrg_tol : scalar
If `params` changes by less than this amount (in sup-norm)
in one iteration cycle, the algorithm terminates with
convergence.
zero_tol : scalar
Any estimated coefficient smaller than this value is
replaced with zero.
refit : bool
If True, the model is refit using only the variables that have
non-zero coefficients in the regularized fit. The refitted
model is not regularized.
check_step : bool
If True, confirm that the first step is an improvement and search
further if it is not.
loglike_kwds : dict-like or None
Keyword arguments for the log-likelihood function.
score_kwds : dict-like or None
Keyword arguments for the score function.
hess_kwds : dict-like or None
Keyword arguments for the Hessian function.
Returns
-------
Results
A results object.
Notes
-----
The ``elastic net`` penalty is a combination of L1 and L2
penalties.
The function that is minimized is:
-loglike/n + alpha*((1-L1_wt)*|params|_2^2/2 + L1_wt*|params|_1)
where |*|_1 and |*|_2 are the L1 and L2 norms.
The computational approach used here is to obtain a quadratic
approximation to the smooth part of the target function:
-loglike/n + alpha*(1-L1_wt)*|params|_2^2/2
then repeatedly optimize the L1 penalized version of this function
along coordinate axes.
"""
k_exog = model.exog.shape[1]
loglike_kwds = {} if loglike_kwds is None else loglike_kwds
score_kwds = {} if score_kwds is None else score_kwds
hess_kwds = {} if hess_kwds is None else hess_kwds
if np.isscalar(alpha):
alpha = alpha * np.ones(k_exog)
# Define starting params
if start_params is None:
params = np.zeros(k_exog)
else:
params = start_params.copy()
btol = 1e-4
params_zero = np.zeros(len(params), dtype=bool)
init_args = model._get_init_kwds()
# we do not need a copy of init_args b/c get_init_kwds provides new dict
init_args['hasconst'] = False
model_offset = init_args.pop('offset', None)
if 'exposure' in init_args and init_args['exposure'] is not None:
if model_offset is None:
model_offset = np.log(init_args.pop('exposure'))
else:
model_offset += np.log(init_args.pop('exposure'))
fgh_list = [
_gen_npfuncs(k, L1_wt, alpha, loglike_kwds, score_kwds, hess_kwds)
for k in range(k_exog)]
converged = False
for itr in range(maxiter):
# Sweep through the parameters
params_save = params.copy()
for k in range(k_exog):
# Under the active set method, if a parameter becomes
# zero we do not try to change it again.
# TODO : give the user the option to switch this off
if params_zero[k]:
continue
# Set the offset to account for the variables that are
# being held fixed in the current coordinate
# optimization.
params0 = params.copy()
params0[k] = 0
offset = np.dot(model.exog, params0)
if model_offset is not None:
offset += model_offset
# Create a one-variable model for optimization.
model_1var = model.__class__(
model.endog, model.exog[:, k], offset=offset, **init_args)
# Do the one-dimensional optimization.
func, grad, hess = fgh_list[k]
params[k] = _opt_1d(
func, grad, hess, model_1var, params[k], alpha[k]*L1_wt,
tol=btol, check_step=check_step)
# Update the active set
if itr > 0 and np.abs(params[k]) < zero_tol:
params_zero[k] = True
params[k] = 0.
# Check for convergence
pchange = np.max(np.abs(params - params_save))
if pchange < cnvrg_tol:
converged = True
break
# Set approximate zero coefficients to be exactly zero
params[np.abs(params) < zero_tol] = 0
if not refit:
results = RegularizedResults(model, params)
results.converged = converged
return RegularizedResultsWrapper(results)
# Fit the reduced model to get standard errors and other
# post-estimation results.
ii = np.flatnonzero(params)
cov = np.zeros((k_exog, k_exog))
init_args = {k: getattr(model, k, None) for k in model._init_keys}
if len(ii) > 0:
model1 = model.__class__(
model.endog, model.exog[:, ii], **init_args)
rslt = model1.fit()
params[ii] = rslt.params
cov[np.ix_(ii, ii)] = rslt.normalized_cov_params
else:
# Hack: no variables were selected but we need to run fit in
# order to get the correct results class. So just fit a model
# with one variable.
model1 = model.__class__(model.endog, model.exog[:, 0], **init_args)
rslt = model1.fit(maxiter=0)
# fit may return a results or a results wrapper
if issubclass(rslt.__class__, wrap.ResultsWrapper):
klass = rslt._results.__class__
else:
klass = rslt.__class__
# Not all models have a scale
if hasattr(rslt, 'scale'):
scale = rslt.scale
else:
scale = 1.
# The degrees of freedom should reflect the number of parameters
# in the refit model, not including the zeros that are displayed
# to indicate which variables were dropped. See issue #1723 for
# discussion about setting df parameters in model and results
# classes.
p, q = model.df_model, model.df_resid
model.df_model = len(ii)
model.df_resid = model.nobs - model.df_model
# Assuming a standard signature for creating results classes.
refit = klass(model, params, cov, scale=scale)
refit.regularized = True
refit.converged = converged
refit.method = method
refit.fit_history = {'iteration': itr + 1}
# Restore df in model class, see issue #1723 for discussion.
model.df_model, model.df_resid = p, q
return refit
| fit_elasticnet | python | statsmodels/statsmodels | statsmodels/base/elastic_net.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/elastic_net.py | BSD-3-Clause |
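In practice this routine is normally reached through a model's `fit_regularized` method rather than called directly; a minimal sketch on synthetic data (settings illustrative):

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(2)
X = rng.normal(size=(200, 10))
y = X[:, 0] - 0.5 * X[:, 3] + rng.normal(size=200)

# L1_wt=1 gives a lasso fit, L1_wt=0 a ridge fit; values between mix the two.
res = sm.OLS(y, X).fit_regularized(method="elastic_net",
                                   alpha=0.05, L1_wt=0.9, refit=True)
print(res.params)
```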
def _opt_1d(func, grad, hess, model, start, L1_wt, tol,
check_step=True):
"""
One-dimensional helper for elastic net.
Parameters
----------
func : function
A smooth function of a single variable to be optimized
with an L1 penalty.
grad : function
The gradient of `func`.
hess : function
The Hessian of `func`.
model : statsmodels model
The model being fit.
start : real
A starting value for the function argument
L1_wt : non-negative real
The weight for the L1 penalty function.
tol : non-negative real
A convergence threshold.
check_step : bool
If True, check that the first step is an improvement and
fall back to a line search if it is not. If False, return
after the first step regardless.
Notes
-----
``func``, ``grad``, and ``hess`` have argument signature (x,
model), where ``x`` is a point in the parameter space and
``model`` is the model being fit.
If the log-likelihood for the model is exactly quadratic, the
global minimum is returned in one step. Otherwise a numerical
line search (Brent's method) is used.
Returns
-------
The argmin of the objective function.
"""
# Overview:
# We want to minimize L(x) + L1_wt*abs(x), where L() is a smooth
# loss function that includes the log-likelihood and L2 penalty.
# This is a 1-dimensional optimization. If L(x) is exactly
# quadratic we can solve for the argmin exactly. Otherwise we
# approximate L(x) with a quadratic function Q(x) and try to use
# the minimizer of Q(x) + L1_wt*abs(x). But if this yields an
# uphill step for the actual target function L(x) + L1_wt*abs(x),
then we fall back to an expensive line search. The line search
# is never needed for OLS.
x = start
f = func(x, model)
b = grad(x, model)
c = hess(x, model)
d = b - c*x
# The optimum is achieved by hard thresholding to zero
if L1_wt > np.abs(d):
return 0.
# x + h is the minimizer of the Q(x) + L1_wt*abs(x)
if d >= 0:
h = (L1_wt - b) / c
elif d < 0:
h = -(L1_wt + b) / c
else:
return np.nan
# If the new point is not uphill for the target function, take it
# and return. This check is a bit expensive and unnecessary for
# OLS
if not check_step:
return x + h
f1 = func(x + h, model) + L1_wt*np.abs(x + h)
if f1 <= f + L1_wt*np.abs(x) + 1e-10:
return x + h
# Fallback for models where the loss is not quadratic
from scipy.optimize import brent
x_opt = brent(func, args=(model,), brack=(x-1, x+1), tol=tol)
return x_opt
| _opt_1d | python | statsmodels/statsmodels | statsmodels/base/elastic_net.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/elastic_net.py | BSD-3-Clause |
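For a purely quadratic loss the Newton step above reduces to soft thresholding; a small standalone check with a made-up loss L(x) = (x - 2)^2 / 2, where the `model` argument is unused and can be None:

```python
import numpy as np
from statsmodels.base.elastic_net import _opt_1d

func = lambda x, model: 0.5 * (x - 2.0) ** 2   # loss
grad = lambda x, model: x - 2.0                # gradient
hess = lambda x, model: 1.0                    # Hessian

# Soft threshold of the unpenalized minimum 2.0 by L1_wt=0.5 -> 1.5.
print(_opt_1d(func, grad, hess, None, start=0.0, L1_wt=0.5, tol=1e-8))
```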
def fittedvalues(self):
"""
The predicted values from the model at the estimated parameters.
"""
return self.model.predict(self.params)
| fittedvalues | python | statsmodels/statsmodels | statsmodels/base/elastic_net.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/elastic_net.py | BSD-3-Clause |
def _data_gen(endog, exog, partitions):
"""partitions data"""
n_exog = exog.shape[0]
n_part = np.floor(n_exog / partitions)
rem = n_exog - n_part * partitions
stp = 0
while stp < (partitions - 1):
ii = int(n_part * stp)
jj = int(n_part * (stp + 1))
yield endog[ii:jj], exog[ii:jj, :]
stp += 1
ii = int(n_part * stp)
jj = int(n_part * (stp + 1) + rem)
yield endog[ii:jj], exog[ii:jj, :]
| _data_gen | python | statsmodels/statsmodels | statsmodels/base/tests/test_distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/tests/test_distributed_estimation.py | BSD-3-Clause |
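A quick sanity check of the partition sizes, assuming `_data_gen` as defined above and illustrative arrays: the first partitions get floor(n/partitions) rows and the last absorbs the remainder.

```python
import numpy as np

y = np.arange(10.0)
X = np.arange(20.0).reshape(10, 2)
print([len(e) for e, _ in _data_gen(y, X, 3)])  # [3, 3, 4]
```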
def update(self):
"""
Cycle through all Gibbs updates.
"""
self.update_data()
# Need to update data first
self.update_mean()
self.update_cov()
| update | python | statsmodels/statsmodels | statsmodels/imputation/bayes_mi.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/bayes_mi.py | BSD-3-Clause |
def update_data(self):
"""
Gibbs update of the missing data values.
"""
for ix in self.patterns:
i = ix[0]
ix_miss = np.flatnonzero(self.mask[i, :])
ix_obs = np.flatnonzero(~self.mask[i, :])
mm = self.mean[ix_miss]
mo = self.mean[ix_obs]
voo = self.cov[ix_obs, :][:, ix_obs]
vmm = self.cov[ix_miss, :][:, ix_miss]
vmo = self.cov[ix_miss, :][:, ix_obs]
r = self._data[ix, :][:, ix_obs] - mo
cm = mm + np.dot(vmo, np.linalg.solve(voo, r.T)).T
cv = vmm - np.dot(vmo, np.linalg.solve(voo, vmo.T))
cs = np.linalg.cholesky(cv)
u = np.random.normal(size=(len(ix), len(ix_miss)))
self._data[np.ix_(ix, ix_miss)] = cm + np.dot(u, cs.T)
# Set the user-visible data set.
if self.exog_names is not None:
self.data = pd.DataFrame(
self._data,
columns=self.exog_names,
copy=False)
else:
self.data = self._data
| update_data | python | statsmodels/statsmodels | statsmodels/imputation/bayes_mi.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/bayes_mi.py | BSD-3-Clause |
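The missing block is drawn from the standard conditional-normal identity; a standalone numpy sketch of that identity with toy numbers, independent of the class:

```python
import numpy as np

# x_miss | x_obs ~ N(m_m + V_mo V_oo^{-1} (x_o - m_o),
#                    V_mm - V_mo V_oo^{-1} V_om)
mean = np.array([0.0, 1.0, 2.0])
cov = np.array([[1.0, 0.3, 0.2],
                [0.3, 1.0, 0.4],
                [0.2, 0.4, 1.0]])
ix_obs, ix_miss = [0, 1], [2]
x_obs = np.array([0.5, 1.5])

vmo = cov[np.ix_(ix_miss, ix_obs)]
voo = cov[np.ix_(ix_obs, ix_obs)]
cm = mean[ix_miss] + vmo @ np.linalg.solve(voo, x_obs - mean[ix_obs])
cv = cov[np.ix_(ix_miss, ix_miss)] - vmo @ np.linalg.solve(voo, vmo.T)
draw = cm + np.linalg.cholesky(cv) @ np.random.normal(size=len(ix_miss))
```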
def update_mean(self):
"""
Gibbs update of the mean vector.
Do not call until update_data has been called once.
"""
# https://stats.stackexchange.com/questions/28744/multivariate-normal-posterior
# Posterior covariance matrix of the mean
cm = np.linalg.solve(self.cov/self.nobs + self.mean_prior,
self.mean_prior / self.nobs)
cm = np.dot(self.cov, cm)
# Posterior mean of the mean
vm = np.linalg.solve(self.cov, self._data.sum(0))
vm = np.dot(cm, vm)
# Sample
r = np.linalg.cholesky(cm)
self.mean = vm + np.dot(r, np.random.normal(0, 1, self.nvar))
| update_mean | python | statsmodels/statsmodels | statsmodels/imputation/bayes_mi.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/bayes_mi.py | BSD-3-Clause |
def update_cov(self):
"""
Gibbs update of the covariance matrix.
Do not call until update_data has been called once.
"""
# https://stats.stackexchange.com/questions/50844/estimating-the-covariance-posterior-distribution-of-a-multivariate-gaussian
r = self._data - self.mean
gr = np.dot(r.T, r)
a = gr + self.cov_prior
df = int(np.ceil(self.nobs + self.cov_prior_df))
r = np.linalg.cholesky(np.linalg.inv(a))
x = np.dot(np.random.normal(size=(df, self.nvar)), r.T)
ma = np.dot(x.T, x)
self.cov = np.linalg.inv(ma)
| update_cov | python | statsmodels/statsmodels | statsmodels/imputation/bayes_mi.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/bayes_mi.py | BSD-3-Clause |
def fit(self, results_cb=None):
"""
Impute datasets, fit models, and pool results.
Parameters
----------
results_cb : function, optional
If provided, each results instance r is passed through `results_cb`,
then appended to the `results` attribute of the MIResults object.
To save complete results, use `results_cb=lambda x: x`. The default
behavior is to save no results.
Returns
-------
A MIResults object.
"""
par, cov = [], []
all_results = []
for k in range(self.nrep):
for _ in range(self.skip + 1):
self.imp.update()
da = self.imp.data
if self.xfunc is not None:
da = self.xfunc(da)
if self.formula is None:
model = self.model(*self.model_args_fn(da),
**self.model_kwds_fn(da))
else:
model = self.model.from_formula(
self.formula, *self.model_args_fn(da),
**self.model_kwds_fn(da))
result = model.fit(*self.fit_args(da), **self.fit_kwds(da))
if results_cb is not None:
all_results.append(results_cb(result))
par.append(np.asarray(result.params.copy()))
cov.append(np.asarray(result.cov_params().copy()))
params, cov_params, fmi = self._combine(par, cov)
r = MIResults(self, model, params, cov_params)
r.fmi = fmi
r.results = all_results
return r
| fit | python | statsmodels/statsmodels | statsmodels/imputation/bayes_mi.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/bayes_mi.py | BSD-3-Clause |
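A minimal end-to-end sketch in the spirit of the statsmodels examples; the missingness rate, seed, and OLS choice are illustrative assumptions.

```python
import numpy as np
import statsmodels.api as sm
from statsmodels.imputation.bayes_mi import BayesGaussMI, MI

rng = np.random.default_rng(3)
x = rng.standard_normal((500, 3))
x.flat[rng.random(x.size) < 0.1] = np.nan  # ~10% missing at random

def model_args_fn(d):
    # First column as endog, remaining columns as exog.
    return d[:, 0], d[:, 1:]

mi = MI(BayesGaussMI(x), sm.OLS, model_args_fn)
res = mi.fit()
print(res.summary())
```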
def summary(self, title=None, alpha=.05):
"""
Summarize the results of running multiple imputation.
Parameters
----------
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
Significance level for the confidence intervals
Returns
-------
smry : Summary instance
This holds the summary tables and text, which can be
printed or converted to various output formats.
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
float_format = "%8.3f"
info = {}
info["Method:"] = "MI"
info["Model:"] = self.mi.model.__name__
info["Dependent variable:"] = self._model.endog_names
info["Sample size:"] = "%d" % self.mi.imp.data.shape[0]
info["Num. imputations"] = "%d" % self.mi.nrep
smry.add_dict(info, align='l', float_format=float_format)
param = summary2.summary_params(self, alpha=alpha)
param["FMI"] = self.fmi
smry.add_df(param, float_format=float_format)
smry.add_title(title=title, results=self)
return smry
| summary | python | statsmodels/statsmodels | statsmodels/imputation/bayes_mi.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/bayes_mi.py | BSD-3-Clause |
def _ros_sort(df, observations, censorship, warn=False):
"""
This function prepares a dataframe for ROS.
It sorts ascending with
left-censored observations first. Censored observations larger than
the maximum uncensored observations are removed from the dataframe.
Parameters
----------
df : DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
Name of the column in the dataframe that indicates that a
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
Returns
-------
sorted_df : DataFrame
The sorted dataframe with all columns dropped except the
observation and censorship columns.
"""
# separate uncensored data from censored data
censored = df[df[censorship]].sort_values(observations, axis=0)
uncensored = df[~df[censorship]].sort_values(observations, axis=0)
if censored[observations].max() > uncensored[observations].max():
censored = censored[censored[observations] <= uncensored[observations].max()]
if warn:
msg = ("Dropping censored observations greater than "
"the max uncensored observation.")
warnings.warn(msg)
combined = pd.concat([censored, uncensored], axis=0)
return combined[[observations, censorship]].reset_index(drop=True)
| _ros_sort | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def nuncen_above(row):
""" A, the number of uncensored obs above the given threshold.
"""
# index of observations above the lower_dl DL
above = df[observations] >= row['lower_dl']
# index of observations below the upper_dl DL
below = df[observations] < row['upper_dl']
# index of non-detect observations
detect = ~df[censorship]
# return the number of observations where all conditions are True
return df[above & below & detect].shape[0]
| cohn_numbers.nuncen_above | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def nobs_below(row):
""" B, the number of observations (cen & uncen) below the given
threshold
"""
# index of data less than the lower_dl DL
less_than = df[observations] < row['lower_dl']
# index of data less than or equal to the lower_dl DL
less_thanequal = df[observations] <= row['lower_dl']
# index of detects, non-detects
uncensored = ~df[censorship]
censored = df[censorship]
# number of observations less than or equal to lower_dl DL and non-detect
LTE_censored = df[less_thanequal & censored].shape[0]
# number of observations less than lower_dl DL and detected
LT_uncensored = df[less_than & uncensored].shape[0]
# return the sum
return LTE_censored + LT_uncensored
| cohn_numbers.nobs_below | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def ncen_equal(row):
""" C, the number of censored observations at the given
threshold.
"""
censored_index = df[censorship]
censored_data = df[observations][censored_index]
censored_below = censored_data == row['lower_dl']
return censored_below.sum()
| cohn_numbers.ncen_equal | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def set_upper_limit(cohn):
""" Sets the upper_dl DL for each row of the Cohn dataframe. """
if cohn.shape[0] > 1:
return cohn['lower_dl'].shift(-1).fillna(value=np.inf)
else:
return [np.inf]
| cohn_numbers.set_upper_limit | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def compute_PE(A, B):
""" Computes the probability of excedance for each row of the
Cohn dataframe. """
N = len(A)
PE = np.empty(N, dtype='float64')
PE[-1] = 0.0
for j in range(N-2, -1, -1):
PE[j] = PE[j+1] + (1 - PE[j+1]) * A[j] / (A[j] + B[j])
return PE
| cohn_numbers.compute_PE | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def cohn_numbers(df, observations, censorship):
r"""
Computes the Cohn numbers for the detection limits in the dataset.
The Cohn Numbers are:
- :math:`A_j =` the number of uncensored obs above the jth
threshold.
- :math:`B_j =` the number of observations (cen & uncen) below
the jth threshold.
- :math:`C_j =` the number of censored observations at the jth
threshold.
- :math:`\mathrm{PE}_j =` the probability of exceeding the jth
threshold
- :math:`\mathrm{DL}_j =` the unique, sorted detection limits
- :math:`\mathrm{DL}_{j+1} = \mathrm{DL}_j` shifted down a
single index (row)
Parameters
----------
dataframe : DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
Returns
-------
cohn : DataFrame
"""
def nuncen_above(row):
""" A, the number of uncensored obs above the given threshold.
"""
# index of observations above the lower_dl DL
above = df[observations] >= row['lower_dl']
# index of observations below the upper_dl DL
below = df[observations] < row['upper_dl']
# index of non-detect observations
detect = ~df[censorship]
# return the number of observations where all conditions are True
return df[above & below & detect].shape[0]
def nobs_below(row):
""" B, the number of observations (cen & uncen) below the given
threshold
"""
# index of data less than the lower_dl DL
less_than = df[observations] < row['lower_dl']
# index of data less than or equal to the lower_dl DL
less_thanequal = df[observations] <= row['lower_dl']
# index of detects, non-detects
uncensored = ~df[censorship]
censored = df[censorship]
# number of observations less than or equal to lower_dl DL and non-detect
LTE_censored = df[less_thanequal & censored].shape[0]
# number of observations less than lower_dl DL and detected
LT_uncensored = df[less_than & uncensored].shape[0]
# return the sum
return LTE_censored + LT_uncensored
def ncen_equal(row):
""" C, the number of censored observations at the given
threshold.
"""
censored_index = df[censorship]
censored_data = df[observations][censored_index]
censored_below = censored_data == row['lower_dl']
return censored_below.sum()
def set_upper_limit(cohn):
""" Sets the upper_dl DL for each row of the Cohn dataframe. """
if cohn.shape[0] > 1:
return cohn['lower_dl'].shift(-1).fillna(value=np.inf)
else:
return [np.inf]
def compute_PE(A, B):
""" Computes the probability of excedance for each row of the
Cohn dataframe. """
N = len(A)
PE = np.empty(N, dtype='float64')
PE[-1] = 0.0
for j in range(N-2, -1, -1):
PE[j] = PE[j+1] + (1 - PE[j+1]) * A[j] / (A[j] + B[j])
return PE
# unique, sorted detection limits
censored_data = df[censorship]
DLs = pd.unique(df.loc[censored_data, observations])
DLs.sort()
# if there is a observations smaller than the minimum detection limit,
# add that value to the array
if DLs.shape[0] > 0:
if df[observations].min() < DLs.min():
DLs = np.hstack([df[observations].min(), DLs])
# create a dataframe
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
cohn = pd.DataFrame(DLs, columns=['lower_dl'])
cohn.loc[:, 'upper_dl'] = set_upper_limit(cohn)
cohn.loc[:, 'nuncen_above'] = cohn.apply(nuncen_above, axis=1)
cohn.loc[:, 'nobs_below'] = cohn.apply(nobs_below, axis=1)
cohn.loc[:, 'ncen_equal'] = cohn.apply(ncen_equal, axis=1)
cohn = cohn.reindex(range(DLs.shape[0] + 1))
cohn.loc[:, 'prob_exceedance'] = compute_PE(cohn['nuncen_above'], cohn['nobs_below'])
else:
dl_cols = ['lower_dl', 'upper_dl', 'nuncen_above',
'nobs_below', 'ncen_equal', 'prob_exceedance']
cohn = pd.DataFrame(np.empty((0, len(dl_cols))), columns=dl_cols)
return cohn
| cohn_numbers | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def _detection_limit_index(obs, cohn):
"""
Locates the corresponding detection limit for each observation.
Basically, creates an array of indices for the detection limits
(Cohn numbers) corresponding to each data point.
Parameters
----------
obs : float
A single observation from the larger dataset.
cohn : DataFrame
DataFrame of Cohn numbers.
Returns
-------
det_limit_index : int
The index of the corresponding detection limit in `cohn`
See Also
--------
cohn_numbers
"""
if cohn.shape[0] > 0:
index, = np.where(cohn['lower_dl'] <= obs)
det_limit_index = index[-1]
else:
det_limit_index = 0
return det_limit_index
| _detection_limit_index | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def _ros_group_rank(df, dl_idx, censorship):
"""
Ranks each observation within the data groups.
In this case, the groups are defined by the record's detection
limit index and censorship status.
Parameters
----------
df : DataFrame
dl_idx : str
Name of the column in the dataframe that contains the index of the
observations' corresponding detection limit in the `cohn`
dataframe.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
Returns
-------
ranks : ndarray
Array of ranks for the dataset.
"""
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
ranks = df.copy()
ranks.loc[:, 'rank'] = 1
ranks = (
ranks.groupby(by=[dl_idx, censorship])['rank']
.transform(lambda g: g.cumsum())
)
return ranks
| _ros_group_rank | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def _ros_plot_pos(row, censorship, cohn):
"""
ROS-specific plotting positions.
Computes the plotting position for an observation based on its rank,
censorship status, and detection limit index.
Parameters
----------
row : {Series, dict}
Full observation (row) from a censored dataset. Requires a
'rank', 'detection_limit', and `censorship` column.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
cohn : DataFrame
DataFrame of Cohn numbers.
Returns
-------
plotting_position : float
See Also
--------
cohn_numbers
"""
DL_index = row['det_limit_index']
rank = row['rank']
censored = row[censorship]
dl_1 = cohn.iloc[DL_index]
dl_2 = cohn.iloc[DL_index + 1]
if censored:
return (1 - dl_1['prob_exceedance']) * rank / (dl_1['ncen_equal']+1)
else:
return (1 - dl_1['prob_exceedance']) + (dl_1['prob_exceedance'] - dl_2['prob_exceedance']) * \
rank / (dl_1['nuncen_above']+1)
| _ros_plot_pos | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def _norm_plot_pos(observations):
"""
Computes standard normal (Gaussian) plotting positions using scipy.
Parameters
----------
observations : array_like
Sequence of observed quantities.
Returns
-------
plotting_position : array of floats
"""
ppos, sorted_res = stats.probplot(observations, fit=False)
return stats.norm.cdf(ppos)
| _norm_plot_pos | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def plotting_positions(df, censorship, cohn):
"""
Compute the plotting positions for the observations.
The ROS-specific plotting positions are based on the observations'
rank, censorship status, and corresponding detection limit.
Parameters
----------
df : DataFrame
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
cohn : DataFrame
DataFrame of Cohn numbers.
Returns
-------
plotting_position : array of float
See Also
--------
cohn_numbers
"""
plot_pos = df.apply(lambda r: _ros_plot_pos(r, censorship, cohn), axis=1)
# correctly sort the plotting positions of the ND data:
ND_plotpos = plot_pos[df[censorship]]
ND_plotpos_arr = np.sort(np.array(ND_plotpos))
plot_pos.loc[df[censorship].index[df[censorship]]] = ND_plotpos_arr
return plot_pos
| plotting_positions | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def _impute(df, observations, censorship, transform_in, transform_out):
"""
Executes the basic regression on order statistics (ROS) procedure.
Uses ROS to impute censored values from the best-fit line of a
probability plot of the uncensored values.
Parameters
----------
df : DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
transform_in, transform_out : callable
Transformations to be applied to the data prior to fitting
the line and after estimated values from that line. Typically,
`np.log` and `np.exp` are used, respectively.
Returns
-------
estimated : DataFrame
A new dataframe with two new columns: "estimated" and "final".
The "estimated" column contains of the values inferred from the
best-fit line. The "final" column contains the estimated values
only where the original observations were censored, and the original
observations everwhere else.
"""
# detect/non-detect selectors
uncensored_mask = ~df[censorship]
censored_mask = df[censorship]
# fit a line to the logs of the detected data
fit_params = stats.linregress(
df['Zprelim'][uncensored_mask],
transform_in(df[observations][uncensored_mask])
)
# pull out the slope and intercept for use later
slope, intercept = fit_params[:2]
# model the data based on the best-fit curve
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
df.loc[:, 'estimated'] = transform_out(slope * df['Zprelim'][censored_mask] + intercept)
df.loc[:, 'final'] = np.where(df[censorship], df['estimated'], df[observations])
return df
| _impute | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def _do_ros(df, observations, censorship, transform_in, transform_out):
"""
DataFrame-centric function to impute censored values with ROS.
Prepares a dataframe for, and then estimates the values of, a censored
dataset using Regression on Order Statistics.
Parameters
----------
df : DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
Name of the column in the dataframe that indicates that an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
transform_in, transform_out : callable
Transformations to be applied to the data prior to fitting
the line and after estimated values from that line. Typically,
`np.log` and `np.exp` are used, respectively.
Returns
-------
estimated : DataFrame
A new dataframe with two new columns: "estimated" and "final".
The "estimated" column contains of the values inferred from the
best-fit line. The "final" column contains the estimated values
only where the original observations were censored, and the original
observations everywhere else.
"""
# compute the Cohn numbers
cohn = cohn_numbers(df, observations=observations, censorship=censorship)
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
modeled = _ros_sort(df, observations=observations, censorship=censorship)
modeled.loc[:, 'det_limit_index'] = modeled[observations].apply(_detection_limit_index, args=(cohn,))
modeled.loc[:, 'rank'] = _ros_group_rank(modeled, 'det_limit_index', censorship)
modeled.loc[:, 'plot_pos'] = plotting_positions(modeled, censorship, cohn)
modeled.loc[:, 'Zprelim'] = stats.norm.ppf(modeled['plot_pos'])
return _impute(modeled, observations, censorship, transform_in, transform_out)
| _do_ros | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
def impute_ros(observations, censorship, df=None, min_uncensored=2,
max_fraction_censored=0.8, substitution_fraction=0.5,
transform_in=np.log, transform_out=np.exp,
as_array=True):
"""
Impute censored dataset using Regression on Order Statistics (ROS).
Method described in *Nondetects and Data Analysis* by Dennis R.
Helsel (John Wiley, 2005) to estimate the left-censored (non-detect)
values of a dataset. When there is insufficient non-censored data,
simple substitution is used.
Parameters
----------
observations : str or array-like
Label of the column or the float array of censored observations
censorship : str
Label of the column or the bool array of the censorship
status of the observations.
* True if censored,
* False if uncensored
df : DataFrame, optional
If `observations` and `censorship` are labels, this is the
DataFrame that contains those columns.
min_uncensored : int (default is 2)
The minimum number of uncensored values required before ROS
can be used to impute the censored observations. When this
criterion is not met, simple substitution is used instead.
max_fraction_censored : float (default is 0.8)
The maximum fraction of censored data below which ROS can be
used to impute the censored observations. When this fraction is
exceeded, simple substitution is used instead.
substitution_fraction : float (default is 0.5)
The fraction of the detection limit to be used during simple
substitution of the censored values.
transform_in : callable (default is np.log)
Transformation to be applied to the values prior to fitting a
line to the plotting positions vs. uncensored values.
transform_out : callable (default is np.exp)
Transformation to be applied to the imputed censored values
estimated from the previously computed best-fit line.
as_array : bool (default is True)
When True, a numpy array of the imputed observations is
returned. Otherwise, a modified copy of the original dataframe
with all of the intermediate calculations is returned.
Returns
-------
imputed : {ndarray, DataFrame}
The final observations where the censored values have either been
imputed through ROS or substituted as a fraction of the
detection limit.
Notes
-----
This function requires pandas 0.14 or more recent.
"""
# process arrays into a dataframe, if necessary
if df is None:
df = pd.DataFrame({'obs': observations, 'cen': censorship})
observations = 'obs'
censorship = 'cen'
# basic counts/metrics of the dataset
N_observations = df.shape[0]
N_censored = df[censorship].astype(int).sum()
N_uncensored = N_observations - N_censored
fraction_censored = N_censored / N_observations
# add plotting positions if there are no censored values
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
if N_censored == 0:
output = df[[observations, censorship]].copy()
output.loc[:, 'final'] = df[observations]
# substitute w/ fraction of the DLs if there's insufficient
# uncensored data
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
elif (N_uncensored < min_uncensored) or (fraction_censored > max_fraction_censored):
output = df[[observations, censorship]].copy()
output.loc[:, 'final'] = df[observations]
output.loc[df[censorship], 'final'] *= substitution_fraction
# normal ROS stuff
else:
output = _do_ros(df, observations, censorship, transform_in, transform_out)
# convert to an array if necessary
if as_array:
output = output['final'].values
return output | Impute censored dataset using Regression on Order Statistics (ROS).
Method described in *Nondetects and Data Analysis* by Dennis R.
Helsel (John Wiley, 2005) to estimate the left-censored (non-detect)
values of a dataset. When there is insufficient non-censored data,
simple substitution is used.
Parameters
----------
observations : str or array-like
Label of the column or the float array of censored observations
censorship : str
Label of the column or the bool array of the censorship
status of the observations.
* True if censored,
* False if uncensored
df : DataFrame, optional
If `observations` and `censorship` are labels, this is the
DataFrame that contains those columns.
min_uncensored : int (default is 2)
The minimum number of uncensored values required before ROS
can be used to impute the censored observations. When this
criterion is not met, simple substitution is used instead.
max_fraction_censored : float (default is 0.8)
The maximum fraction of censored data below which ROS can be
used to impute the censored observations. When this fraction is
exceeded, simple substitution is used instead.
substitution_fraction : float (default is 0.5)
The fraction of the detection limit to be used during simple
substitution of the censored values.
transform_in : callable (default is np.log)
Transformation to be applied to the values prior to fitting a
line to the plotting positions vs. uncensored values.
transform_out : callable (default is np.exp)
Transformation to be applied to the imputed censored values
estimated from the previously computed best-fit line.
as_array : bool (default is True)
When True, a numpy array of the imputed observations is
returned. Otherwise, a modified copy of the original dataframe
with all of the intermediate calculations is returned.
Returns
-------
imputed : {ndarray, DataFrame}
The final observations where the censored values have either been
imputed through ROS or substituted as a fraction of the
detection limit.
Notes
-----
This function requires pandas 0.14 or more recent. | impute_ros | python | statsmodels/statsmodels | statsmodels/imputation/ros.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/ros.py | BSD-3-Clause |
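A minimal usage sketch for `impute_ros`; the column names and values below are invented for illustration, and the toy data satisfies the defaults (`min_uncensored=2`, `max_fraction_censored=0.8`) so the ROS path is taken. Censored rows hold their detection limits.

import pandas as pd

from statsmodels.imputation.ros import impute_ros

df = pd.DataFrame({
    # censored entries store the detection (upper) limit
    "conc": [2.0, 4.2, 4.6, 5.6, 5.7, 5.9, 6.7, 6.8, 6.9, 7.5],
    "cen": [True, False, True, False, False,
            True, False, True, False, False],
})

# ndarray of final values: ROS-imputed where censored, observed elsewhere
final = impute_ros("conc", "cen", df=df)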
def next_sample(self):
"""
Returns the next imputed dataset in the imputation process.
Returns
-------
data : array_like
An imputed dataset from the MICE chain.
Notes
-----
`MICEData` does not have a `skip` parameter. Consecutive
values returned by `next_sample` are immediately consecutive
in the imputation chain.
The returned value is a reference to the data attribute of
the class and should be copied before making any changes.
"""
self.update_all(1)
return self.data | Returns the next imputed dataset in the imputation process.
Returns
-------
data : array_like
An imputed dataset from the MICE chain.
Notes
-----
`MICEData` does not have a `skip` parameter. Consecutive
values returned by `next_sample` are immediately consecutive
in the imputation chain.
The returned value is a reference to the data attribute of
the class and should be copied before making any changes. | next_sample | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
def _initial_imputation(self):
"""
Use a PMM-like procedure for initial imputed values.
For each variable, missing values are imputed as the observed
value that is closest to the mean over all observed values.
"""
# Changed for pandas 2.0 copy-on-write behavior to use a single
# in-place fill
imp_values = {}
for col in self.data.columns:
di = self.data[col] - self.data[col].mean()
di = np.abs(di)
ix = di.idxmin()
imp_values[col] = self.data[col].loc[ix]
self.data.fillna(imp_values, inplace=True) | Use a PMM-like procedure for initial imputed values.
For each variable, missing values are imputed as the observed
value that is closest to the mean over all observed values. | _initial_imputation | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
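A toy illustration (not library code) of the starting-value rule above: each missing entry is replaced by the observed value closest to the column mean.

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, 4.0, 7.0])
# mean of the observed values is 4.0; the closest observed value is 4.0
closest = s.loc[(s - s.mean()).abs().idxmin()]
s = s.fillna(closest)  # -> [1.0, 4.0, 4.0, 7.0]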
def set_imputer(self, endog_name, formula=None, model_class=None,
init_kwds=None, fit_kwds=None, predict_kwds=None,
k_pmm=20, perturbation_method=None, regularized=False):
"""
Specify the imputation process for a single variable.
Parameters
----------
endog_name : str
Name of the variable to be imputed.
formula : str
Conditional formula for imputation. Defaults to a formula
with main effects for all other variables in dataset. The
formula should only include an expression for the mean
structure, e.g. use 'x1 + x2' not 'x4 ~ x1 + x2'.
model_class : statsmodels model
Conditional model for imputation. Defaults to OLS. See below
for more information.
init_kwds : dict-like
Keyword arguments passed to the model init method.
fit_kwds : dict-like
Keyword arguments passed to the model fit method.
predict_kwds : dict-like
Keyword arguments passed to the model predict method.
k_pmm : int
Determines number of neighboring observations from which
to randomly sample when using predictive mean matching.
perturbation_method : str
Either 'gaussian' or 'bootstrap'. Determines the method
for perturbing parameters in the imputation model. If
None, uses the default specified at class initialization.
regularized : dict
If regularized[name]=True, `fit_regularized` rather than
`fit` is called when fitting imputation models for this
variable. When regularized[name]=True for any variable,
perturbation_method must be set to boot.
Notes
-----
The model class must meet the following conditions:
* A model must have a 'fit' method that returns an object.
* The object returned from `fit` must have a `params` attribute
that is an array-like object.
* The object returned from `fit` must have a cov_params method
that returns a square array-like object.
* The model must have a `predict` method.
"""
if formula is None:
main_effects = [x for x in self.data.columns
if x != endog_name]
fml = endog_name + " ~ " + " + ".join(main_effects)
self.conditional_formula[endog_name] = fml
else:
fml = endog_name + " ~ " + formula
self.conditional_formula[endog_name] = fml
if model_class is None:
self.model_class[endog_name] = OLS
else:
self.model_class[endog_name] = model_class
if init_kwds is not None:
self.init_kwds[endog_name] = init_kwds
if fit_kwds is not None:
self.fit_kwds[endog_name] = fit_kwds
if predict_kwds is not None:
self.predict_kwds[endog_name] = predict_kwds
if perturbation_method is not None:
self.perturbation_method[endog_name] = perturbation_method
self.k_pmm = k_pmm
self.regularized[endog_name] = regularized | Specify the imputation process for a single variable.
Parameters
----------
endog_name : str
Name of the variable to be imputed.
formula : str
Conditional formula for imputation. Defaults to a formula
with main effects for all other variables in dataset. The
formula should only include an expression for the mean
structure, e.g. use 'x1 + x2' not 'x4 ~ x1 + x2'.
model_class : statsmodels model
Conditional model for imputation. Defaults to OLS. See below
for more information.
init_kwds : dict-like
Keyword arguments passed to the model init method.
fit_kwds : dict-like
Keyword arguments passed to the model fit method.
predict_kwds : dict-like
Keyword arguments passed to the model predict method.
k_pmm : int
Determines number of neighboring observations from which
to randomly sample when using predictive mean matching.
perturbation_method : str
Either 'gaussian' or 'bootstrap'. Determines the method
for perturbing parameters in the imputation model. If
None, uses the default specified at class initialization.
regularized : dict
If regularized[name]=True, `fit_regularized` rather than
`fit` is called when fitting imputation models for this
variable. When regularized[name]=True for any variable,
perturbation_method must be set to boot.
Notes
-----
The model class must meet the following conditions:
* A model must have a 'fit' method that returns an object.
* The object returned from `fit` must have a `params` attribute
that is an array-like object.
* The object returned from `fit` must have a cov_params method
that returns a square array-like object.
* The model must have a `predict` method. | set_imputer | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
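A hedged usage sketch for `set_imputer`; the frame `df` and the variable names are hypothetical, and the Poisson GLM simply illustrates a non-default `model_class`.

import statsmodels.api as sm
from statsmodels.imputation.mice import MICEData

imp = MICEData(df)  # df: a DataFrame containing NaNs (assumed defined)
# Impute x1 with a Poisson GLM on a custom mean structure, perturbing
# the imputation-model parameters by bootstrap.
imp.set_imputer("x1", formula="x2 + x3",
                model_class=sm.GLM,
                init_kwds={"family": sm.families.Poisson()},
                perturbation_method="boot")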
def _store_changes(self, col, vals):
"""
Fill in dataset with imputed values.
Parameters
----------
col : str
Name of variable to be filled in.
vals : ndarray
Array of imputed values to use for filling-in missing values.
"""
ix = self.ix_miss[col]
if len(ix) > 0:
self.data.iloc[ix, self.data.columns.get_loc(col)] = np.atleast_1d(vals) | Fill in dataset with imputed values.
Parameters
----------
col : str
Name of variable to be filled in.
vals : ndarray
Array of imputed values to use for filling-in missing values. | _store_changes | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
def update_all(self, n_iter=1):
"""
Perform a specified number of MICE iterations.
Parameters
----------
n_iter : int
The number of updates to perform. Only the result of the
final update will be available.
Notes
-----
The imputed values are stored in the class attribute `self.data`.
"""
for k in range(n_iter):
for vname in self._cycle_order:
self.update(vname)
if self.history_callback is not None:
hv = self.history_callback(self)
self.history.append(hv) | Perform a specified number of MICE iterations.
Parameters
----------
n_iter : int
The number of updates to perform. Only the result of the
final update will be available.
Notes
-----
The imputed values are stored in the class attribute `self.data`. | update_all | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
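A short sketch of driving the chain with `update_all`; the callback and column name are illustrative. `history_callback` is the `MICEData` constructor argument used in the loop above.

from statsmodels.imputation.mice import MICEData

# df: a DataFrame with missing values (assumed defined)
imp = MICEData(df, history_callback=lambda m: m.data["x1"].mean())
imp.update_all(10)   # ten full cycles over all variables
print(imp.history)   # one recorded value per completed cycle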
def get_split_data(self, vname):
"""
Return endog and exog for imputation of a given variable.
Parameters
----------
vname : str
The variable for which the split data is returned.
Returns
-------
endog_obs : DataFrame
Observed values of the variable to be imputed.
exog_obs : DataFrame
Current values of the predictors where the variable to be
imputed is observed.
exog_miss : DataFrame
Current values of the predictors where the variable to be
imputed is missing.
predict_obs_kwds : dict-like
The predict keyword arguments for `vname`, subset to the rows
where `vname` is observed and processed through Patsy as required.
predict_miss_kwds : dict-like
The predict keyword arguments for `vname`, subset to the rows
where `vname` is missing and processed through Patsy as required.
"""
formula = self.conditional_formula[vname]
mgr = FormulaManager()
endog, exog = mgr.get_matrices(formula, self.data, pandas=True)
# Rows with observed endog
ixo = self.ix_obs[vname]
endog_obs = np.require(endog.iloc[ixo], requirements="W")
exog_obs = np.require(exog.iloc[ixo, :], requirements="W")
# Rows with missing endog
ixm = self.ix_miss[vname]
exog_miss = np.require(exog.iloc[ixm, :], requirements="W")
predict_obs_kwds = {}
if vname in self.predict_kwds:
kwds = self.predict_kwds[vname]
predict_obs_kwds = self._process_kwds(kwds, ixo)
predict_miss_kwds = {}
if vname in self.predict_kwds:
kwds = self.predict_kwds[vname]
# subset the kwds to the rows with missing endog (not the observed
# rows) so that they align with `exog_miss`
predict_miss_kwds = self._process_kwds(kwds, ixm)
return (endog_obs, exog_obs, exog_miss, predict_obs_kwds,
predict_miss_kwds) | Return endog and exog for imputation of a given variable.
Parameters
----------
vname : str
The variable for which the split data is returned.
Returns
-------
endog_obs : DataFrame
Observed values of the variable to be imputed.
exog_obs : DataFrame
Current values of the predictors where the variable to be
imputed is observed.
exog_miss : DataFrame
Current values of the predictors where the variable to be
imputed is missing.
predict_obs_kwds : dict-like
The predict keyword arguments for `vname`, subset to the rows
where `vname` is observed and processed through Patsy as required.
predict_miss_kwds : dict-like
The predict keyword arguments for `vname`, subset to the rows
where `vname` is missing and processed through Patsy as required. | get_split_data | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause
def get_fitting_data(self, vname):
"""
Return the data needed to fit a model for imputation.
The data is used to impute variable `vname`, and therefore
only includes cases for which `vname` is observed.
Values of type `PatsyFormula` in `init_kwds` or `fit_kwds` are
processed through Patsy and subset to align with the model's
endog and exog.
Parameters
----------
vname : str
The variable for which the fitting data is returned.
Returns
-------
endog : DataFrame
Observed values of `vname`.
exog : DataFrame
Regression design matrix for imputing `vname`.
init_kwds : dict-like
The init keyword arguments for `vname`, processed through Patsy
as required.
fit_kwds : dict-like
The fit keyword arguments for `vname`, processed through Patsy
as required.
"""
# Rows with observed endog
ix = self.ix_obs[vname]
formula = self.conditional_formula[vname]
mgr = FormulaManager()
endog, exog = mgr.get_matrices(formula, self.data, pandas=True)
endog = np.require(endog.iloc[ix, 0], requirements="W")
exog = np.require(exog.iloc[ix, :], requirements="W")
init_kwds = self._process_kwds(self.init_kwds[vname], ix)
fit_kwds = self._process_kwds(self.fit_kwds[vname], ix)
return endog, exog, init_kwds, fit_kwds | Return the data needed to fit a model for imputation.
The data is used to impute variable `vname`, and therefore
only includes cases for which `vname` is observed.
Values of type `PatsyFormula` in `init_kwds` or `fit_kwds` are
processed through Patsy and subset to align with the model's
endog and exog.
Parameters
----------
vname : str
The variable for which the fitting data is returned.
Returns
-------
endog : DataFrame
Observed values of `vname`.
exog : DataFrame
Regression design matrix for imputing `vname`.
init_kwds : dict-like
The init keyword arguments for `vname`, processed through Patsy
as required.
fit_kwds : dict-like
The fit keyword arguments for `vname`, processed through Patsy
as required. | get_fitting_data | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
def plot_missing_pattern(self, ax=None, row_order="pattern",
column_order="pattern",
hide_complete_rows=False,
hide_complete_columns=False,
color_row_patterns=True):
"""
Generate an image showing the missing data pattern.
Parameters
----------
ax : AxesSubplot
Axes on which to draw the plot.
row_order : str
The method for ordering the rows. Must be one of 'pattern',
'proportion', or 'raw'.
column_order : str
The method for ordering the columns. Must be one of 'pattern',
'proportion', or 'raw'.
hide_complete_rows : bool
If True, rows with no missing values are not drawn.
hide_complete_columns : bool
If True, columns with no missing values are not drawn.
color_row_patterns : bool
If True, color the unique row patterns, otherwise use grey
and white as colors.
Returns
-------
A figure containing a plot of the missing data pattern.
"""
# Create an indicator matrix for missing values.
miss = np.zeros(self.data.shape)
cols = self.data.columns
for j, col in enumerate(cols):
ix = self.ix_miss[col]
miss[ix, j] = 1
# Order the columns as requested
if column_order == "proportion":
ix = np.argsort(miss.mean(0))
elif column_order == "pattern":
cv = np.cov(miss.T)
ix = np.argsort(cv[:, 0])
elif column_order == "raw":
ix = np.arange(len(cols))
else:
raise ValueError(
column_order + " is not an allowed value for `column_order`.")
miss = miss[:, ix]
cols = [cols[i] for i in ix]
# Order the rows as requested
if row_order == "proportion":
ix = np.argsort(miss.mean(1))
elif row_order == "pattern":
x = 2**np.arange(miss.shape[1])
rky = np.dot(miss, x)
ix = np.argsort(rky)
elif row_order == "raw":
ix = np.arange(miss.shape[0])
else:
raise ValueError(
row_order + " is not an allowed value for `row_order`.")
miss = miss[ix, :]
if hide_complete_rows:
ix = np.flatnonzero((miss == 1).any(1))
miss = miss[ix, :]
if hide_complete_columns:
ix = np.flatnonzero((miss == 1).any(0))
miss = miss[:, ix]
cols = [cols[i] for i in ix]
from matplotlib.colors import LinearSegmentedColormap
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
if color_row_patterns:
x = 2**np.arange(miss.shape[1])
rky = np.dot(miss, x)
_, rcol = np.unique(rky, return_inverse=True)
miss *= 1 + rcol[:, None]
ax.imshow(miss, aspect="auto", interpolation="nearest",
cmap='gist_ncar_r')
else:
cmap = LinearSegmentedColormap.from_list("_",
["white", "darkgrey"])
ax.imshow(miss, aspect="auto", interpolation="nearest",
cmap=cmap)
ax.set_ylabel("Cases")
ax.set_xticks(range(len(cols)))
ax.set_xticklabels(cols, rotation=90)
return fig | Generate an image showing the missing data pattern.
Parameters
----------
ax : AxesSubplot
Axes on which to draw the plot.
row_order : str
The method for ordering the rows. Must be one of 'pattern',
'proportion', or 'raw'.
column_order : str
The method for ordering the columns. Must be one of 'pattern',
'proportion', or 'raw'.
hide_complete_rows : bool
If True, rows with no missing values are not drawn.
hide_complete_columns : bool
If True, columns with no missing values are not drawn.
color_row_patterns : bool
If True, color the unique row patterns, otherwise use grey
and white as colors.
Returns
-------
A figure containing a plot of the missing data pattern. | plot_missing_pattern | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
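Usage sketch (assuming `imp` is a MICEData instance as above); the output filename is arbitrary.

fig = imp.plot_missing_pattern(row_order="pattern",
                               hide_complete_rows=True)
fig.savefig("missing_pattern.png")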
def plot_bivariate(self, col1_name, col2_name,
lowess_args=None, lowess_min_n=40,
jitter=None, plot_points=True, ax=None):
"""
Plot observed and imputed values for two variables.
Displays a scatterplot of one variable against another. The
points are colored according to whether the values are
observed or imputed.
Parameters
----------
col1_name : str
The variable to be plotted on the horizontal axis.
col2_name : str
The variable to be plotted on the vertical axis.
lowess_args : dictionary
A dictionary of dictionaries, keys are 'ii', 'io', 'oi'
and 'oo', where 'o' denotes 'observed' and 'i' denotes
imputed. See Notes for details.
lowess_min_n : int
Minimum sample size to plot a lowess fit
jitter : float or tuple
Standard deviation for jittering points in the plot.
Either a single scalar applied to both axes, or a tuple
containing x-axis jitter and y-axis jitter, respectively.
plot_points : bool
If True, the data points are plotted.
ax : AxesSubplot
Axes on which to plot, created if not provided.
Returns
-------
The matplotlib figure on which the plot is drawn.
"""
from statsmodels.graphics import utils as gutils
from statsmodels.nonparametric.smoothers_lowess import lowess
if lowess_args is None:
lowess_args = {}
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
ax.set_position([0.1, 0.1, 0.7, 0.8])
ix1i = self.ix_miss[col1_name]
ix1o = self.ix_obs[col1_name]
ix2i = self.ix_miss[col2_name]
ix2o = self.ix_obs[col2_name]
ix_ii = np.intersect1d(ix1i, ix2i)
ix_io = np.intersect1d(ix1i, ix2o)
ix_oi = np.intersect1d(ix1o, ix2i)
ix_oo = np.intersect1d(ix1o, ix2o)
vec1 = np.require(self.data[col1_name], requirements="W")
vec2 = np.require(self.data[col2_name], requirements="W")
if jitter is not None:
if np.isscalar(jitter):
jitter = (jitter, jitter)
vec1 += jitter[0] * np.random.normal(size=len(vec1))
vec2 += jitter[1] * np.random.normal(size=len(vec2))
# Plot the points
keys = ['oo', 'io', 'oi', 'ii']
lak = {'i': 'imp', 'o': 'obs'}
ixs = {'ii': ix_ii, 'io': ix_io, 'oi': ix_oi, 'oo': ix_oo}
color = {'oo': 'grey', 'ii': 'red', 'io': 'orange',
'oi': 'lime'}
if plot_points:
for ky in keys:
ix = ixs[ky]
lab = lak[ky[0]] + "/" + lak[ky[1]]
ax.plot(vec1[ix], vec2[ix], 'o', color=color[ky],
label=lab, alpha=0.6)
# Plot the lowess fits
for ky in keys:
ix = ixs[ky]
if len(ix) < lowess_min_n:
continue
if ky in lowess_args:
la = lowess_args[ky]
else:
la = {}
ix = ixs[ky]
lfit = lowess(vec2[ix], vec1[ix], **la)
if plot_points:
ax.plot(lfit[:, 0], lfit[:, 1], '-', color=color[ky],
alpha=0.6, lw=4)
else:
lab = lak[ky[0]] + "/" + lak[ky[1]]
ax.plot(lfit[:, 0], lfit[:, 1], '-', color=color[ky],
alpha=0.6, lw=4, label=lab)
ha, la = ax.get_legend_handles_labels()
pad = 0.0001 if plot_points else 0.5
leg = fig.legend(ha, la, loc='center right', numpoints=1,
handletextpad=pad)
leg.draw_frame(False)
ax.set_xlabel(col1_name)
ax.set_ylabel(col2_name)
return fig | Plot observed and imputed values for two variables.
Displays a scatterplot of one variable against another. The
points are colored according to whether the values are
observed or imputed.
Parameters
----------
col1_name : str
The variable to be plotted on the horizontal axis.
col2_name : str
The variable to be plotted on the vertical axis.
lowess_args : dictionary
A dictionary of dictionaries, keys are 'ii', 'io', 'oi'
and 'oo', where 'o' denotes 'observed' and 'i' denotes
imputed. See Notes for details.
lowess_min_n : int
Minimum sample size to plot a lowess fit
jitter : float or tuple
Standard deviation for jittering points in the plot.
Either a single scalar applied to both axes, or a tuple
containing x-axis jitter and y-axis jitter, respectively.
plot_points : bool
If True, the data points are plotted.
ax : AxesSubplot
Axes on which to plot, created if not provided.
Returns
-------
The matplotlib figure on which the plot is drawn. | plot_bivariate | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause
def plot_fit_obs(self, col_name, lowess_args=None,
lowess_min_n=40, jitter=None,
plot_points=True, ax=None):
"""
Plot fitted versus imputed or observed values as a scatterplot.
Parameters
----------
col_name : str
The variable to be plotted on the horizontal axis.
lowess_args : dict-like
Keyword arguments passed to lowess fit. A dictionary of
dictionaries, keys are 'o' and 'i' denoting 'observed' and
'imputed', respectively.
lowess_min_n : int
Minimum sample size to plot a lowess fit
jitter : float or tuple
Standard deviation for jittering points in the plot.
Either a single scalar applied to both axes, or a tuple
containing x-axis jitter and y-axis jitter, respectively.
plot_points : bool
If True, the data points are plotted.
ax : AxesSubplot
Axes on which to plot, created if not provided.
Returns
-------
The matplotlib figure on which the plot is drawn.
"""
from statsmodels.graphics import utils as gutils
from statsmodels.nonparametric.smoothers_lowess import lowess
if lowess_args is None:
lowess_args = {}
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
ax.set_position([0.1, 0.1, 0.7, 0.8])
ixi = self.ix_miss[col_name]
ixo = self.ix_obs[col_name]
vec1 = np.require(self.data[col_name], requirements="W")
# Fitted values
formula = self.conditional_formula[col_name]
mgr = FormulaManager()
endog, exog = mgr.get_matrices(formula, self.data, pandas=True)
results = self.results[col_name]
vec2 = results.predict(exog=exog)
vec2 = self._get_predicted(vec2)
if jitter is not None:
if np.isscalar(jitter):
jitter = (jitter, jitter)
vec1 += jitter[0] * np.random.normal(size=len(vec1))
vec2 += jitter[1] * np.random.normal(size=len(vec2))
# Plot the points
keys = ['o', 'i']
ixs = {'o': ixo, 'i': ixi}
lak = {'o': 'obs', 'i': 'imp'}
color = {'o': 'orange', 'i': 'lime'}
if plot_points:
for ky in keys:
ix = ixs[ky]
ax.plot(vec1[ix], vec2[ix], 'o', color=color[ky],
label=lak[ky], alpha=0.6)
# Plot the lowess fits
for ky in keys:
ix = ixs[ky]
if len(ix) < lowess_min_n:
continue
if ky in lowess_args:
la = lowess_args[ky]
else:
la = {}
ix = ixs[ky]
lfit = lowess(vec2[ix], vec1[ix], **la)
ax.plot(lfit[:, 0], lfit[:, 1], '-', color=color[ky],
alpha=0.6, lw=4, label=lak[ky])
ha, la = ax.get_legend_handles_labels()
leg = fig.legend(ha, la, loc='center right', numpoints=1)
leg.draw_frame(False)
ax.set_xlabel(col_name + " observed or imputed")
ax.set_ylabel(col_name + " fitted")
return fig | Plot fitted versus imputed or observed values as a scatterplot.
Parameters
----------
col_name : str
The variable to be plotted on the horizontal axis.
lowess_args : dict-like
Keyword arguments passed to lowess fit. A dictionary of
dictionaries, keys are 'o' and 'i' denoting 'observed' and
'imputed', respectively.
lowess_min_n : int
Minimum sample size to plot a lowess fit
jitter : float or tuple
Standard deviation for jittering points in the plot.
Either a single scalar applied to both axes, or a tuple
containing x-axis jitter and y-axis jitter, respectively.
plot_points : bool
If True, the data points are plotted.
ax : AxesSubplot
Axes on which to plot, created if not provided.
Returns
-------
The matplotlib figure on which the plot is drawn. | plot_fit_obs | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
def plot_imputed_hist(self, col_name, ax=None, imp_hist_args=None,
obs_hist_args=None, all_hist_args=None):
"""
Display imputed values for one variable as a histogram.
Parameters
----------
col_name : str
The name of the variable to be plotted.
ax : AxesSubplot
An axes on which to draw the histograms. If not provided,
one is created.
imp_hist_args : dict
Keyword arguments to be passed to pyplot.hist when
creating the histogram for imputed values.
obs_hist_args : dict
Keyword arguments to be passed to pyplot.hist when
creating the histogram for observed values.
all_hist_args : dict
Keyword arguments to be passed to pyplot.hist when
creating the histogram for all values.
Returns
-------
The matplotlib figure on which the histograms were drawn
"""
from statsmodels.graphics import utils as gutils
if imp_hist_args is None:
imp_hist_args = {}
if obs_hist_args is None:
obs_hist_args = {}
if all_hist_args is None:
all_hist_args = {}
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
ax.set_position([0.1, 0.1, 0.7, 0.8])
ixm = self.ix_miss[col_name]
ixo = self.ix_obs[col_name]
imp = self.data[col_name].iloc[ixm]
obs = self.data[col_name].iloc[ixo]
for di in imp_hist_args, obs_hist_args, all_hist_args:
if 'histtype' not in di:
di['histtype'] = 'step'
ha, la = [], []
if len(imp) > 0:
h = ax.hist(np.asarray(imp), **imp_hist_args)
ha.append(h[-1][0])
la.append("Imp")
h1 = ax.hist(np.asarray(obs), **obs_hist_args)
h2 = ax.hist(np.asarray(self.data[col_name]), **all_hist_args)
ha.extend([h1[-1][0], h2[-1][0]])
la.extend(["Obs", "All"])
leg = fig.legend(ha, la, loc='center right', numpoints=1)
leg.draw_frame(False)
ax.set_xlabel(col_name)
ax.set_ylabel("Frequency")
return fig | Display imputed values for one variable as a histogram.
Parameters
----------
col_name : str
The name of the variable to be plotted.
ax : AxesSubplot
An axes on which to draw the histograms. If not provided,
one is created.
imp_hist_args : dict
Keyword arguments to be passed to pyplot.hist when
creating the histogram for imputed values.
obs_hist_args : dict
Keyword arguments to be passed to pyplot.hist when
creating the histogram for observed values.
all_hist_args : dict
Keyword arguments to be passed to pyplot.hist when
creating the histogram for all values.
Returns
-------
The matplotlib figure on which the histograms were drawn | plot_imputed_hist | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
def _perturb_bootstrap(self, vname):
"""
Perturbs the model's parameters using a bootstrap.
"""
endog, exog, init_kwds, fit_kwds = self.get_fitting_data(vname)
m = len(endog)
rix = np.random.randint(0, m, m)
endog = endog[rix]
exog = exog[rix, :]
init_kwds = self._boot_kwds(init_kwds, rix)
fit_kwds = self._boot_kwds(fit_kwds, rix)
klass = self.model_class[vname]
self.models[vname] = klass(endog, exog, **init_kwds)
if vname in self.regularized and self.regularized[vname]:
self.results[vname] = (
self.models[vname].fit_regularized(**fit_kwds))
else:
self.results[vname] = self.models[vname].fit(**fit_kwds)
self.params[vname] = self.results[vname].params | Perturbs the model's parameters using a bootstrap. | _perturb_bootstrap | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
def _perturb_gaussian(self, vname):
"""
Gaussian perturbation of model parameters.
The normal approximation to the sampling distribution of the
parameter estimates is used to define the mean and covariance
structure of the perturbation distribution.
"""
endog, exog, init_kwds, fit_kwds = self.get_fitting_data(vname)
klass = self.model_class[vname]
self.models[vname] = klass(endog, exog, **init_kwds)
self.results[vname] = self.models[vname].fit(**fit_kwds)
cov = self.results[vname].cov_params()
mu = self.results[vname].params
self.params[vname] = np.random.multivariate_normal(mean=mu, cov=cov) | Gaussian perturbation of model parameters.
The normal approximation to the sampling distribution of the
parameter estimates is used to define the mean and covariance
structure of the perturbation distribution. | _perturb_gaussian | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
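The same draw, sketched standalone on synthetic data to show which distribution is sampled; everything here (data, seed, model) is illustrative.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
X = sm.add_constant(rng.normal(size=(100, 2)))
y = X @ np.r_[1.0, 0.5, -0.5] + rng.normal(size=100)

res = sm.OLS(y, X).fit()
# one perturbed parameter vector ~ N(beta_hat, Cov(beta_hat))
draw = np.random.multivariate_normal(res.params, res.cov_params())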
def update(self, vname):
"""
Impute missing values for a single variable.
This is a two-step process in which first the parameters are
perturbed, then the missing values are re-imputed.
Parameters
----------
vname : str
The name of the variable to be updated.
"""
self.perturb_params(vname)
self.impute(vname) | Impute missing values for a single variable.
This is a two-step process in which first the parameters are
perturbed, then the missing values are re-imputed.
Parameters
----------
vname : str
The name of the variable to be updated. | update | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
def impute_pmm(self, vname):
"""
Use predictive mean matching to impute missing values.
Notes
-----
The `perturb_params` method must be called first to define the
model.
"""
k_pmm = self.k_pmm
endog_obs, exog_obs, exog_miss, predict_obs_kwds, predict_miss_kwds = (
self.get_split_data(vname))
# Predict imputed variable for both missing and non-missing
# observations
model = self.models[vname]
pendog_obs = model.predict(self.params[vname], exog_obs,
**predict_obs_kwds)
pendog_miss = model.predict(self.params[vname], exog_miss,
**predict_miss_kwds)
pendog_obs = self._get_predicted(pendog_obs)
pendog_miss = self._get_predicted(pendog_miss)
# Jointly sort the observed and predicted endog values for the
# cases with observed values.
ii = np.argsort(pendog_obs)
endog_obs = endog_obs[ii]
pendog_obs = pendog_obs[ii]
# Find the closest match to the predicted endog values for
# cases with missing endog values.
ix = np.searchsorted(pendog_obs, pendog_miss)
# Get the indices for the closest k_pmm values on
# either side of the closest index.
ixm = ix[:, None] + np.arange(-k_pmm, k_pmm)[None, :]
# Account for boundary effects
msk = np.nonzero((ixm < 0) | (ixm > len(endog_obs) - 1))
ixm = np.clip(ixm, 0, len(endog_obs) - 1)
# Get the distances
dx = pendog_miss[:, None] - pendog_obs[ixm]
dx = np.abs(dx)
dx[msk] = np.inf
# Closest positions in ix, row-wise.
dxi = np.argsort(dx, 1)[:, 0:k_pmm]
# Choose a column for each row.
ir = np.random.randint(0, k_pmm, len(pendog_miss))
# Unwind the indices
jj = np.arange(dxi.shape[0])
ix = dxi[(jj, ir)]
iz = ixm[(jj, ix)]
imputed_miss = np.array(endog_obs[iz]).squeeze()
self._store_changes(vname, imputed_miss) | Use predictive mean matching to impute missing values.
Notes
-----
The `perturb_params` method must be called first to define the
model. | impute_pmm | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
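A minimal standalone sketch of the PMM donor selection implemented above, without the vectorized windowing; the function name and arguments are illustrative.

import numpy as np

def pmm_impute(endog_obs, pred_obs, pred_miss, k=20, rng=None):
    """For each missing case, sample one donor among the k observed
    cases whose predicted values are closest, and take its observed
    value as the imputation."""
    rng = np.random.default_rng() if rng is None else rng
    out = np.empty(len(pred_miss))
    for i, p in enumerate(pred_miss):
        # indices of the k observed cases with the closest predictions
        donors = np.argsort(np.abs(pred_obs - p))[:k]
        out[i] = endog_obs[rng.choice(donors)]
    return out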
def next_sample(self):
"""
Perform one complete MICE iteration.
A single MICE iteration updates all missing values using their
respective imputation models, then fits the analysis model to
the imputed data.
Returns
-------
params : array_like
The model parameters for the analysis model.
Notes
-----
This function fits the analysis model and returns its
parameter estimate. The parameter vector is not stored by the
class and is not used in any subsequent calls to `combine`.
Use `fit` to run all MICE steps together and obtain summary
results.
The complete cycle of missing value imputation followed by
fitting the analysis model is repeated `n_skip + 1` times and
the analysis model parameters from the final fit are returned.
"""
# Impute missing values
self.data.update_all(self.n_skip + 1)
start_params = None
if len(self.results_list) > 0:
start_params = self.results_list[-1].params
# Fit the analysis model.
model = self.model_class.from_formula(self.model_formula,
self.data.data,
**self.init_kwds)
self.fit_kwds.update({"start_params": start_params})
result = model.fit(**self.fit_kwds)
return result | Perform one complete MICE iteration.
A single MICE iteration updates all missing values using their
respective imputation models, then fits the analysis model to
the imputed data.
Returns
-------
params : array_like
The model parameters for the analysis model.
Notes
-----
This function fits the analysis model and returns its
parameter estimate. The parameter vector is not stored by the
class and is not used in any subsequent calls to `combine`.
Use `fit` to run all MICE steps together and obtain summary
results.
The complete cycle of missing value imputation followed by
fitting the analysis model is repeated `n_skip + 1` times and
the analysis model parameters from the final fit are returned. | next_sample | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
def fit(self, n_burnin=10, n_imputations=10):
"""
Fit a model using MICE.
Parameters
----------
n_burnin : int
The number of burn-in cycles to skip.
n_imputations : int
The number of data sets to impute
"""
# Run without fitting the analysis model
self.data.update_all(n_burnin)
for j in range(n_imputations):
result = self.next_sample()
self.results_list.append(result)
self.endog_names = result.model.endog_names
self.exog_names = result.model.exog_names
return self.combine() | Fit a model using MICE.
Parameters
----------
n_burnin : int
The number of burn-in cycles to skip.
n_imputations : int
The number of data sets to impute | fit | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
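An end-to-end sketch tying `MICEData` and `MICE` together; the analysis formula and the data frame are hypothetical.

import statsmodels.api as sm
from statsmodels.imputation.mice import MICE, MICEData

imp = MICEData(df)  # df: a DataFrame with missing values (assumed defined)
mice = MICE("y ~ x1 + x2 + x3", sm.OLS, imp)
results = mice.fit(n_burnin=10, n_imputations=10)
print(results.summary())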
def combine(self):
"""
Pools MICE imputation results.
This method can only be used after the `fit` method has been
called. Returns estimates and standard errors of the analysis
model parameters.
Returns a MICEResults instance.
"""
# Extract a few things from the models that were fit to
# imputed data sets.
params_list = []
cov_within = 0.
scale_list = []
for results in self.results_list:
results_uw = results._results
params_list.append(results_uw.params)
cov_within += results_uw.cov_params()
scale_list.append(results.scale)
params_list = np.asarray(params_list)
scale_list = np.asarray(scale_list)
# The estimated parameters for the MICE analysis
params = params_list.mean(0)
# The average of the within-imputation covariances
cov_within /= len(self.results_list)
# The between-imputation covariance
cov_between = np.cov(params_list.T)
# The estimated covariance matrix for the MICE analysis
f = 1 + 1 / float(len(self.results_list))
cov_params = cov_within + f * cov_between
# Fraction of missing information
fmi = f * np.diag(cov_between) / np.diag(cov_params)
# Set up a results instance
scale = np.mean(scale_list)
results = MICEResults(self, params, cov_params / scale)
results.scale = scale
results.frac_miss_info = fmi
results.exog_names = self.exog_names
results.endog_names = self.endog_names
results.model_class = self.model_class
return results | Pools MICE imputation results.
This method can only be used after the `fit` method has been
called. Returns estimates and standard errors of the analysis
model parameters.
Returns a MICEResults instance. | combine | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
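The pooling above follows Rubin's rules; a miniature numeric sketch of the same arithmetic, with made-up parameter draws and a made-up within-fit covariance:

import numpy as np

params_list = np.array([[1.00, 0.50],
                        [1.20, 0.40],
                        [0.90, 0.60]])      # m = 3 imputed-data fits
cov_within = np.eye(2) * 0.01               # average within-fit covariance
cov_between = np.cov(params_list.T)
m = len(params_list)

params = params_list.mean(0)
cov_params = cov_within + (1 + 1 / m) * cov_between
# fraction of missing information, per parameter
fmi = (1 + 1 / m) * np.diag(cov_between) / np.diag(cov_params)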
def summary(self, title=None, alpha=.05):
"""
Summarize the results of running MICE.
Parameters
----------
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
Significance level for the confidence intervals
Returns
-------
smry : Summary instance
This holds the summary tables and text, which can be
printed or converted to various output formats.
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
float_format = "%8.3f"
info = {}
info["Method:"] = "MICE"
info["Model:"] = self.model_class.__name__
info["Dependent variable:"] = self.endog_names
info["Sample size:"] = "%d" % self.model.data.data.shape[0]
info["Scale"] = "%.2f" % self.scale
info["Num. imputations"] = "%d" % len(self.model.results_list)
smry.add_dict(info, align='l', float_format=float_format)
param = summary2.summary_params(self, alpha=alpha)
param["FMI"] = self.frac_miss_info
smry.add_df(param, float_format=float_format)
smry.add_title(title=title, results=self)
return smry | Summarize the results of running MICE.
Parameters
----------
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
Significance level for the confidence intervals
Returns
-------
smry : Summary instance
This holds the summary tables and text, which can be
printed or converted to various output formats. | summary | python | statsmodels/statsmodels | statsmodels/imputation/mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/mice.py | BSD-3-Clause |
def gendat():
"""
Create a data set with missing values.
"""
gen = np.random.RandomState(34243)
n = 200
p = 5
exog = gen.normal(size=(n, p))
exog[:, 0] = exog[:, 1] - exog[:, 2] + 2*exog[:, 4]
exog[:, 0] += gen.normal(size=n)
exog[:, 2] = 1*(exog[:, 2] > 0)
endog = exog.sum(1) + gen.normal(size=n)
df = pd.DataFrame(exog)
df.columns = ["x%d" % k for k in range(1, p+1)]
df["y"] = endog
# .loc slicing includes the right endpoint, so the end labels are
# one less than the intended counts
df.loc[0:59, "x1"] = np.nan
df.loc[0:39, "x2"] = np.nan
df.loc[10:29:2, "x3"] = np.nan
df.loc[20:49:3, "x4"] = np.nan
df.loc[40:44, "x5"] = np.nan
df.loc[30:99:2, "y"] = np.nan
return df | Create a data set with missing values. | gendat | python | statsmodels/statsmodels | statsmodels/imputation/tests/test_mice.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/imputation/tests/test_mice.py | BSD-3-Clause |
def csv2st(csvfile, headers=False, stubs=False, title=None):
"""Return SimpleTable instance,
created from the data in `csvfile`,
which is in comma separated values format.
The first row may contain headers: set headers=True.
The first column may contain stubs: set stubs=True.
Can also supply headers and stubs as tuples of strings.
"""
rows = list()
with open(csvfile, encoding="utf-8") as fh:
reader = csv.reader(fh)
if headers is True:
headers = next(reader)
elif headers is False:
headers = ()
if stubs is True:
stubs = list()
for row in reader:
if row:
stubs.append(row[0])
rows.append(row[1:])
else: # no stubs, or stubs provided
for row in reader:
if row:
rows.append(row)
if stubs is False:
stubs = ()
ncols = len(rows[0])
if any(len(row) != ncols for row in rows):
raise OSError('All rows of CSV file must have same length.')
return SimpleTable(data=rows, headers=headers, stubs=stubs) | Return SimpleTable instance,
created from the data in `csvfile`,
which is in comma separated values format.
The first row may contain headers: set headers=True.
The first column may contain stubs: set stubs=True.
Can also supply headers and stubs as tuples of strings. | csv2st | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
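Usage sketch for `csv2st`; the file contents are invented.

from statsmodels.iolib.table import csv2st

with open("example.csv", "w", encoding="utf-8") as fh:
    fh.write("name,a,b\nrow1,1,2\nrow2,3,4\n")

tbl = csv2st("example.csv", headers=True, stubs=True)
print(tbl.as_text())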
def __init__(self, data, headers=None, stubs=None, title='',
datatypes=None, csv_fmt=None, txt_fmt=None, ltx_fmt=None,
html_fmt=None, celltype=None, rowtype=None, **fmt_dict):
"""
Parameters
----------
data : list of lists or 2d array (not matrix!)
R rows by K columns of table elements
headers : list (or tuple) of str
sequence of K strings, one per header
stubs : list (or tuple) of str
sequence of R strings, one per stub
title : str
title of the table
datatypes : list of int
indexes to `data_fmts`
txt_fmt : dict
text formatting options
ltx_fmt : dict
latex formatting options
csv_fmt : dict
csv formatting options
html_fmt : dict
html formatting options
celltype : class
the cell class for the table (default: Cell)
rowtype : class
the row class for the table (default: Row)
fmt_dict : dict
general formatting options
"""
self.title = title
self._datatypes = datatypes
if self._datatypes is None:
self._datatypes = [] if len(data) == 0 else lrange(len(data[0]))
# start with default formatting
self._txt_fmt = default_txt_fmt.copy()
self._latex_fmt = default_latex_fmt.copy()
self._csv_fmt = default_csv_fmt.copy()
self._html_fmt = default_html_fmt.copy()
# substitute any general user specified formatting
# :note: these will be overridden by output specific arguments
self._csv_fmt.update(fmt_dict)
self._txt_fmt.update(fmt_dict)
self._latex_fmt.update(fmt_dict)
self._html_fmt.update(fmt_dict)
# substitute any output-type specific formatting
self._csv_fmt.update(csv_fmt or dict())
self._txt_fmt.update(txt_fmt or dict())
self._latex_fmt.update(ltx_fmt or dict())
self._html_fmt.update(html_fmt or dict())
self.output_formats = dict(
txt=self._txt_fmt,
csv=self._csv_fmt,
html=self._html_fmt,
latex=self._latex_fmt
)
self._Cell = celltype or Cell
self._Row = rowtype or Row
rows = self._data2rows(data) # a list of Row instances
list.__init__(self, rows)
self._add_headers_stubs(headers, stubs)
self._colwidths = dict() | Parameters
----------
data : list of lists or 2d array (not matrix!)
R rows by K columns of table elements
headers : list (or tuple) of str
sequence of K strings, one per header
stubs : list (or tuple) of str
sequence of R strings, one per stub
title : str
title of the table
datatypes : list of int
indexes to `data_fmts`
txt_fmt : dict
text formatting options
ltx_fmt : dict
latex formatting options
csv_fmt : dict
csv formatting options
html_fmt : dict
html formatting options
celltype : class
the cell class for the table (default: Cell)
rowtype : class
the row class for the table (default: Row)
fmt_dict : dict
general formatting options | __init__ | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
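A small construction-and-rendering sketch; the cell values are arbitrary.

from statsmodels.iolib.table import SimpleTable

tbl = SimpleTable([["2.50", "0.10"], ["3.70", "0.20"]],
                  headers=["coef", "std err"],
                  stubs=["x1", "x2"],
                  title="Example table")
print(tbl.as_text())
print(tbl.as_html())
print(tbl.as_latex_tabular())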
def _add_headers_stubs(self, headers, stubs):
"""Return None. Adds headers and stubs to table,
if these were provided at initialization.
Parameters
----------
headers : list[str]
K strings, where K is number of columns
stubs : list[str]
R strings, where R is number of non-header rows
:note: a header row does not receive a stub!
"""
if headers:
self.insert_header_row(0, headers, dec_below='header_dec_below')
if stubs:
self.insert_stubs(0, stubs) | Return None. Adds headers and stubs to table,
if these were provided at initialization.
Parameters
----------
headers : list[str]
K strings, where K is number of columns
stubs : list[str]
R strings, where R is number of non-header rows
:note: a header row does not receive a stub! | _add_headers_stubs | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def insert(self, idx, row, datatype=None):
"""Return None. Insert a row into a table.
"""
if datatype is None:
try:
datatype = row.datatype
except AttributeError:
pass
row = self._Row(row, datatype=datatype, table=self)
list.insert(self, idx, row) | Return None. Insert a row into a table. | insert | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def insert_header_row(self, rownum, headers, dec_below='header_dec_below'):
"""Return None. Insert a row of headers,
where ``headers`` is a sequence of strings.
(The strings may contain newlines, to indicate multiline headers.)
"""
header_rows = [header.split('\n') for header in headers]
# rows in reverse order
rows = list(zip_longest(*header_rows, fillvalue=''))
rows.reverse()
for i, row in enumerate(rows):
self.insert(rownum, row, datatype='header')
if i == 0:
self[rownum].dec_below = dec_below
else:
self[rownum].dec_below = None | Return None. Insert a row of headers,
where ``headers`` is a sequence of strings.
(The strings may contain newlines, to indicate multiline headers.) | insert_header_row | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause
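Sketch of the newline convention: a header string containing '\n' renders as a two-line header. The values are arbitrary.

from statsmodels.iolib.table import SimpleTable

tbl = SimpleTable([["1", "2"]],
                  headers=["group A\ncol 1", "group A\ncol 2"],
                  stubs=["r1"])
print(tbl.as_text())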
def insert_stubs(self, loc, stubs):
"""Return None. Insert column of stubs at column `loc`.
If there is a header row, it gets an empty cell.
So ``len(stubs)`` should equal the number of non-header rows.
"""
_Cell = self._Cell
stubs = iter(stubs)
for row in self:
if row.datatype == 'header':
empty_cell = _Cell('', datatype='empty')
row.insert(loc, empty_cell)
else:
try:
row.insert_stub(loc, next(stubs))
except StopIteration:
raise ValueError('length of stubs must match table length') | Return None. Insert column of stubs at column `loc`.
If there is a header row, it gets an empty cell.
So ``len(stubs)`` should equal the number of non-header rows. | insert_stubs | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def _data2rows(self, raw_data):
"""Return list of Row,
the raw data as rows of cells.
"""
_Cell = self._Cell
_Row = self._Row
rows = []
for datarow in raw_data:
dtypes = cycle(self._datatypes)
newrow = _Row(datarow, datatype='data', table=self, celltype=_Cell)
for cell in newrow:
cell.datatype = next(dtypes)
cell.row = newrow # a cell knows its row
rows.append(newrow)
return rows | Return list of Row,
the raw data as rows of cells. | _data2rows | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def pad(self, s, width, align):
"""DEPRECATED: just use the pad function"""
return pad(s, width, align) | DEPRECATED: just use the pad function | pad | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def _get_colwidths(self, output_format, **fmt_dict):
"""Return list, the calculated widths of each column."""
output_format = get_output_format(output_format)
fmt = self.output_formats[output_format].copy()
fmt.update(fmt_dict)
ncols = max(len(row) for row in self)
request = fmt.get('colwidths')
if request == 0:  # assume no extra space desired (e.g., CSV)
    return [0] * ncols
elif request is None:  # no request: no extra space for any column
request = [0] * ncols
elif isinstance(request, int):
request = [request] * ncols
elif len(request) < ncols:
request = [request[i % len(request)] for i in range(ncols)]
min_widths = []
for col in zip(*self):
maxwidth = max(len(c.format(0, output_format, **fmt)) for c in col)
min_widths.append(maxwidth)
result = lmap(max, min_widths, request)
return result | Return list, the calculated widths of each column. | _get_colwidths | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def get_colwidths(self, output_format, **fmt_dict):
"""Return list, the widths of each column."""
call_args = [output_format]
for k, v in sorted(fmt_dict.items()):
if isinstance(v, list):
call_args.append((k, tuple(v)))
elif isinstance(v, dict):
call_args.append((k, tuple(sorted(v.items()))))
else:
call_args.append((k, v))
key = tuple(call_args)
try:
return self._colwidths[key]
except KeyError:
self._colwidths[key] = self._get_colwidths(output_format,
**fmt_dict)
return self._colwidths[key] | Return list, the widths of each column. | get_colwidths | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def _get_fmt(self, output_format, **fmt_dict):
"""Return dict, the formatting options.
"""
output_format = get_output_format(output_format)
# first get the default formatting
try:
fmt = self.output_formats[output_format].copy()
except KeyError:
raise ValueError('Unknown format: %s' % output_format)
# then, add formatting specific to this call
fmt.update(fmt_dict)
return fmt | Return dict, the formatting options. | _get_fmt | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def as_csv(self, **fmt_dict):
"""Return string, the table in CSV format.
Currently only supports comma separator."""
# fetch the format, which may just be default_csv_format
fmt = self._get_fmt('csv', **fmt_dict)
return self.as_text(**fmt) | Return string, the table in CSV format.
Currently only supports comma separator. | as_csv | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def as_text(self, **fmt_dict):
"""Return string, the table as text."""
# fetch the text format, override with fmt_dict
fmt = self._get_fmt('txt', **fmt_dict)
# get rows formatted as strings
formatted_rows = [row.as_string('text', **fmt) for row in self]
rowlen = len(formatted_rows[-1]) # do not use header row
# place decoration above the table body, if desired
table_dec_above = fmt.get('table_dec_above', '=')
if table_dec_above:
formatted_rows.insert(0, table_dec_above * rowlen)
# next place a title at the very top, if desired
# :note: user can include a newlines at end of title if desired
title = self.title
if title:
title = pad(self.title, rowlen, fmt.get('title_align', 'c'))
formatted_rows.insert(0, title)
# add decoration below the table, if desired
table_dec_below = fmt.get('table_dec_below', '-')
if table_dec_below:
formatted_rows.append(table_dec_below * rowlen)
return '\n'.join(formatted_rows) | Return string, the table as text. | as_text | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def as_html(self, **fmt_dict):
"""Return string.
This is the default formatter for HTML tables.
An HTML table formatter must accept as arguments
a table and a format dictionary.
"""
# fetch the text format, override with fmt_dict
fmt = self._get_fmt('html', **fmt_dict)
formatted_rows = ['<table class="simpletable">']
if self.title:
title = '<caption>%s</caption>' % self.title
formatted_rows.append(title)
formatted_rows.extend(row.as_string('html', **fmt) for row in self)
formatted_rows.append('</table>')
return '\n'.join(formatted_rows) | Return string.
This is the default formatter for HTML tables.
An HTML table formatter must accept as arguments
a table and a format dictionary. | as_html | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def as_latex_tabular(self, center=True, **fmt_dict):
'''Return string, the table as a LaTeX tabular environment.
Note: will require the booktabs package.'''
# fetch the text format, override with fmt_dict
fmt = self._get_fmt('latex', **fmt_dict)
formatted_rows = []
if center:
formatted_rows.append(r'\begin{center}')
table_dec_above = fmt['table_dec_above'] or ''
table_dec_below = fmt['table_dec_below'] or ''
prev_aligns = None
last = None
for row in self + [last]:
if row == last:
aligns = None
else:
aligns = row.get_aligns('latex', **fmt)
if aligns != prev_aligns:
# When the number/type of columns changes...
if prev_aligns:
# ... if there is a tabular to close, close it...
formatted_rows.append(table_dec_below)
formatted_rows.append(r'\end{tabular}')
if aligns:
# ... and if there are more lines, open a new one:
formatted_rows.append(r'\begin{tabular}{%s}' % aligns)
if not prev_aligns:
# (with a nice line if it's the top of the whole table)
formatted_rows.append(table_dec_above)
if row != last:
formatted_rows.append(
row.as_string(output_format='latex', **fmt))
prev_aligns = aligns
# tabular does not support caption, but make it available for
# figure environment
if self.title:
title = r'%%\caption{%s}' % self.title
formatted_rows.append(title)
if center:
formatted_rows.append(r'\end{center}')
# Replace $$ due to bug in GH 5444
return '\n'.join(formatted_rows).replace('$$', ' ') | Return string, the table as a LaTeX tabular environment.
Note: will require the booktabs package. | as_latex_tabular | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def extend_right(self, table):
"""Return None.
Extend each row of `self` with corresponding row of `table`.
Does **not** import formatting from ``table``.
This generally makes sense only if the two tables have
the same number of rows, but that is not enforced.
:note: To extend append a table below, just use `extend`,
which is the ordinary list method. This generally makes sense
only if the two tables have the same number of columns,
but that is not enforced.
"""
for row1, row2 in zip(self, table):
row1.extend(row2) | Return None.
Extend each row of `self` with corresponding row of `table`.
Does **not** import formatting from ``table``.
This generally makes sense only if the two tables have
the same number of rows, but that is not enforced.
:note: To extend append a table below, just use `extend`,
which is the ordinary list method. This generally makes sense
only if the two tables have the same number of columns,
but that is not enforced. | extend_right | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def label_cells(self, func):
"""Return None. Labels cells based on `func`.
If ``func(cell) is None`` then its datatype is
not changed; otherwise it is set to ``func(cell)``.
"""
for row in self:
for cell in row:
label = func(cell)
if label is not None:
cell.datatype = label | Return None. Labels cells based on `func`.
If ``func(cell) is None`` then its datatype is
not changed; otherwise it is set to ``func(cell)``. | label_cells | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def pad(s, width, align):
"""Return string padded with spaces,
based on alignment parameter."""
if align == 'l':
s = s.ljust(width)
elif align == 'r':
s = s.rjust(width)
else:
s = s.center(width)
return s | Return string padded with spaces,
based on alignment parameter. | pad | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
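Quick illustration of the three alignment modes (any value other than 'l' or 'r' centers):

from statsmodels.iolib.table import pad

assert pad('ab', 6, 'l') == 'ab    '
assert pad('ab', 6, 'r') == '    ab'
assert pad('ab', 6, 'c') == '  ab  '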
def __init__(self, seq, datatype='data', table=None, celltype=None,
dec_below='row_dec_below', **fmt_dict):
"""
Parameters
----------
seq : sequence of data or cells
table : SimpleTable
datatype : str ('data' or 'header')
dec_below : str
(e.g., 'header_dec_below' or 'row_dec_below')
decoration tag, identifies the decoration to go below the row.
(Decoration is repeated as needed for text formats.)
"""
self.datatype = datatype
self.table = table
if celltype is None:
if table is None:
celltype = Cell
else:
celltype = table._Cell
self._Cell = celltype
self._fmt = fmt_dict
self.special_fmts = dict() # special formatting for any output format
self.dec_below = dec_below
list.__init__(self, (celltype(cell, row=self) for cell in seq)) | Parameters
----------
seq : sequence of data or cells
table : SimpleTable
datatype : str ('data' or 'header')
dec_below : str
(e.g., 'header_dec_below' or 'row_dec_below')
decoration tag, identifies the decoration to go below the row.
(Decoration is repeated as needed for text formats.) | __init__ | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def add_format(self, output_format, **fmt_dict):
"""
Return None. Adds row-instance specific formatting
for the specified output format.
Example: myrow.add_format('txt', row_dec_below='+-')
"""
output_format = get_output_format(output_format)
if output_format not in self.special_fmts:
self.special_fmts[output_format] = dict()
self.special_fmts[output_format].update(fmt_dict) | Return None. Adds row-instance specific formatting
for the specified output format.
Example: myrow.add_format('txt', row_dec_below='+-') | add_format | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def insert_stub(self, loc, stub):
"""Return None. Inserts a stub cell
in the row at `loc`.
"""
_Cell = self._Cell
if not isinstance(stub, _Cell):
            stub = _Cell(stub, datatype='stub', row=self)
self.insert(loc, stub) | Return None. Inserts a stub cell
in the row at `loc`. | insert_stub | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
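A sketch that prepends a stub (row label) cell to each data row (labels are illustrative):

from statsmodels.iolib.table import SimpleTable

tbl = SimpleTable([[1.0, 2.0], [3.0, 4.0]])  # no headers, so every row is data
for row, label in zip(tbl, ['first', 'second']):
    row.insert_stub(0, label)  # wraps the label in a Cell with datatype 'stub'
print(tbl.as_text())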
def get_aligns(self, output_format, **fmt_dict):
"""Return string, sequence of column alignments.
        Ensure conformable data_aligns in `fmt_dict`."""
fmt = self._get_fmt(output_format, **fmt_dict)
return ''.join(cell.alignment(output_format, **fmt) for cell in self) | Return string, sequence of column alignments.
Ensure conformable data_aligns in `fmt_dict`. | get_aligns | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def as_string(self, output_format='txt', **fmt_dict):
"""Return string: the formatted row.
This is the default formatter for rows.
Override this to get different formatting.
A row formatter must accept as arguments
a row (self) and an output format,
one of ('html', 'txt', 'csv', 'latex').
"""
fmt = self._get_fmt(output_format, **fmt_dict)
# get column widths
try:
colwidths = self.table.get_colwidths(output_format, **fmt)
except AttributeError:
colwidths = fmt.get('colwidths')
if colwidths is None:
colwidths = (0,) * len(self)
colsep = fmt['colsep']
row_pre = fmt.get('row_pre', '')
row_post = fmt.get('row_post', '')
formatted_cells = []
for cell, width in zip(self, colwidths):
content = cell.format(width, output_format=output_format, **fmt)
formatted_cells.append(content)
formatted_row = row_pre + colsep.join(formatted_cells) + row_post
formatted_row = self._decorate_below(formatted_row, output_format,
**fmt)
return formatted_row | Return string: the formatted row.
This is the default formatter for rows.
Override this to get different formatting.
A row formatter must accept as arguments
a row (self) and an output format,
one of ('html', 'txt', 'csv', 'latex'). | as_string | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def _decorate_below(self, row_as_string, output_format, **fmt_dict):
"""This really only makes sense for the text and latex output formats.
"""
dec_below = fmt_dict.get(self.dec_below, None)
if dec_below is None:
result = row_as_string
else:
output_format = get_output_format(output_format)
if output_format == 'txt':
row0len = len(row_as_string)
dec_len = len(dec_below)
repeat, addon = divmod(row0len, dec_len)
result = row_as_string + "\n" + (dec_below * repeat +
dec_below[:addon])
elif output_format == 'latex':
result = row_as_string + "\n" + dec_below
else:
raise ValueError("I cannot decorate a %s header." %
output_format)
return result | This really only makes sense for the text and latex output formats. | _decorate_below | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
def format(self, width, output_format='txt', **fmt_dict):
"""Return string.
This is the default formatter for cells.
        Override this to get different formatting.
A cell formatter must accept as arguments
a cell (self) and an output format,
one of ('html', 'txt', 'csv', 'latex').
It will generally respond to the datatype,
one of (int, 'header', 'stub').
"""
fmt = self._get_fmt(output_format, **fmt_dict)
data = self.data
datatype = self.datatype
data_fmts = fmt.get('data_fmts')
if data_fmts is None:
            # check: allow for deprecated use of data_fmt
data_fmt = fmt.get('data_fmt')
if data_fmt is None:
data_fmt = '%s'
data_fmts = [data_fmt]
if isinstance(datatype, int):
datatype = datatype % len(data_fmts) # constrain to indexes
data_fmt = data_fmts[datatype]
if isinstance(data_fmt, str):
content = data_fmt % (data,)
elif callable(data_fmt):
content = data_fmt(data)
else:
raise TypeError("Must be a string or a callable")
if datatype == 0:
content = self._latex_escape(content, fmt, output_format)
elif datatype in fmt:
data = self._latex_escape(data, fmt, output_format)
dfmt = fmt.get(datatype)
try:
content = dfmt % (data,)
except TypeError: # dfmt is not a substitution string
content = dfmt
else:
raise ValueError('Unknown cell datatype: %s' % datatype)
align = self.alignment(output_format, **fmt)
return pad(content, width, align) | Return string.
This is the default formatter for cells.
Override this to get different formatting.
A cell formatter must accept as arguments
a cell (self) and an output format,
one of ('html', 'txt', 'csv', 'latex').
It will generally respond to the datatype,
one of (int, 'header', 'stub'). | format | python | statsmodels/statsmodels | statsmodels/iolib/table.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/table.py | BSD-3-Clause |
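A sketch of per-column formats: each data cell's integer datatype indexes into `data_fmts` (modulo its length), so formats recycle across columns:

from statsmodels.iolib.table import SimpleTable

tbl = SimpleTable([[3.14159, 42], [2.71828, 7]],
                  headers=['ratio', 'count'],
                  txt_fmt={'data_fmts': ['%.2f', '%d']})
print(tbl.as_text())  # first column rendered with %.2f, second with %d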
def savetxt(fname, X, names=None, fmt='%.18e', delimiter=' '):
"""
Save an array to a text file.
This is just a copy of numpy.savetxt patched to support structured arrays
or a header of names. Does not include py3 support now in savetxt.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
names : list, optional
If given names will be the column header in the text file.
fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored.
delimiter : str
Character separating columns.
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
    ``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
    - For integer specifiers (e.g. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : str of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> savetxt('test.out', x, delimiter=',') # x is an array
>>> savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
with get_file_obj(fname, 'w') as fh:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
# `fmt` can be a string with multiple insertion points or a list of formats.
        # E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if isinstance(fmt, (list, tuple)):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = delimiter.join(fmt)
elif isinstance(fmt, str):
if fmt.count('%') == 1:
fmt = [fmt, ]*ncol
format = delimiter.join(fmt)
elif fmt.count('%') != ncol:
raise AttributeError('fmt has wrong number of %% formats. %s'
% fmt)
else:
format = fmt
# handle names
if names is None and X.dtype.names:
names = X.dtype.names
if names is not None:
fh.write(delimiter.join(names) + '\n')
for row in X:
fh.write(format % tuple(row) + '\n') | Save an array to a text file.
This is just a copy of numpy.savetxt patched to support structured arrays
or a header of names. Does not include py3 support now in savetxt.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
names : list, optional
If given names will be the column header in the text file.
fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored.
delimiter : str
Character separating columns.
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (e.g. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : str of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> savetxt('test.out', x, delimiter=',') # x is an array
>>> savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> savetxt('test.out', x, fmt='%1.4e') # use exponential notation | savetxt | python | statsmodels/statsmodels | statsmodels/iolib/foreign.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/foreign.py | BSD-3-Clause |
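A sketch of the patched behavior for structured arrays: the dtype's field names become the header line (assumes the `savetxt` defined above is in scope; the filename is illustrative):

import numpy as np

scores = np.array([(1, 2.50), (2, 3.25)],
                  dtype=[('id', 'i4'), ('score', 'f8')])
savetxt('scores.csv', scores, fmt=['%d', '%.2f'], delimiter=',')
# scores.csv now reads:
# id,score
# 1,2.50
# 2,3.25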
def _repr_html_(self):
"""Display as HTML in IPython notebook."""
return self.as_html() | Display as HTML in IPython notebook. | _repr_html_ | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
def _repr_latex_(self):
'''Display as LaTeX when converting IPython notebook to PDF.'''
return self.as_latex() | Display as LaTeX when converting IPython notebook to PDF. | _repr_latex_ | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
def add_df(self, df, index=True, header=True, float_format='%.4f',
align='r'):
"""
Add the contents of a DataFrame to summary table
Parameters
----------
df : DataFrame
header : bool
Reproduce the DataFrame column labels in summary table
index : bool
Reproduce the DataFrame row labels in summary table
float_format : str
Formatting to float data columns
align : str
Data alignment (l/c/r)
"""
settings = {'index': index, 'header': header,
'float_format': float_format, 'align': align}
self.tables.append(df)
self.settings.append(settings) | Add the contents of a DataFrame to summary table
Parameters
----------
df : DataFrame
header : bool
Reproduce the DataFrame column labels in summary table
index : bool
Reproduce the DataFrame row labels in summary table
float_format : str
Formatting to float data columns
align : str
Data alignment (l/c/r) | add_df | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
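A minimal sketch of building a Summary from a DataFrame (numbers are illustrative):

import pandas as pd
from statsmodels.iolib.summary2 import Summary

df = pd.DataFrame({'coef': [1.2345, -0.5678], 'se': [0.1110, 0.2220]},
                  index=['x1', 'x2'])
smry = Summary()
smry.add_df(df, float_format='%.3f')
print(smry.as_text())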
def add_array(self, array, align='r', float_format="%.4f"):
"""Add the contents of a Numpy array to summary table
Parameters
----------
array : numpy array (2D)
float_format : str
Formatting to array if type is float
align : str
Data alignment (l/c/r)
"""
table = pd.DataFrame(array)
self.add_df(table, index=False, header=False,
float_format=float_format, align=align) | Add the contents of a Numpy array to summary table
Parameters
----------
array : numpy array (2D)
float_format : str
Formatting to array if type is float
align : str
Data alignment (l/c/r) | add_array | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
def add_dict(self, d, ncols=2, align='l', float_format="%.4f"):
"""Add the contents of a Dict to summary table
Parameters
----------
d : dict
Keys and values are automatically coerced to strings with str().
Users are encouraged to format them before using add_dict.
ncols : int
Number of columns of the output table
align : str
Data alignment (l/c/r)
float_format : str
Formatting to float data columns
"""
keys = [_formatter(x, float_format) for x in d.keys()]
vals = [_formatter(x, float_format) for x in d.values()]
data = np.array(lzip(keys, vals))
if data.shape[0] % ncols != 0:
pad = ncols - (data.shape[0] % ncols)
data = np.vstack([data, np.array(pad * [['', '']])])
data = np.split(data, ncols)
data = reduce(lambda x, y: np.hstack([x, y]), data)
self.add_array(data, align=align) | Add the contents of a Dict to summary table
Parameters
----------
d : dict
Keys and values are automatically coerced to strings with str().
Users are encouraged to format them before using add_dict.
ncols : int
Number of columns of the output table
align : str
Data alignment (l/c/r)
float_format : str
Formatting to float data columns | add_dict | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
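A sketch of the dict layout: with `ncols=2`, five items are padded with one blank pair to six, split, and re-stacked side by side into three rows of four columns:

from statsmodels.iolib.summary2 import Summary

info = {'Model:': 'OLS', 'Obs:': 100, 'Df:': 97,
        'AIC:': 12.3, 'BIC:': 15.6}
smry = Summary()
smry.add_dict(info, ncols=2)
print(smry.as_text())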
def add_text(self, string):
"""Append a note to the bottom of the summary table. In ASCII tables,
the note will be wrapped to table width. Notes are not indented.
"""
self.extra_txt.append(string) | Append a note to the bottom of the summary table. In ASCII tables,
the note will be wrapped to table width. Notes are not indented. | add_text | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
def add_title(self, title=None, results=None):
"""Insert a title on top of the summary table. If a string is provided
in the title argument, that string is printed. If no title string is
provided but a results instance is provided, statsmodels attempts
to construct a useful title automatically.
"""
if isinstance(title, str):
self.title = title
else:
if results is not None:
model = results.model.__class__.__name__
if model in _model_types:
model = _model_types[model]
self.title = 'Results: ' + model
else:
self.title = '' | Insert a title on top of the summary table. If a string is provided
in the title argument, that string is printed. If no title string is
provided but a results instance is provided, statsmodels attempts
to construct a useful title automatically. | add_title | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
def add_base(self, results, alpha=0.05, float_format="%.4f", title=None,
xname=None, yname=None):
"""Try to construct a basic summary instance.
Parameters
----------
results : Model results instance
alpha : float
significance level for the confidence intervals (optional)
float_format: str
Float formatting for summary of parameters (optional)
title : str
Title of the summary table (optional)
xname : list[str] of length equal to the number of parameters
Names of the independent variables (optional)
yname : str
Name of the dependent variable (optional)
"""
param = summary_params(results, alpha=alpha, use_t=results.use_t)
info = summary_model(results)
if xname is not None:
param.index = xname
if yname is not None:
info['Dependent Variable:'] = yname
self.add_dict(info, align='l')
self.add_df(param, float_format=float_format)
self.add_title(title=title, results=results) | Try to construct a basic summary instance.
Parameters
----------
results : Model results instance
alpha : float
significance level for the confidence intervals (optional)
float_format: str
Float formatting for summary of parameters (optional)
title : str
Title of the summary table (optional)
xname : list[str] of length equal to the number of parameters
Names of the independent variables (optional)
yname : str
Name of the dependent variable (optional) | add_base | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
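An end-to-end sketch of `add_base` on a fitted OLS model (random data for illustration):

import numpy as np
import statsmodels.api as sm
from statsmodels.iolib.summary2 import Summary

rng = np.random.default_rng(0)
X = sm.add_constant(rng.standard_normal((50, 2)))
y = X @ [1.0, 2.0, -1.0] + rng.standard_normal(50)
res = sm.OLS(y, X).fit()
smry = Summary()
smry.add_base(res, alpha=0.05, title='OLS sketch')
print(smry.as_text())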
def as_text(self):
"""Generate ASCII Summary Table
"""
tables = self.tables
settings = self.settings
title = self.title
extra_txt = self.extra_txt
pad_col, pad_index, widest = _measure_tables(tables, settings)
rule_equal = widest * '='
simple_tables = _simple_tables(tables, settings, pad_col, pad_index)
tab = [x.as_text() for x in simple_tables]
tab = '\n'.join(tab)
tab = tab.split('\n')
tab[0] = rule_equal
tab.append(rule_equal)
tab = '\n'.join(tab)
        if title is not None:
            if len(title) < widest:
title = ' ' * int(widest / 2 - len(title) / 2) + title
else:
title = ''
txt = [textwrap.wrap(x, widest) for x in extra_txt]
txt = ['\n'.join(x) for x in txt]
txt = '\n'.join(txt)
out = '\n'.join([title, tab, txt])
return out | Generate ASCII Summary Table | as_text | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
def as_html(self):
"""Generate HTML Summary Table
"""
tables = self.tables
settings = self.settings
simple_tables = _simple_tables(tables, settings)
tab = [x.as_html() for x in simple_tables]
tab = '\n'.join(tab)
        temp_txt = [st.replace('\n', '<br/>\n') for st in self.extra_txt]
txt = '<br/>\n'.join(temp_txt)
out = '<br/>\n'.join([tab, txt])
return out | Generate HTML Summary Table | as_html | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
def as_latex(self, label=''):
"""Generate LaTeX Summary Table
Parameters
----------
label : str
Label of the summary table that can be referenced
in a latex document (optional)
"""
tables = self.tables
settings = self.settings
title = self.title
if title is not None:
title = '\\caption{' + title + '}'
else:
title = '\\caption{}'
label = '\\label{' + label + '}'
simple_tables = _simple_tables(tables, settings)
tab = [x.as_latex_tabular() for x in simple_tables]
tab = '\n\n'.join(tab)
to_replace = ('\\\\hline\\n\\\\hline\\n\\\\'
'end{tabular}\\n\\\\begin{tabular}{.*}\\n')
if self._merge_latex:
# create single tabular object for summary_col
tab = re.sub(to_replace, r'\\midrule\n', tab)
non_captioned = '\\begin{table}', title, label, tab, '\\end{table}'
non_captioned = '\n'.join(non_captioned)
txt = ' \\newline \n'.join(self.extra_txt)
out = non_captioned + '\n\\bigskip\n' + txt
return out | Generate LaTeX Summary Table
Parameters
----------
label : str
Label of the summary table that can be referenced
in a latex document (optional) | as_latex | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
def _measure_tables(tables, settings):
"""Compare width of ascii tables in a list and calculate padding values.
We add space to each col_sep to get us as close as possible to the
width of the largest table. Then, we add a few spaces to the first
column to pad the rest.
"""
simple_tables = _simple_tables(tables, settings)
tab = [x.as_text() for x in simple_tables]
length = [len(x.splitlines()[0]) for x in tab]
len_max = max(length)
pad_sep = []
pad_index = []
for i in range(len(tab)):
nsep = max(tables[i].shape[1] - 1, 1)
pad = int((len_max - length[i]) / nsep)
pad_sep.append(pad)
len_new = length[i] + nsep * pad
pad_index.append(len_max - len_new)
return pad_sep, pad_index, max(length) | Compare width of ascii tables in a list and calculate padding values.
We add space to each col_sep to get us as close as possible to the
width of the largest table. Then, we add a few spaces to the first
column to pad the rest. | _measure_tables | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
def summary_model(results):
"""
Create a dict with information about the model
"""
def time_now(*args, **kwds):
now = datetime.datetime.now()
return now.strftime('%Y-%m-%d %H:%M')
info = {}
info['Model:'] = lambda x: x.model.__class__.__name__
    info['Model Family:'] = lambda x: x.family.__class__.__name__
info['Link Function:'] = lambda x: x.family.link.__class__.__name__
info['Dependent Variable:'] = lambda x: x.model.endog_names
info['Date:'] = time_now
info['No. Observations:'] = lambda x: "%#6d" % x.nobs
info['Df Model:'] = lambda x: "%#6d" % x.df_model
info['Df Residuals:'] = lambda x: "%#6d" % x.df_resid
info['Converged:'] = lambda x: x.mle_retvals['converged']
info['No. Iterations:'] = lambda x: x.mle_retvals['iterations']
info['Method:'] = lambda x: x.method
info['Norm:'] = lambda x: x.fit_options['norm']
info['Scale Est.:'] = lambda x: x.fit_options['scale_est']
info['Cov. Type:'] = lambda x: x.fit_options['cov']
rsquared_type = '' if results.k_constant else ' (uncentered)'
info['R-squared' + rsquared_type + ':'] = lambda x: "%#8.3f" % x.rsquared
info['Adj. R-squared' + rsquared_type + ':'] = lambda x: "%#8.3f" % x.rsquared_adj # noqa:E501
info['Pseudo R-squared:'] = lambda x: "%#8.3f" % x.prsquared
info['AIC:'] = lambda x: "%8.4f" % x.aic
info['BIC:'] = lambda x: "%8.4f" % x.bic
info['Log-Likelihood:'] = lambda x: "%#8.5g" % x.llf
info['LL-Null:'] = lambda x: "%#8.5g" % x.llnull
info['LLR p-value:'] = lambda x: "%#8.5g" % x.llr_pvalue
info['Deviance:'] = lambda x: "%#8.5g" % x.deviance
info['Pearson chi2:'] = lambda x: "%#6.3g" % x.pearson_chi2
info['F-statistic:'] = lambda x: "%#8.4g" % x.fvalue
info['Prob (F-statistic):'] = lambda x: "%#6.3g" % x.f_pvalue
info['Scale:'] = lambda x: "%#8.5g" % x.scale
out = {}
for key, func in info.items():
try:
out[key] = func(results)
except (AttributeError, KeyError, NotImplementedError):
# NOTE: some models do not have loglike defined (RLM),
# so raise NotImplementedError
pass
return out | Create a dict with information about the model | summary_model | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
def summary_params(results, yname=None, xname=None, alpha=.05, use_t=True,
skip_header=False, float_format="%.4f"):
"""create a summary table of parameters from results instance
Parameters
----------
    results : results instance
some required information is directly taken from the result
instance
yname : {str, None}
optional name for the endogenous variable, default is "y"
xname : {list[str], None}
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
skip_header : bool
If false (default), then the header row is added. If true, then no
header row is added.
float_format : str
float formatting options (e.g. ".3g")
Returns
-------
    params_table : pandas.DataFrame
"""
if isinstance(results, tuple):
results, params, bse, tvalues, pvalues, conf_int = results
else:
params = results.params
bse = results.bse
tvalues = results.tvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
data = np.array([params, bse, tvalues, pvalues]).T
data = np.hstack([data, conf_int])
data = pd.DataFrame(data)
if use_t:
data.columns = ['Coef.', 'Std.Err.', 't', 'P>|t|',
'[' + str(alpha / 2), str(1 - alpha / 2) + ']']
else:
data.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',
'[' + str(alpha / 2), str(1 - alpha / 2) + ']']
if not xname:
try:
data.index = results.model.data.param_names
except AttributeError:
data.index = results.model.exog_names
else:
data.index = xname
return data | create a summary table of parameters from results instance
Parameters
----------
results : results instance
some required information is directly taken from the result
instance
yname : {str, None}
optional name for the endogenous variable, default is "y"
xname : {list[str], None}
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
skip_header : bool
If false (default), then the header row is added. If true, then no
header row is added.
float_format : str
float formatting options (e.g. ".3g")
Returns
-------
params_table : pandas.DataFrame | summary_params | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
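A sketch of extracting the coefficient table; note that this summary2 version returns a DataFrame (random data for illustration):

import numpy as np
import statsmodels.api as sm
from statsmodels.iolib.summary2 import summary_params

rng = np.random.default_rng(1)
X = sm.add_constant(rng.standard_normal((40, 1)))
y = X @ [0.5, 2.0] + rng.standard_normal(40)
res = sm.OLS(y, X).fit()
tbl = summary_params(res, alpha=0.10)  # 90% confidence bounds
print(tbl[['Coef.', 'Std.Err.', 'P>|t|']])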
def _col_params(result, float_format='%.4f', stars=True, include_r2=False):
"""Stack coefficients and standard errors in single column
"""
# Extract parameters
res = summary_params(result)
# Format float
for col in res.columns[:2]:
res[col] = res[col].apply(lambda x: float_format % x)
# Std.Errors in parentheses
res.iloc[:, 1] = '(' + res.iloc[:, 1] + ')'
# Significance stars
if stars:
idx = res.iloc[:, 3] < .1
res.loc[idx, res.columns[0]] = res.loc[idx, res.columns[0]] + '*'
idx = res.iloc[:, 3] < .05
res.loc[idx, res.columns[0]] = res.loc[idx, res.columns[0]] + '*'
idx = res.iloc[:, 3] < .01
res.loc[idx, res.columns[0]] = res.loc[idx, res.columns[0]] + '*'
# Stack Coefs and Std.Errors
res = res.iloc[:, :2]
res = res.stack(**FUTURE_STACK)
# Add R-squared
if include_r2:
rsquared = getattr(result, 'rsquared', np.nan)
rsquared_adj = getattr(result, 'rsquared_adj', np.nan)
r2 = pd.Series({('R-squared', ""): rsquared,
('R-squared Adj.', ""): rsquared_adj})
if r2.notnull().any():
r2 = r2.apply(lambda x: float_format % x)
res = pd.concat([res, r2], axis=0)
res = pd.DataFrame(res)
res.columns = [str(result.model.endog_names)]
return res | Stack coefficients and standard errors in single column | _col_params | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
def _col_info(result, info_dict=None):
"""Stack model info in a column
"""
if info_dict is None:
info_dict = {}
out = []
index = []
for i in info_dict:
if isinstance(info_dict[i], dict):
# this is a specific model info_dict, but not for this result...
continue
try:
out.append(info_dict[i](result))
except AttributeError:
out.append('')
index.append(i)
out = pd.DataFrame({str(result.model.endog_names): out}, index=index)
return out | Stack model info in a column | _col_info | python | statsmodels/statsmodels | statsmodels/iolib/summary2.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary2.py | BSD-3-Clause |
def __enter__(self):
"""When entering, return the embedded object"""
return self._obj | When entering, return the embedded object | __enter__ | python | statsmodels/statsmodels | statsmodels/iolib/openfile.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/openfile.py | BSD-3-Clause |
def __exit__(self, *args):
"""Do not hide anything"""
return False | Do not hide anything | __exit__ | python | statsmodels/statsmodels | statsmodels/iolib/openfile.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/openfile.py | BSD-3-Clause |
def get_file_obj(fname, mode="r", encoding=None):
"""
Light wrapper to handle strings, path objects and let files (anything else)
pass through.
    It also handles '.gz' files.
Parameters
----------
fname : str, path object or file-like object
File to open / forward
mode : str
Argument passed to the 'open' or 'gzip.open' function
encoding : str
For Python 3 only, specify the encoding of the file
Returns
-------
A file-like object that is always a context-manager. If the `fname` was
already a file-like object, the returned context manager *will not
close the file*.
"""
if _is_string_like(fname):
fname = Path(fname)
if isinstance(fname, Path):
return fname.open(mode=mode, encoding=encoding)
elif hasattr(fname, "open"):
return fname.open(mode=mode, encoding=encoding)
try:
return open(fname, mode, encoding=encoding)
except TypeError:
try:
            # Make sure the object has the needed read/write methods
if "r" in mode:
fname.read
if "w" in mode or "a" in mode:
fname.write
except AttributeError:
raise ValueError("fname must be a string or a file-like object")
return EmptyContextManager(fname) | Light wrapper to handle strings, path objects and let files (anything else)
pass through.
It also handles '.gz' files.
Parameters
----------
fname : str, path object or file-like object
File to open / forward
mode : str
Argument passed to the 'open' or 'gzip.open' function
encoding : str
For Python 3 only, specify the encoding of the file
Returns
-------
A file-like object that is always a context-manager. If the `fname` was
already a file-like object, the returned context manager *will not
close the file*. | get_file_obj | python | statsmodels/statsmodels | statsmodels/iolib/openfile.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/openfile.py | BSD-3-Clause |
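A sketch of both paths: a string path is opened (and closed) normally, while a passed-in file-like object is wrapped in EmptyContextManager and left open:

import io
from statsmodels.iolib.openfile import get_file_obj

with get_file_obj('notes.txt', 'w', encoding='utf-8') as fh:
    fh.write('written and closed\n')

buf = io.StringIO()
with get_file_obj(buf, 'w') as fh:
    fh.write('kept open\n')
assert not buf.closed  # the wrapper's __exit__ does not close it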
def d_or_f(x, width=6):
"""convert number to string with either integer of float formatting
This is used internally for nobs and degrees of freedom which are usually
integers but can be float in some cases.
Parameters
----------
x : int or float
width : int
only used if x is nan
Returns
-------
str : str
number as formatted string
"""
if np.isnan(x):
return (width - 3) * ' ' + 'NaN'
if x // 1 == x:
return "%#6d" % x
else:
return "%#8.2f" % x | convert number to string with either integer of float formatting
This is used internally for nobs and degrees of freedom which are usually
integers but can be float in some cases.
Parameters
----------
x : int or float
width : int
only used if x is nan
Returns
-------
str : str
number as formatted string | d_or_f | python | statsmodels/statsmodels | statsmodels/iolib/summary.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary.py | BSD-3-Clause |
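Quick illustration of the integer/float dispatch (floats that are whole numbers take the integer path):

from statsmodels.iolib.summary import d_or_f

d_or_f(16)            # '    16'   (6-wide integer)
d_or_f(16.0)          # '    16'   (16.0 // 1 == 16.0)
d_or_f(2.5)           # '    2.50' (8-wide, 2 decimals)
d_or_f(float('nan'))  # '   NaN'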
def ols_printer():
"""
print summary table for ols models
"""
table = str(general_table)+'\n'+str(parameter_table)
return table | print summary table for ols models | summary.ols_printer | python | statsmodels/statsmodels | statsmodels/iolib/summary.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary.py | BSD-3-Clause |
def summary(self, yname=None, xname=None, title=0, alpha=.05,
returns='text', model_info=None):
"""
Parameters
----------
yname : str
optional, Default is `Y`
xname : list[str]
optional, Default is `X.#` for # in p the number of regressors
        Confidence interval : (0,1) not implemented
title : str
            optional, Default is 'Generalized linear model'
returns : str
'text', 'table', 'csv', 'latex', 'html'
Returns
-------
Default :
returns='print'
            Prints the summarized results
Option :
returns='text'
            Prints the summarized results
Option :
returns='table'
SimpleTable instance : summarizing the fit of a linear model.
Option :
returns='csv'
returns a string of csv of the results, to import into a spreadsheet
Option :
returns='latex'
            Not implemented yet
Option :
returns='HTML'
            Not implemented yet
Examples (needs updating)
--------
>>> import statsmodels as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> ols_results = sm.OLS(data.endog, data.exog).results
>>> print ols_results.summary()
...
Notes
-----
conf_int calculated from normal dist.
"""
if title == 0:
title = _model_types[self.model.__class__.__name__]
if xname is not None and len(xname) != len(self.params):
# GH 2298
raise ValueError('User supplied xnames must have the same number of '
'entries as the number of model parameters '
'({})'.format(len(self.params)))
yname, xname = _getnames(self, yname, xname)
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
modeltype = self.model.__class__.__name__
nobs = self.nobs
df_model = self.df_model
df_resid = self.df_resid
#General part of the summary table, Applicable to all? models
#------------------------------------------------------------
# TODO: define this generically, overwrite in model classes
#replace definition of stubs data by single list
#e.g.
gen_left = [('Model type:', [modeltype]),
('Date:', [date]),
('Dependent Variable:', yname), # TODO: What happens with multiple names?
('df model', [df_model])
]
gen_stubs_left, gen_data_left = zip_longest(*gen_left) #transpose row col
gen_title = title
gen_header = None
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title=gen_title,
txt_fmt=gen_fmt
)
gen_stubs_right = ('Method:',
'Time:',
'Number of Obs:',
'df resid')
gen_data_right = ([modeltype], #was dist family need to look at more
time_of_day,
[nobs],
[df_resid]
)
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title=gen_title,
txt_fmt=gen_fmt
)
gen_table_left.extend_right(gen_table_right)
general_table = gen_table_left
# Parameters part of the summary table
# ------------------------------------
# Note: this is not necessary since we standardized names,
# only t versus normal
tstats = {'OLS': self.t(),
'GLS': self.t(),
'GLSAR': self.t(),
'WLS': self.t(),
'RLM': self.t(),
'GLM': self.t()}
prob_stats = {'OLS': self.pvalues,
'GLS': self.pvalues,
'GLSAR': self.pvalues,
'WLS': self.pvalues,
'RLM': self.pvalues,
'GLM': self.pvalues
}
# Dictionary to store the header names for the parameter part of the
# summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
param_header = {
'OLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLSAR' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'WLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLM' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'], #glm uses t-distribution
        'RLM' : ['coef', 'std err', 'z', 'P>|z|', alp + ' Conf. Interval']  # check z
}
params_stubs = xname
params = self.params
conf_int = self.conf_int(alpha)
std_err = self.bse
exog_len = lrange(len(xname))
tstat = tstats[modeltype]
prob_stat = prob_stats[modeltype]
        # SimpleTable should be able to handle the formatting
params_data = lzip(["%#6.4g" % (params[i]) for i in exog_len],
["%#6.4f" % (std_err[i]) for i in exog_len],
["%#6.4f" % (tstat[i]) for i in exog_len],
["%#6.4f" % (prob_stat[i]) for i in exog_len],
["(%#5g, %#5g)" % tuple(conf_int[i]) for i in exog_len])
parameter_table = SimpleTable(params_data,
param_header[modeltype],
params_stubs,
title=None,
txt_fmt=fmt_2
)
#special table
#-------------
#TODO: exists in linear_model, what about other models
#residual diagnostics
#output options
#--------------
#TODO: JP the rest needs to be fixed, similar to summary in linear_model
def ols_printer():
"""
print summary table for ols models
"""
table = str(general_table)+'\n'+str(parameter_table)
return table
def glm_printer():
table = str(general_table)+'\n'+str(parameter_table)
return table
printers = {'OLS': ols_printer, 'GLM': glm_printer}
if returns == 'print':
try:
return printers[modeltype]()
except KeyError:
return printers['OLS']() | Parameters
----------
yname : str
optional, Default is `Y`
xname : list[str]
optional, Default is `X.#` for # in p the number of regressors
Confidence interval : (0,1) not implemented
title : str
optional, Default is 'Generalized linear model'
returns : str
'text', 'table', 'csv', 'latex', 'html'
Returns
-------
Default :
returns='print'
Prints the summarized results
Option :
returns='text'
Prints the summarized results
Option :
returns='table'
SimpleTable instance : summarizing the fit of a linear model.
Option :
returns='csv'
returns a string of csv of the results, to import into a spreadsheet
Option :
returns='latex'
Not implemented yet
Option :
returns='HTML'
Not implemented yet
Examples (needs updating)
--------
>>> import statsmodels as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> ols_results = sm.OLS(data.endog, data.exog).results
>>> print ols_results.summary()
...
Notes
-----
conf_int calculated from normal dist. | summary | python | statsmodels/statsmodels | statsmodels/iolib/summary.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary.py | BSD-3-Clause |
def _getnames(self, yname=None, xname=None):
'''extract names from model or construct names
'''
if yname is None:
if getattr(self.model, 'endog_names', None) is not None:
yname = self.model.endog_names
else:
yname = 'y'
if xname is None:
if getattr(self.model, 'exog_names', None) is not None:
xname = self.model.exog_names
else:
xname = ['var_%d' % i for i in range(len(self.params))]
return yname, xname | extract names from model or construct names | _getnames | python | statsmodels/statsmodels | statsmodels/iolib/summary.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/iolib/summary.py | BSD-3-Clause |