# Selected functions from the statsmodels repository (BSD-3-Clause),
# drawn from the sandbox, emplike, and archive modules.
def acovf_explicit(ar, ma, nobs):
    '''add correlation of MA representation explicitly
'''
ir = arma_impulse_response(ar, ma)
    # note: only the first 10 lags are computed, regardless of ``nobs``
    acovfexpl = [np.dot(ir[:nobs-t], ir[t:nobs]) for t in range(10)]
    return acovfexpl
def autocorr(s, axis=-1):
"""Returns the autocorrelation of signal s at all lags. Adheres to the
definition r(k) = E{s(n)s*(n-k)} where E{} is the expectation operator.
"""
N = s.shape[axis]
S = np.fft.fft(s, n=2*N-1, axis=axis)
sxx = np.fft.ifft(S*S.conjugate(), axis=axis).real[:N]
    return sxx/N
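
# Illustrative check (not in the original module): for a random series,
# the FFT-based autocorrelation above should match a direct computation
# with np.correlate at every lag.
def _check_autocorr(n=64, seed=0):
    rng = np.random.default_rng(seed)
    s = rng.standard_normal(n)
    fast = autocorr(s)
    # direct definition: r(k) = (1/N) * sum_n s[n] * s[n-k], for k = 0..N-1
    slow = np.correlate(s, s, mode='full')[n - 1:] / n
    assert np.allclose(fast, slow)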
def norm_corr(x, y, mode='valid'):
    """Returns the correlation between two ndarrays, by calling np.correlate
    in the given mode (default 'valid') and normalizing the result by the std
    of the arrays and by their lengths. This gives a correlation of 1 for an
    auto-correlation at full overlap."""
    return (np.correlate(x, y, mode) /
            (np.std(x) * np.std(y) * x.shape[-1]))
def pltxcorr(self, x, y, normed=True, detrend=detrend_none,
usevlines=True, maxlags=10, **kwargs):
"""
call signature::
def xcorr(self, x, y, normed=True, detrend=detrend_none,
usevlines=True, maxlags=10, **kwargs):
    Plot the cross correlation between *x* and *y*. If *normed* =
    *True*, normalize the data by the cross correlation at 0-th
    lag. *x* and *y* are detrended by the *detrend* callable
    (default: no detrending). *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
    *maxlags* is a positive integer detailing the number of lags to show.
    Passing *None* returns all ``2*len(x)-1`` lags (the default here is 10).
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
    c = np.correlate(x, y, mode=2)
    if normed:
        # avoid in-place division, which fails for integer input arrays
        c = c / np.sqrt(np.dot(x, x) * np.dot(y, y))
if maxlags is None:
maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
raise ValueError('maxlags must be None or strictly '
'positive < %d' % Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
self.plot(lags, c, **kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
    return lags, c, a, b
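
# Illustrative usage sketch (not in the original module): ``pltxcorr``
# expects a matplotlib Axes as its first argument, so it can be called as
# a free function on an existing Axes instance.
def _demo_pltxcorr():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    rng = np.random.default_rng(0)
    x, y = rng.standard_normal(100), rng.standard_normal(100)
    lags, c, linecol, baseline = pltxcorr(ax, x, y, maxlags=10)
    plt.show()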
def _ll_nb1(y, X, beta, alph):
'''Negative Binomial regression (type 1 likelihood)'''
ll = _ll_nbp(y, X, beta, alph, Q=1)
    return ll
def _ll_nb2(y, X, beta, alph):
'''Negative Binomial regression (type 2 likelihood)'''
ll = _ll_nbp(y, X, beta, alph, Q=0)
    return ll
def _ll_geom(y, X, beta):
'''Geometric regression'''
ll = _ll_nbp(y, X, beta, alph=1, Q=0)
    return ll
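
# The three wrappers above delegate to ``_ll_nbp``, the generalized
# Negative Binomial (NB-P) log-likelihood defined elsewhere in this
# module. The sketch below (hypothetical name ``_ll_nbp_sketch``) shows
# the assumed computation: a log link and the scipy.stats.nbinom
# parameterization, in which Q=0 gives NB2 and Q=1 gives NB1.
def _ll_nbp_sketch(y, X, beta, alph, Q):
    from scipy.stats import nbinom
    mu = np.exp(np.dot(X, beta))    # conditional mean under a log link
    size = 1. / alph * mu ** Q      # dispersion enters through ``size``
    prob = size / (size + mu)
    return nbinom.logpmf(y, size, prob)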
def print_results2(res):
groupind = res.groups
#res.fitjoint() #not really necessary, because called by ftest_summary
ft = res.ftest_summary()
#print ft[0] #skip because table is nicer
    templ = \
'''Table of F-tests for overall or pairwise equality of coefficients
%(tab)s

Notes: p-values are not corrected for multiple tests
       (no Bonferroni correction)
* : reject at 5%% uncorrected confidence level
Null hypothesis: all or pairwise coefficients are the same
Alternative hypothesis: coefficients are different

Comparison with stats.f_oneway
%(statsfow)s

Likelihood Ratio Test
%(lrtest)s
Null model: pooled, all coefficients are the same across groups
Alternative model: all coefficients are allowed to be different
(not verified, but looks close to the f-test result)

OLS parameters by group from individual, separate OLS regressions
%(olsbg)s

Check for heteroscedasticity:
variance and standard deviation for the individual regressions
%(grh)s
'''
from statsmodels.iolib import SimpleTable
resvals = {}
    resvals['tab'] = str(SimpleTable([([f'{row[0]!r}']
                                       + list(row[1])
                                       # flag pairs rejected at the 5% level
                                       + ['*'] * int(row[1][1] < 0.05))
                                      for row in ft[1]],
                                     headers=['pair', 'F-statistic',
                                              'p-value', 'df_denom',
                                              'df_num']))
    # note: ``y`` here is the module-level response variable of the example
    resvals['statsfow'] = str(stats.f_oneway(*[y[groupind == gr] for gr in
                                               res.unique]))
#resvals['lrtest'] = str(res.lr_test())
resvals['lrtest'] = str(SimpleTable([res.lr_test()],
headers=['likelihood ratio', 'p-value', 'df'] ))
resvals['olsbg'] = str(SimpleTable([[group]
+ res.olsbygroup[group].params.tolist()
for group in sorted(res.olsbygroup)]))
resvals['grh'] = str(SimpleTable(np.vstack([res.sigmabygroup,
np.sqrt(res.sigmabygroup)]),
headers=res.unique.tolist()))
    return templ % resvals
def loglike_ar1(x, rho):
    '''loglikelihood of an AR(1) process, as a test case
    sigma_u is partially hard coded
    Greene, chapter 12, eq. (12-31)
    '''
x = np.asarray(x)
u = np.r_[x[0], x[1:] - rho * x[:-1]]
sigma_u2 = 2*(1-rho**2)
loglik = 0.5*(-(u**2).sum(0) / sigma_u2 + np.log(1-rho**2)
- x.shape[0] * (np.log(2*np.pi) + np.log(sigma_u2)))
    return loglik
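
# For reference, the AR(1) log-likelihood implemented above (Greene,
# eq. 12-31), with u_1 = x_1 and u_t = x_t - rho * x_{t-1}:
#
#   ln L = -(n/2) * [ln(2*pi) + ln(sigma_u^2)]
#          + (1/2) * ln(1 - rho^2)
#          - sum_t u_t^2 / (2 * sigma_u^2)
#
# where sigma_u^2 is hard coded above as 2 * (1 - rho^2).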
def ar2transform(x, arcoefs):
'''
(Greene eq 12-30)
'''
a1, a2 = arcoefs
y = np.zeros_like(x)
y[0] = np.sqrt((1+a2) * ((1-a2)**2 - a1**2) / (1-a2)) * x[0]
    # second transformed observation, from the P matrix in Greene eq. (12-30);
    # resolves the original "#TODO: wrong index in x"
    y[1] = np.sqrt(1-a2**2) * x[1] - a1 * np.sqrt(1-a2**2)/(1-a2) * x[0]
y[2:] = x[2:] - a1 * x[1:-1] - a2 * x[:-2]
    return y
def mvn_loglike(x, sigma):
'''loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
'''
#see numpy thread
#Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
sigmainv = linalg.inv(sigma)
logdetsigma = np.log(np.linalg.det(sigma))
nobs = len(x)
llf = - np.dot(x, np.dot(sigmainv, x))
llf -= nobs * np.log(2 * np.pi)
llf -= logdetsigma
llf *= 0.5
    return llf
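
# A minimal sketch of the more efficient evaluation suggested in the
# docstring above, via a Cholesky factor instead of inv and det. It
# assumes ``linalg`` is scipy.linalg, as elsewhere in this module; the
# name ``mvn_loglike_chol`` is hypothetical.
def mvn_loglike_chol(x, sigma):
    '''loglike multivariate normal using a Cholesky factorization'''
    nobs = len(x)
    cho, lower = linalg.cho_factor(sigma, lower=True)
    # Mahalanobis term via a triangular solve, no explicit inverse
    maha = np.dot(x, linalg.cho_solve((cho, lower), x))
    # log|sigma| = 2 * sum(log(diag(L))), numerically stabler than log(det)
    logdetsigma = 2 * np.sum(np.log(np.diag(cho)))
    return -0.5 * (maha + nobs * np.log(2 * np.pi) + logdetsigma)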
def yt_minv_y(self, y):
        '''y' Sigma^{-1} y
        does not use the stored Cholesky factor yet
        '''
        return np.dot(y, linalg.cho_solve(linalg.cho_factor(self.m), y))
def tiny2zero(x, eps = 1e-15):
    '''replace abs values smaller than eps by zero, makes a copy
'''
    x = x.copy()  # copy first so the input array is left unmodified
    mask = np.abs(x) < eps
    x[mask] = 0
    return x
def acovf_fft(x, demean=True):
'''autocovariance function with call to fftconvolve, biased
Parameters
----------
x : array_like
timeseries, signal
demean : bool
If true, then demean time series
Returns
-------
acovf : ndarray
autocovariance for data, same length as x
might work for nd in parallel with time along axis 0
'''
from scipy import signal
x = np.asarray(x)
if demean:
x = x - x.mean()
    # biased autocovariances at lags 0..len(x)-1
    return signal.fftconvolve(x, x[::-1])[len(x) - 1:] / x.shape[0]
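
# Illustrative check (not in the original module): the FFT-based
# autocovariance should agree with the direct biased sample autocovariance.
def _check_acovf_fft(n=50, seed=0):
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(n)
    xd = x - x.mean()
    direct = np.array([np.dot(xd[:n - k], xd[k:]) for k in range(n)]) / n
    assert np.allclose(acovf_fft(x), direct)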
def _opt_wtd_nuis_regress(self, test_vals):
"""
A function that is optimized over nuisance parameters to conduct a
hypothesis test for the parameters of interest
Parameters
----------
        test_vals : 1d array
            The regression coefficients of the model. This includes the
            nuisance parameters and the parameters of interest.
Returns
-------
llr : float
-2 times the log likelihood of the nuisance parameters and the
hypothesized value of the parameter(s) of interest.
"""
test_params = test_vals.reshape(self.model.nvar, 1)
est_vect = self.model.uncens_exog * (self.model.uncens_endog -
np.dot(self.model.uncens_exog,
test_params))
eta_star = self._modif_newton(np.zeros(self.model.nvar), est_vect,
self.model._fit_weights)
denom = np.sum(self.model._fit_weights) + np.dot(eta_star, est_vect.T)
self.new_weights = self.model._fit_weights / denom
        return -1 * np.sum(np.log(self.new_weights))
def _EM_test(self, nuisance_params, params=None, param_nums=None,
b0_vals=None, F=None, survidx=None, uncens_nobs=None,
numcensbelow=None, km=None, uncensored=None, censored=None,
maxiter=None, ftol=None):
"""
Uses EM algorithm to compute the maximum likelihood of a test
Parameters
----------
nuisance_params : ndarray
Vector of values to be used as nuisance params.
maxiter : int
Number of iterations in the EM algorithm for a parameter vector
Returns
-------
        -2 * log-likelihood ratio at hypothesized values and
nuisance params
Notes
-----
Optional parameters are provided by the test_beta function.
"""
iters = 0
params[param_nums] = b0_vals
nuis_param_index = np.int_(np.delete(np.arange(self.model.nvar),
param_nums))
params[nuis_param_index] = nuisance_params
to_test = params.reshape(self.model.nvar, 1)
opt_res = np.inf
diff = np.inf
while iters < maxiter and diff > ftol:
F = F.flatten()
death = np.cumsum(F[::-1])
survivalprob = death[::-1]
surv_point_mat = np.dot(F.reshape(-1, 1),
1. / survivalprob[survidx].reshape(1, - 1))
surv_point_mat = add_constant(surv_point_mat)
summed_wts = np.cumsum(surv_point_mat, axis=1)
wts = summed_wts[np.int_(np.arange(uncens_nobs)),
numcensbelow[uncensored]]
# ^E step
# See Zhou 2005, section 3.
self.model._fit_weights = wts
new_opt_res = self._opt_wtd_nuis_regress(to_test)
# ^ Uncensored weights' contribution to likelihood value.
F = self.new_weights
# ^ M step
diff = np.abs(new_opt_res - opt_res)
opt_res = new_opt_res
iters = iters + 1
death = np.cumsum(F.flatten()[::-1])
survivalprob = death[::-1]
llike = -opt_res + np.sum(np.log(survivalprob[survidx]))
wtd_km = km.flatten() / np.sum(km)
survivalmax = np.cumsum(wtd_km[::-1])[::-1]
llikemax = np.sum(np.log(wtd_km[uncensored])) + \
np.sum(np.log(survivalmax[censored]))
if iters == maxiter:
warnings.warn('The EM reached the maximum number of iterations',
IterationLimitWarning)
        return -2 * (llike - llikemax)
def _ci_limits_beta(self, b0, param_num=None):
"""
Returns the difference between the log likelihood for a
parameter and some critical value.
Parameters
----------
b0: float
Value of a regression parameter
param_num : int
Parameter index of b0
"""
        return self.test_beta([b0], [param_num])[0] - self.r0
def _is_tied(self, endog, censors):
"""
        Indicates whether an observation takes the same value as the next
ordered observation.
Parameters
----------
endog : ndarray
Models endogenous variable
censors : ndarray
            Array indicating which observations are censored
Returns
-------
indic_ties : ndarray
ties[i]=1 if endog[i]==endog[i+1] and
censors[i]=censors[i+1]
"""
nobs = int(self.nobs)
        endog_idx = endog[:nobs - 1] == endog[1:nobs]
        censors_idx = censors[:nobs - 1] == censors[1:nobs]
indic_ties = endog_idx * censors_idx # Both true
        return np.int_(indic_ties)
def _km_w_ties(self, tie_indic, untied_km):
"""
        Computes the KM estimator value at each observation, taking into account
ties in the data.
Parameters
----------
tie_indic: 1d array
Indicates if the i'th observation is the same as the ith +1
untied_km: 1d array
Km estimates at each observation assuming no ties.
"""
# TODO: Vectorize, even though it is only 1 pass through for any
# function call
num_same = 1
idx_nums = []
for obs_num in np.arange(int(self.nobs - 1))[::-1]:
if tie_indic[obs_num] == 1:
idx_nums.append(obs_num)
num_same = num_same + 1
untied_km[obs_num] = untied_km[obs_num + 1]
elif tie_indic[obs_num] == 0 and num_same > 1:
idx_nums.append(max(idx_nums) + 1)
idx_nums = np.asarray(idx_nums)
untied_km[idx_nums] = untied_km[idx_nums]
num_same = 1
idx_nums = []
        return untied_km.reshape(self.nobs, 1)
def _make_km(self, endog, censors):
"""
Computes the Kaplan-Meier estimate for the weights in the AFT model
Parameters
----------
endog: nx1 array
Array of response variables
censors: nx1 array
Censor-indicating variable
Returns
-------
Kaplan Meier estimate for each observation
Notes
-----
        This function makes calls to _is_tied and _km_w_ties to handle ties in
        the data. If a censored observation and an uncensored observation have
        the same value, it is assumed that the uncensored one happened first.
"""
nobs = self.nobs
num = (nobs - (np.arange(nobs) + 1.))
denom = (nobs - (np.arange(nobs) + 1.) + 1.)
km = (num / denom).reshape(nobs, 1)
km = km ** np.abs(censors - 1.)
km = np.cumprod(km) # If no ties, this is kaplan-meier
tied = self._is_tied(endog, censors)
wtd_km = self._km_w_ties(tied, km)
        return (censors / wtd_km).reshape(nobs, 1)
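
# For reference (reading off the code above): with observations sorted and
# delta_j = censors[j] (1 = uncensored), the cumulative product computes the
# Kaplan-Meier estimate of the *censoring* distribution,
#
#   G_hat(t_i) = prod_{j <= i} ((n - j) / (n - j + 1)) ** (1 - delta_j),
#
# and the returned weights are delta_i / G_hat(t_i), i.e. inverse
# probability of censoring weights that are zero for censored observations.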
def fit(self):
"""
Fits an AFT model and returns results instance
Parameters
----------
None
Returns
-------
Results instance.
Notes
-----
To avoid dividing by zero, max(endog) is assumed to be uncensored.
"""
        return AFTResults(self)
def params(self):
"""
Fits an AFT model and returns parameters.
Parameters
----------
None
Returns
-------
Fitted params
Notes
-----
To avoid dividing by zero, max(endog) is assumed to be uncensored.
"""
self.model.modif_censors = np.copy(self.model.censors)
self.model.modif_censors[-1] = 1
wts = self.model._make_km(self.model.endog, self.model.modif_censors)
res = WLS(self.model.endog, self.model.exog, wts).fit()
params = res.params
        return params
def test_beta(self, b0_vals, param_nums, ftol=10 ** - 5, maxiter=30,
print_weights=1):
"""
Returns the profile log likelihood for regression parameters
'param_num' at 'b0_vals.'
Parameters
----------
b0_vals : list
The value of parameters to be tested
param_num : list
Which parameters to be tested
maxiter : int, optional
How many iterations to use in the EM algorithm. Default is 30
ftol : float, optional
The function tolerance for the EM optimization.
            Default is 10**-5
print_weights : bool
            If true, returns the weights that maximize the profile
log likelihood. Default is False
Returns
-------
test_results : tuple
            The log-likelihood and p-value of the test.
Notes
-----
The function will warn if the EM reaches the maxiter. However, when
optimizing over nuisance parameters, it is possible to reach a
maximum number of inner iterations for a specific value for the
        nuisance parameters while the results of the function are still valid.
This usually occurs when the optimization over the nuisance parameters
        selects parameter values that yield a log-likelihood ratio close to
infinity.
Examples
--------
>>> import statsmodels.api as sm
>>> import numpy as np
        # Test that the parameter is 0 in a one-regressor, no-intercept model
>>> data=sm.datasets.heart.load()
>>> y = np.log10(data.endog)
>>> x = data.exog
>>> cens = data.censors
>>> model = sm.emplike.emplikeAFT(y, x, cens)
>>> res=model.test_beta([0], [0])
>>> res
(1.4657739632606308, 0.22601365256959183)
#Test slope is 0 in model with intercept
>>> data=sm.datasets.heart.load()
>>> y = np.log10(data.endog)
>>> x = data.exog
>>> cens = data.censors
>>> model = sm.emplike.emplikeAFT(y, sm.add_constant(x), cens)
>>> res = model.test_beta([0], [1])
>>> res
(4.623487775078047, 0.031537049752572731)
"""
censors = self.model.censors
endog = self.model.endog
exog = self.model.exog
uncensored = (censors == 1).flatten()
censored = (censors == 0).flatten()
uncens_endog = endog[uncensored]
uncens_exog = exog[uncensored, :]
reg_model = OLS(uncens_endog, uncens_exog).fit()
llr, pval, new_weights = reg_model.el_test(b0_vals, param_nums,
return_weights=True) # Needs to be changed
km = self.model._make_km(endog, censors).flatten() # when merged
uncens_nobs = self.model.uncens_nobs
F = np.asarray(new_weights).reshape(uncens_nobs)
# Step 0 ^
params = self.params()
survidx = np.where(censors == 0)
survidx = survidx[0] - np.arange(len(survidx[0]))
numcensbelow = np.int_(np.cumsum(1 - censors))
if len(param_nums) == len(params):
llr = self._EM_test([], F=F, params=params,
param_nums=param_nums,
b0_vals=b0_vals, survidx=survidx,
uncens_nobs=uncens_nobs,
numcensbelow=numcensbelow, km=km,
uncensored=uncensored, censored=censored,
ftol=ftol, maxiter=25)
return llr, chi2.sf(llr, self.model.nvar)
else:
x0 = np.delete(params, param_nums)
try:
res = optimize.fmin(self._EM_test, x0,
(params, param_nums, b0_vals, F, survidx,
uncens_nobs, numcensbelow, km, uncensored,
censored, maxiter, ftol), full_output=1,
disp=0)
llr = res[1]
return llr, chi2.sf(llr, len(param_nums))
except np.linalg.LinAlgError:
                return np.inf, 0
def ci_beta(self, param_num, beta_high, beta_low, sig=.05):
"""
Returns the confidence interval for a regression
parameter in the AFT model.
Parameters
----------
param_num : int
Parameter number of interest
beta_high : float
Upper bound for the confidence interval
beta_low : float
Lower bound for the confidence interval
sig : float, optional
Significance level. Default is .05
Notes
-----
        If the optimizer raises the error 'f(a) and f(b) must have different
        signs', consider widening the search area by adjusting beta_low and
        beta_high.
        Also note that this process is computationally intensive. There
are 4 levels of optimization/solving. From outer to inner:
1) Solving so that llr-critical value = 0
2) maximizing over nuisance parameters
        3) Using EM at each value of nuisance parameters
4) Using the _modified_Newton optimizer at each iteration
of the EM algorithm.
Also, for very unlikely nuisance parameters, it is possible for
the EM algorithm to not converge. This is not an indicator
that the solver did not find the correct solution. It just means
for a specific iteration of the nuisance parameters, the optimizer
was unable to converge.
If the user desires to verify the success of the optimization,
it is recommended to test the limits using test_beta.
"""
params = self.params()
self.r0 = chi2.ppf(1 - sig, 1)
ll = optimize.brentq(self._ci_limits_beta, beta_low,
params[param_num], (param_num))
ul = optimize.brentq(self._ci_limits_beta,
params[param_num], beta_high, (param_num))
        return ll, ul
def fit(self):
"""
Fits the model and provides regression results.
Returns
-------
Results : class
Empirical likelihood regression class.
"""
exog_with = add_constant(self.exog, prepend=True)
restricted_model = OLS(self.endog, exog_with)
restricted_fit = restricted_model.fit()
restricted_el = restricted_fit.el_test(
np.array([0]), np.array([0]), ret_params=1)
params = np.squeeze(restricted_el[3])
beta_hat_llr = restricted_el[0]
llf = np.sum(np.log(restricted_el[2]))
        return OriginResults(restricted_model, params, beta_hat_llr, llf)
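
# Illustrative usage sketch (assuming the class is exposed as
# sm.emplike.ELOriginRegress; data and names here are hypothetical):
def _demo_el_origin():
    import numpy as np
    import statsmodels.api as sm
    rng = np.random.default_rng(0)
    x = rng.standard_normal((100, 1))
    y = 2 * x[:, 0] + rng.standard_normal(100)
    fitted = sm.emplike.ELOriginRegress(y, x).fit()
    return fitted.params  # intercept (constrained to 0) and slope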
def el_test(self, b0_vals, param_nums, method='nm',
stochastic_exog=1, return_weights=0):
"""
Returns the llr and p-value for a hypothesized parameter value
for a regression that goes through the origin.
Parameters
----------
b0_vals : 1darray
The hypothesized value to be tested.
param_num : 1darray
Which parameters to test. Note this uses python
indexing but the '0' parameter refers to the intercept term,
which is assumed 0. Therefore, param_num should be > 0.
return_weights : bool
If true, returns the weights that optimize the likelihood
ratio at b0_vals. Default is False.
method : str
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
Default is 'nm'.
stochastic_exog : bool
When TRUE, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. Default is TRUE.
Returns
-------
res : tuple
            The likelihood ratio and p-value (in that order).
"""
b0_vals = np.hstack((0, b0_vals))
param_nums = np.hstack((0, param_nums))
test_res = self.model.fit().el_test(b0_vals, param_nums, method=method,
stochastic_exog=stochastic_exog,
return_weights=return_weights)
llr_test = test_res[0]
llr_res = llr_test - self.llr
pval = chi2.sf(llr_res, self.model.exog.shape[1] - 1)
if return_weights:
return llr_res, pval, test_res[2]
else:
            return llr_res, pval
def conf_int_el(self, param_num, upper_bound=None,
lower_bound=None, sig=.05, method='nm',
stochastic_exog=True):
"""
Returns the confidence interval for a regression parameter when the
regression is forced through the origin.
Parameters
----------
param_num : int
The parameter number to be tested. Note this uses python
indexing but the '0' parameter refers to the intercept term.
upper_bound : float
The maximum value the upper confidence limit can be. The
closer this is to the confidence limit, the quicker the
computation. Default is .00001 confidence limit under normality.
lower_bound : float
The minimum value the lower confidence limit can be.
Default is .00001 confidence limit under normality.
sig : float, optional
The significance level. Default .05.
method : str, optional
            Algorithm to optimize over the nuisance params. Can be 'nm' or
'powell'. Default is 'nm'.
stochastic_exog : bool
Default is True.
Returns
-------
ci: tuple
The confidence interval for the parameter 'param_num'.
"""
r0 = chi2.ppf(1 - sig, 1)
param_num = np.array([param_num])
if upper_bound is None:
ci = np.asarray(self.model.fit().conf_int(.0001))
upper_bound = (np.squeeze(ci[param_num])[1])
if lower_bound is None:
ci = np.asarray(self.model.fit().conf_int(.0001))
lower_bound = (np.squeeze(ci[param_num])[0])
def f(b0):
b0 = np.array([b0])
val = self.el_test(
b0, param_num, method=method, stochastic_exog=stochastic_exog
)
return val[0] - r0
_param = np.squeeze(self.params[param_num])
lowerl = optimize.brentq(f, np.squeeze(lower_bound), _param)
upperl = optimize.brentq(f, _param, np.squeeze(upper_bound))
        return (lowerl, upperl)
def _opt_common_mu(self, mu):
"""
Optimizes the likelihood under the null hypothesis that all groups have
mean mu.
Parameters
----------
mu : float
The common mean.
Returns
-------
llr : float
            -2 times the log-likelihood ratio, which is the test statistic.
"""
nobs = self.nobs
endog = self.endog
num_groups = self.num_groups
endog_asarray = np.zeros((nobs, num_groups))
obs_num = 0
for arr_num in range(len(endog)):
new_obs_num = obs_num + len(endog[arr_num])
endog_asarray[obs_num: new_obs_num, arr_num] = endog[arr_num] - \
mu
obs_num = new_obs_num
est_vect = endog_asarray
wts = np.ones(est_vect.shape[0]) * (1. / (est_vect.shape[0]))
eta_star = self._modif_newton(np.zeros(num_groups), est_vect, wts)
denom = 1. + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
        return -2 * llr
def compute_ANOVA(self, mu=None, mu_start=0, return_weights=0):
"""
Returns -2 log likelihood, the pvalue and the maximum likelihood
estimate for a common mean.
Parameters
----------
mu : float
If a mu is specified, ANOVA is conducted with mu as the
common mean. Otherwise, the common mean is the maximum
empirical likelihood estimate of the common mean.
Default is None.
mu_start : float
            Starting value for the common mean if a specific mu is not
            specified. Default = 0.
return_weights : bool
if TRUE, returns the weights on observations that maximize the
likelihood. Default is FALSE.
Returns
-------
res: tuple
The log-likelihood, p-value and estimate for the common mean.
"""
if mu is not None:
llr = self._opt_common_mu(mu)
pval = 1 - chi2.cdf(llr, self.num_groups - 1)
if return_weights:
return llr, pval, mu, self.new_weights
else:
return llr, pval, mu
else:
res = optimize.fmin_powell(self._opt_common_mu, mu_start,
full_output=1, disp=False)
llr = res[1]
mu_common = float(np.squeeze(res[0]))
pval = 1 - chi2.cdf(llr, self.num_groups - 1)
if return_weights:
return llr, pval, mu_common, self.new_weights
else:
                return llr, pval, mu_common
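
# Illustrative usage sketch (assuming the class is exposed as
# statsmodels.emplike.elanova.ANOVA; data here are hypothetical):
def _demo_el_anova():
    import numpy as np
    from statsmodels.emplike.elanova import ANOVA
    rng = np.random.default_rng(0)
    groups = [rng.standard_normal(30), rng.standard_normal(40) + .1]
    llr, pval, mu_common = ANOVA(groups).compute_ANOVA()
    return llr, pval, mu_common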
def _opt_nuis_regress(self, nuisance_params, param_nums=None,
endog=None, exog=None,
nobs=None, nvar=None, params=None, b0_vals=None,
stochastic_exog=None):
"""
A function that is optimized over nuisance parameters to conduct a
hypothesis test for the parameters of interest.
Parameters
----------
nuisance_params: 1darray
Parameters to be optimized over.
Returns
-------
llr : float
-2 x the log-likelihood of the nuisance parameters and the
hypothesized value of the parameter(s) of interest.
"""
params[param_nums] = b0_vals
nuis_param_index = np.int_(np.delete(np.arange(nvar),
param_nums))
params[nuis_param_index] = nuisance_params
new_params = params.reshape(nvar, 1)
self.new_params = new_params
est_vect = exog * \
(endog - np.squeeze(np.dot(exog, new_params))).reshape(int(nobs), 1)
if not stochastic_exog:
exog_means = np.mean(exog, axis=0)[1:]
exog_mom2 = (np.sum(exog * exog, axis=0))[1:]\
/ nobs
mean_est_vect = exog[:, 1:] - exog_means
mom2_est_vect = (exog * exog)[:, 1:] - exog_mom2
regressor_est_vect = np.concatenate((mean_est_vect, mom2_est_vect),
axis=1)
est_vect = np.concatenate((est_vect, regressor_est_vect),
axis=1)
wts = np.ones(int(nobs)) * (1. / nobs)
x0 = np.zeros(est_vect.shape[1]).reshape(-1, 1)
try:
eta_star = self._modif_newton(x0, est_vect, wts)
denom = 1. + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
# the following commented out code is to verify weights
# see open issue #1845
#self.new_weights /= self.new_weights.sum()
#if not np.allclose(self.new_weights.sum(), 1., rtol=0, atol=1e-10):
# raise RuntimeError('weights do not sum to 1')
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr
except np.linalg.LinAlgError:
            return np.inf
def DescStat(endog):
"""
Returns an instance to conduct inference on descriptive statistics
via empirical likelihood. See DescStatUV and DescStatMV for more
information.
Parameters
----------
endog : ndarray
Array of data
    Returns
    -------
    DescStat instance
        If k=1, the function returns a univariate instance, DescStatUV.
        If k>1, the function returns a multivariate instance, DescStatMV.
"""
if endog.ndim == 1:
endog = endog.reshape(len(endog), 1)
if endog.shape[1] == 1:
return DescStatUV(endog)
if endog.shape[1] > 1:
        return DescStatMV(endog)
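
# Illustrative usage sketch (assuming DescStat is exposed as
# sm.emplike.DescStat; data here are hypothetical):
def _demo_descstat():
    import numpy as np
    import statsmodels.api as sm
    rng = np.random.default_rng(0)
    data = rng.standard_normal(50) + 5
    el = sm.emplike.DescStat(data)
    llr, pval = el.test_mean(5)     # EL test that the mean equals 5
    return llr, pval, el.ci_mean()  # plus an EL confidence interval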
def _log_star(self, eta, est_vect, weights, nobs):
"""
Transforms the log of observation probabilities in terms of the
Lagrange multiplier to the log 'star' of the probabilities.
Parameters
----------
eta : float
Lagrange multiplier
est_vect : ndarray (n,k)
Estimating equations vector
wts : nx1 array
Observation weights
Returns
        -------
data_star : ndarray
            The weighted logstar of the estimating equations
Notes
-----
This function is only a placeholder for the _fit_Newton.
The function value is not used in optimization and the optimal value
is disregarded when computing the log likelihood ratio.
"""
data_star = np.log(weights) + (np.sum(weights) +\
np.dot(est_vect, eta))
idx = data_star < 1. / nobs
not_idx = ~idx
nx = nobs * data_star[idx]
data_star[idx] = np.log(1. / nobs) - 1.5 + nx * (2. - nx / 2)
data_star[not_idx] = np.log(data_star[not_idx])
        return data_star
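
# For reference, the piecewise "log-star" computed above (cf. Owen, 2001):
# with n = nobs and argument z,
#
#   log*(z) = log(z)                                    for z >= 1/n
#   log*(z) = log(1/n) - 3/2 + 2*n*z - (n*z)**2 / 2     for z <  1/n
#
# i.e. below 1/n the log is replaced by the quadratic matching it in value
# and first two derivatives at z = 1/n, so Newton steps stay well-defined
# when candidate probabilities become nonpositive.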
def _hess(self, eta, est_vect, weights, nobs):
"""
Calculates the hessian of a weighted empirical likelihood
problem.
Parameters
----------
eta : ndarray, (1,m)
Lagrange multiplier in the profile likelihood maximization
est_vect : ndarray (n,k)
Estimating equations vector
weights : 1darray
Observation weights
Returns
-------
hess : m x m array
Weighted hessian used in _wtd_modif_newton
"""
#eta = np.squeeze(eta)
data_star_doub_prime = np.sum(weights) + np.dot(est_vect, eta)
idx = data_star_doub_prime < 1. / nobs
not_idx = ~idx
data_star_doub_prime[idx] = - nobs ** 2
data_star_doub_prime[not_idx] = - (data_star_doub_prime[not_idx]) ** -2
wtd_dsdp = weights * data_star_doub_prime
        return np.dot(est_vect.T, wtd_dsdp[:, None] * est_vect)
def _grad(self, eta, est_vect, weights, nobs):
"""
Calculates the gradient of a weighted empirical likelihood
problem
Parameters
----------
eta : ndarray, (1,m)
Lagrange multiplier in the profile likelihood maximization
est_vect : ndarray, (n,k)
Estimating equations vector
weights : 1darray
Observation weights
Returns
-------
gradient : ndarray (m,1)
The gradient used in _wtd_modif_newton
"""
#eta = np.squeeze(eta)
data_star_prime = np.sum(weights) + np.dot(est_vect, eta)
idx = data_star_prime < 1. / nobs
not_idx = ~idx
data_star_prime[idx] = nobs * (2 - nobs * data_star_prime[idx])
data_star_prime[not_idx] = 1. / data_star_prime[not_idx]
        return np.dot(weights * data_star_prime, est_vect)
def _modif_newton(self, eta, est_vect, weights):
"""
Modified Newton's method for maximizing the log 'star' equation. This
function calls _fit_newton to find the optimal values of eta.
Parameters
----------
eta : ndarray, (1,m)
Lagrange multiplier in the profile likelihood maximization
est_vect : ndarray, (n,k)
Estimating equations vector
weights : 1darray
Observation weights
Returns
-------
params : 1xm array
Lagrange multiplier that maximizes the log-likelihood
"""
nobs = len(est_vect)
def f(x0):
return -np.sum(self._log_star(x0, est_vect, weights, nobs))
def grad(x0):
return -self._grad(x0, est_vect, weights, nobs)
def hess(x0):
return -self._hess(x0, est_vect, weights, nobs)
kwds = {'tol': 1e-8}
eta = eta.squeeze()
res = _fit_newton(f, grad, eta, (), kwds, hess=hess, maxiter=50, \
disp=0)
        return res[0]
def _find_eta(self, eta):
"""
        Finding the root of sum((x_i - mu0) / (1 + eta * (x_i - mu0)))
        solves for eta when computing the ELR for the univariate mean.
Parameters
----------
eta : float
Lagrange multiplier in the empirical likelihood maximization
Returns
-------
        estfunc : float
            The value of the estimating equation at the given eta; its root
            gives the optimal Lagrange multiplier.
"""
return np.sum((self.endog - self.mu0) / \
                      (1. + eta * (self.endog - self.mu0)))
def _ci_limits_mu(self, mu):
"""
Calculates the difference between the log likelihood of mu_test and a
specified critical value.
Parameters
----------
mu : float
Hypothesized value of the mean.
Returns
-------
diff : float
The difference between the log likelihood value of mu0 and
a specified value.
"""
        return self.test_mean(mu)[0] - self.r0
def _find_gamma(self, gamma):
"""
        Finds gamma that satisfies
        -2 * sum(log(n * w(gamma))) - r0 = 0
        Used for confidence intervals for the mean.
Parameters
----------
gamma : float
Lagrange multiplier when computing confidence interval
Returns
-------
diff : float
            The difference between the log-likelihood when the Lagrange
multiplier is gamma and a pre-specified value
"""
denom = np.sum((self.endog - gamma) ** -1)
new_weights = (self.endog - gamma) ** -1 / denom
return -2 * np.sum(np.log(self.nobs * new_weights)) - \
            self.r0
def _opt_var(self, nuisance_mu, pval=False):
"""
This is the function to be optimized over a nuisance mean parameter
to determine the likelihood ratio for the variance
Parameters
----------
nuisance_mu : float
Value of a nuisance mean parameter
Returns
-------
llr : float
Log likelihood of a pre-specified variance holding the nuisance
parameter constant
"""
endog = self.endog
nobs = self.nobs
sig_data = ((endog - nuisance_mu) ** 2 \
- self.sig2_0)
mu_data = (endog - nuisance_mu)
est_vect = np.column_stack((mu_data, sig_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1 + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
if pval: # Used for contour plotting
return chi2.sf(-2 * llr, 1)
        return -2 * llr
def _ci_limits_var(self, var):
"""
Used to determine the confidence intervals for the variance.
        It calls test_var and, when called by a root-finder, locates the
        value of sig2_0 whose log-likelihood ratio equals the
        chi2.ppf(1 - significance_level, 1) critical value.
Parameters
----------
var_test : float
Hypothesized value of the variance
Returns
-------
diff : float
The difference between the log likelihood ratio at var_test and a
pre-specified value.
"""
        return self.test_var(var)[0] - self.r0
def _opt_skew(self, nuis_params):
"""
Called by test_skew. This function is optimized over
nuisance parameters mu and sigma
Parameters
----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
-------
llr : float
The log likelihood ratio of a pre-specified skewness holding
the nuisance parameters constant.
"""
endog = self.endog
nobs = self.nobs
mu_data = endog - nuis_params[0]
sig_data = ((endog - nuis_params[0]) ** 2) - nuis_params[1]
skew_data = (((endog - nuis_params[0]) ** 3) /
(nuis_params[1] ** 1.5)) - self.skew0
est_vect = np.column_stack((mu_data, sig_data, skew_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1. + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
        return -2 * llr
def _opt_kurt(self, nuis_params):
"""
Called by test_kurt. This function is optimized over
nuisance parameters mu and sigma
Parameters
----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
-------
llr : float
The log likelihood ratio of a pre-specified kurtosis holding the
nuisance parameters constant
"""
endog = self.endog
nobs = self.nobs
mu_data = endog - nuis_params[0]
sig_data = ((endog - nuis_params[0]) ** 2) - nuis_params[1]
kurt_data = ((((endog - nuis_params[0]) ** 4) / \
(nuis_params[1] ** 2)) - 3) - self.kurt0
est_vect = np.column_stack((mu_data, sig_data, kurt_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1 + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr | Called by test_kurt. This function is optimized over
nuisance parameters mu and sigma
Parameters
----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
-------
llr : float
The log likelihood ratio of a pre-specified kurtosis holding the
nuisance parameters constant | _opt_kurt | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def _opt_skew_kurt(self, nuis_params):
"""
Called by test_joint_skew_kurt. This function is optimized over
nuisance parameters mu and sigma
Parameters
----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
-------
llr : float
The log likelihood ratio of a pre-specified skewness and
kurtosis holding the nuisance parameters constant.
"""
endog = self.endog
nobs = self.nobs
mu_data = endog - nuis_params[0]
sig_data = ((endog - nuis_params[0]) ** 2) - nuis_params[1]
skew_data = (((endog - nuis_params[0]) ** 3) / \
(nuis_params[1] ** 1.5)) - self.skew0
kurt_data = ((((endog - nuis_params[0]) ** 4) / \
(nuis_params[1] ** 2)) - 3) - self.kurt0
est_vect = np.column_stack((mu_data, sig_data, skew_data, kurt_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs,
1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1. + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr | Called by test_joint_skew_kurt. This function is optimized over
nuisance parameters mu and sigma
Parameters
----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
-------
llr : float
The log likelihood ratio of a pre-specified skewness and
kurtosis holding the nuisance parameters constant. | _opt_skew_kurt | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def _ci_limits_skew(self, skew):
"""
Parameters
----------
skew0 : float
Hypothesized value of skewness
Returns
-------
diff : float
The difference between the log likelihood ratio at skew and a
pre-specified value.
"""
return self.test_skew(skew)[0] - self.r0 | Parameters
----------
skew0 : float
Hypothesized value of skewness
Returns
-------
diff : float
The difference between the log likelihood ratio at skew and a
pre-specified value. | _ci_limits_skew | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def _ci_limits_kurt(self, kurt):
"""
Parameters
----------
kurt0 : float
Hypothesized value of kurtosis
Returns
-------
diff : float
The difference between the log likelihood ratio at kurt and a
pre-specified value.
"""
return self.test_kurt(kurt)[0] - self.r0 | Parameters
----------
kurt0 : float
Hypothesized value of kurtosis
Returns
-------
diff : float
The difference between the log likelihood ratio at kurt and a
pre-specified value. | _ci_limits_kurt | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def _opt_correl(self, nuis_params, corr0, endog, nobs, x0, weights0):
"""
Parameters
----------
nuis_params : 1darray
Array containing two nuisance means and two nuisance variances
Returns
-------
llr : float
The log-likelihood of the correlation coefficient holding nuisance
parameters constant
"""
mu1_data, mu2_data = (endog - nuis_params[::2]).T
sig1_data = mu1_data ** 2 - nuis_params[1]
sig2_data = mu2_data ** 2 - nuis_params[3]
correl_data = ((mu1_data * mu2_data) - corr0 *
(nuis_params[1] * nuis_params[3]) ** .5)
est_vect = np.column_stack((mu1_data, sig1_data,
mu2_data, sig2_data, correl_data))
eta_star = self._modif_newton(x0, est_vect, weights0)
denom = 1. + np.dot(est_vect, eta_star)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr | Parameters
----------
nuis_params : 1darray
Array containing two nuisance means and two nuisance variances
Returns
-------
llr : float
The log-likelihood of the correlation coefficient holding nuisance
parameters constant | _opt_correl | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def test_mean(self, mu0, return_weights=False):
"""
Returns -2 x log-likelihood ratio, p-value and weights
for a hypothesis test of the mean.
Parameters
----------
mu0 : float
Mean value to be tested
return_weights : bool
If return_weights is True the function returns
the weights of the observations under the null hypothesis.
Default is False
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value of mu0
"""
self.mu0 = mu0
endog = self.endog
nobs = self.nobs
eta_min = (1. - (1. / nobs)) / (self.mu0 - max(endog))
eta_max = (1. - (1. / nobs)) / (self.mu0 - min(endog))
eta_star = optimize.brentq(self._find_eta, eta_min, eta_max)
new_weights = (1. / nobs) * 1. / (1. + eta_star * (endog - self.mu0))
llr = -2 * np.sum(np.log(nobs * new_weights))
if return_weights:
return llr, chi2.sf(llr, 1), new_weights
else:
return llr, chi2.sf(llr, 1) | Returns -2 x log-likelihood ratio, p-value and weights
for a hypothesis test of the mean.
Parameters
----------
mu0 : float
Mean value to be tested
return_weights : bool
If return_weights is True the function returns
the weights of the observations under the null hypothesis.
Default is False
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value of mu0 | test_mean | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
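A minimal usage sketch for test_mean (not part of the source file; the seed, sample size, and hypothesized mean are illustrative):

import numpy as np
import statsmodels.api as sm

np.random.seed(0)
data = np.random.standard_normal(100)
desc = sm.emplike.DescStat(data)
# -2 x log-likelihood ratio and asymptotic chi2(1) p-value for H0: mean == 0.5
llr, p_val = desc.test_mean(0.5)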
def ci_mean(self, sig=.05, method='gamma', epsilon=10 ** -8,
gamma_low=-10 ** 10, gamma_high=10 ** 10):
"""
Returns the confidence interval for the mean.
Parameters
----------
sig : float
significance level. Default is .05
method : str
Root finding method, Can be 'nested-brent' or
'gamma'. Default is 'gamma'
'gamma' tries to solve for the gamma parameter in the
Lagrangian (see Owen pg 22) and then determines the weights.
'nested-brent' uses Brent's method to find the confidence
intervals but must maximize the likelihood ratio on every
iteration.
'gamma' is generally much faster. If the optimization does not
converge, try expanding the gamma_high and gamma_low
variables.
gamma_low : float
Lower bound for gamma when finding lower limit.
If the optimizer raises the error 'f(a) and f(b) must have
different signs', consider lowering gamma_low.
gamma_high : float
Upper bound for gamma when finding upper limit.
If the optimizer raises the error 'f(a) and f(b) must have
different signs', consider raising gamma_high.
epsilon : float
When using 'nested-brent', amount to decrease (increase)
from the maximum (minimum) of the data when
starting the search. This is to protect against the
likelihood ratio being zero at the maximum (minimum)
value of the data. If data is very small in absolute value
(<10 ``**`` -6) consider shrinking epsilon
When using 'gamma', amount to decrease (increase) the
minimum (maximum) by to start the search for gamma.
If the optimizer raises the error 'f(a) and f(b) must have
different signs', consider lowering epsilon.
Returns
-------
Interval : tuple
Confidence interval for the mean
"""
endog = self.endog
sig = 1 - sig
if method == 'nested-brent':
self.r0 = chi2.ppf(sig, 1)
middle = np.mean(endog)
epsilon_u = (max(endog) - np.mean(endog)) * epsilon
epsilon_l = (np.mean(endog) - min(endog)) * epsilon
ulim = optimize.brentq(self._ci_limits_mu, middle,
max(endog) - epsilon_u)
llim = optimize.brentq(self._ci_limits_mu, middle,
min(endog) + epsilon_l)
return llim, ulim
if method == 'gamma':
self.r0 = chi2.ppf(sig, 1)
gamma_star_l = optimize.brentq(self._find_gamma, gamma_low,
min(endog) - epsilon)
gamma_star_u = optimize.brentq(self._find_gamma, \
max(endog) + epsilon, gamma_high)
weights_low = ((endog - gamma_star_l) ** -1) / \
np.sum((endog - gamma_star_l) ** -1)
weights_high = ((endog - gamma_star_u) ** -1) / \
np.sum((endog - gamma_star_u) ** -1)
mu_low = np.sum(weights_low * endog)
mu_high = np.sum(weights_high * endog)
return mu_low, mu_high | Returns the confidence interval for the mean.
Parameters
----------
sig : float
significance level. Default is .05
method : str
Root finding method, Can be 'nested-brent' or
'gamma'. Default is 'gamma'
'gamma' tries to solve for the gamma parameter in the
Lagrangian (see Owen pg 22) and then determines the weights.
'nested-brent' uses Brent's method to find the confidence
intervals but must maximize the likelihood ratio on every
iteration.
'gamma' is generally much faster. If the optimization does not
converge, try expanding the gamma_high and gamma_low
variables.
gamma_low : float
Lower bound for gamma when finding lower limit.
If the optimizer raises the error 'f(a) and f(b) must have
different signs', consider lowering gamma_low.
gamma_high : float
Upper bound for gamma when finding upper limit.
If the optimizer raises the error 'f(a) and f(b) must have
different signs', consider raising gamma_high.
epsilon : float
When using 'nested-brent', amount to decrease (increase)
from the maximum (minimum) of the data when
starting the search. This is to protect against the
likelihood ratio being zero at the maximum (minimum)
value of the data. If data is very small in absolute value
(<10 ``**`` -6) consider shrinking epsilon
When using 'gamma', amount to decrease (increase) the
minimum (maximum) by to start the search for gamma.
If the optimizer raises the error 'f(a) and f(b) must have
different signs', consider lowering epsilon.
Returns
-------
Interval : tuple
Confidence interval for the mean | ci_mean | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
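A hedged usage sketch for ci_mean (seed and sample size are illustrative); both root-finding methods should produce essentially the same interval:

import numpy as np
import statsmodels.api as sm

np.random.seed(0)
desc = sm.emplike.DescStat(np.random.standard_normal(100))
lower, upper = desc.ci_mean(sig=.05)  # default 'gamma' method
lower_nb, upper_nb = desc.ci_mean(method='nested-brent')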
def test_var(self, sig2_0, return_weights=False):
"""
Returns -2 x log-likelihood ratio and the p-value for the
hypothesized variance
Parameters
----------
sig2_0 : float
Hypothesized variance to be tested
return_weights : bool
If True, returns the weights that maximize the
likelihood of observing sig2_0. Default is False
Returns
-------
test_results : tuple
The log-likelihood ratio and the p_value of sig2_0
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> random_numbers = np.random.standard_normal(1000)*100
>>> el_analysis = sm.emplike.DescStat(random_numbers)
>>> hyp_test = el_analysis.test_var(9500)
"""
self.sig2_0 = sig2_0
mu_max = max(self.endog)
mu_min = min(self.endog)
llr = optimize.fminbound(self._opt_var, mu_min, mu_max, \
full_output=1)[1]
p_val = chi2.sf(llr, 1)
if return_weights:
return llr, p_val, self.new_weights.T
else:
return llr, p_val | Returns -2 x log-likelihood ratio and the p-value for the
hypothesized variance
Parameters
----------
sig2_0 : float
Hypothesized variance to be tested
return_weights : bool
If True, returns the weights that maximize the
likelihood of observing sig2_0. Default is False
Returns
-------
test_results : tuple
The log-likelihood ratio and the p_value of sig2_0
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> random_numbers = np.random.standard_normal(1000)*100
>>> el_analysis = sm.emplike.DescStat(random_numbers)
>>> hyp_test = el_analysis.test_var(9500) | test_var | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def ci_var(self, lower_bound=None, upper_bound=None, sig=.05):
"""
Returns the confidence interval for the variance.
Parameters
----------
lower_bound : float
The minimum value the lower confidence interval can
take. The p-value from test_var(lower_bound) must be lower
than 1 - significance level. Default is .99 confidence
limit assuming normality
upper_bound : float
The maximum value the upper confidence interval
can take. The p-value from test_var(upper_bound) must be lower
than 1 - significance level. Default is .99 confidence
limit assuming normality
sig : float
The significance level. Default is .05
Returns
-------
Interval : tuple
Confidence interval for the variance
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> random_numbers = np.random.standard_normal(100)
>>> el_analysis = sm.emplike.DescStat(random_numbers)
>>> el_analysis.ci_var()
(0.7539322567470305, 1.229998852496268)
>>> el_analysis.ci_var(.5, 2)
(0.7539322567469926, 1.2299988524962664)
Notes
-----
If the function raises the error 'f(a) and f(b) must have
different signs', consider lowering lower_bound and raising
upper_bound.
"""
endog = self.endog
if upper_bound is None:
upper_bound = ((self.nobs - 1) * endog.var()) / \
(chi2.ppf(.0001, self.nobs - 1))
if lower_bound is None:
lower_bound = ((self.nobs - 1) * endog.var()) / \
(chi2.ppf(.9999, self.nobs - 1))
self.r0 = chi2.ppf(1 - sig, 1)
llim = optimize.brentq(self._ci_limits_var, lower_bound, endog.var())
ulim = optimize.brentq(self._ci_limits_var, endog.var(), upper_bound)
return llim, ulim | Returns the confidence interval for the variance.
Parameters
----------
lower_bound : float
The minimum value the lower confidence interval can
take. The p-value from test_var(lower_bound) must be lower
than 1 - significance level. Default is .99 confidence
limit assuming normality
upper_bound : float
The maximum value the upper confidence interval
can take. The p-value from test_var(upper_bound) must be lower
than 1 - significance level. Default is .99 confidence
limit assuming normality
sig : float
The significance level. Default is .05
Returns
-------
Interval : tuple
Confidence interval for the variance
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> random_numbers = np.random.standard_normal(100)
>>> el_analysis = sm.emplike.DescStat(random_numbers)
>>> el_analysis.ci_var()
(0.7539322567470305, 1.229998852496268)
>>> el_analysis.ci_var(.5, 2)
(0.7539322567469926, 1.2299988524962664)
Notes
-----
If the function raises the error 'f(a) and f(b) must have
different signs', consider lowering lower_bound and raising
upper_bound. | ci_var | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def plot_contour(self, mu_low, mu_high, var_low, var_high, mu_step,
var_step,
levs=[.2, .1, .05, .01, .001]):
"""
Returns a plot of the confidence region for a univariate
mean and variance.
Parameters
----------
mu_low : float
Lowest value of the mean to plot
mu_high : float
Highest value of the mean to plot
var_low : float
Lowest value of the variance to plot
var_high : float
Highest value of the variance to plot
mu_step : float
Increments to evaluate the mean
var_step : float
Increments to evaluate the variance
levs : list
The significance levels at which the contour lines will be drawn.
Default is [.2, .1, .05, .01, .001]
Returns
-------
Figure
The contour plot
"""
fig, ax = utils.create_mpl_ax()
ax.set_ylabel('Variance')
ax.set_xlabel('Mean')
mu_vect = list(np.arange(mu_low, mu_high, mu_step))
var_vect = list(np.arange(var_low, var_high, var_step))
z = []
for sig0 in var_vect:
self.sig2_0 = sig0
for mu0 in mu_vect:
z.append(self._opt_var(mu0, pval=True))
z = np.asarray(z).reshape(len(var_vect), len(mu_vect))
ax.contour(mu_vect, var_vect, z, levels=levs)
return fig | Returns a plot of the confidence region for a univariate
mean and variance.
Parameters
----------
mu_low : float
Lowest value of the mean to plot
mu_high : float
Highest value of the mean to plot
var_low : float
Lowest value of the variance to plot
var_high : float
Highest value of the variance to plot
mu_step : float
Increments to evaluate the mean
var_step : float
Increments to evaluate the variance
levs : list
The significance levels at which the contour lines will be drawn.
Default is [.2, .1, .05, .01, .001]
Returns
-------
Figure
The contour plot | plot_contour | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def test_skew(self, skew0, return_weights=False):
"""
Returns -2 x log-likelihood and p-value for the hypothesized
skewness.
Parameters
----------
skew0 : float
Skewness value to be tested
return_weights : bool
If True, function also returns the weights that
maximize the likelihood ratio. Default is False.
Returns
-------
test_results : tuple
The log-likelihood ratio and p_value of skew0
"""
self.skew0 = skew0
start_nuisance = np.array([self.endog.mean(),
self.endog.var()])
llr = optimize.fmin_powell(self._opt_skew, start_nuisance,
full_output=1, disp=0)[1]
p_val = chi2.sf(llr, 1)
if return_weights:
return llr, p_val, self.new_weights.T
return llr, p_val | Returns -2 x log-likelihood and p-value for the hypothesized
skewness.
Parameters
----------
skew0 : float
Skewness value to be tested
return_weights : bool
If True, function also returns the weights that
maximize the likelihood ratio. Default is False.
Returns
-------
test_results : tuple
The log-likelihood ratio and p_value of skew0 | test_skew | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
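An illustrative sketch for test_skew; for standard normal draws the null of zero skewness should not be rejected (seed and sample size are arbitrary):

import numpy as np
import statsmodels.api as sm

np.random.seed(0)
desc = sm.emplike.DescStat(np.random.standard_normal(500))
llr, p_val = desc.test_skew(0)  # H0: skewness == 0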
def test_kurt(self, kurt0, return_weights=False):
"""
Returns -2 x log-likelihood and the p-value for the hypothesized
kurtosis.
Parameters
----------
kurt0 : float
Kurtosis value to be tested
return_weights : bool
If True, function also returns the weights that
maximize the likelihood ratio. Default is False.
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value of kurt0
"""
self.kurt0 = kurt0
start_nuisance = np.array([self.endog.mean(),
self.endog.var()])
llr = optimize.fmin_powell(self._opt_kurt, start_nuisance,
full_output=1, disp=0)[1]
p_val = chi2.sf(llr, 1)
if return_weights:
return llr, p_val, self.new_weights.T
return llr, p_val | Returns -2 x log-likelihood and the p-value for the hypothesized
kurtosis.
Parameters
----------
kurt0 : float
Kurtosis value to be tested
return_weights : bool
If True, function also returns the weights that
maximize the likelihood ratio. Default is False.
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value of kurt0 | test_kurt | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def test_joint_skew_kurt(self, skew0, kurt0, return_weights=False):
"""
Returns -2 x log-likelihood and the p-value for the joint
hypothesis test for skewness and kurtosis
Parameters
----------
skew0 : float
Skewness value to be tested
kurt0 : float
Kurtosis value to be tested
return_weights : bool
If True, function also returns the weights that
maximize the likelihood ratio. Default is False.
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value of the joint hypothesis test.
"""
self.skew0 = skew0
self.kurt0 = kurt0
start_nuisance = np.array([self.endog.mean(),
self.endog.var()])
llr = optimize.fmin_powell(self._opt_skew_kurt, start_nuisance,
full_output=1, disp=0)[1]
p_val = chi2.sf(llr, 2)
if return_weights:
return llr, p_val, self.new_weights.T
return llr, p_val | Returns -2 x log-likelihood and the p-value for the joint
hypothesis test for skewness and kurtosis
Parameters
----------
skew0 : float
Skewness value to be tested
kurt0 : float
Kurtosis value to be tested
return_weights : bool
If True, function also returns the weights that
maximize the likelihood ratio. Default is False.
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value of the joint hypothesis test. | test_joint_skew_kurt | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def ci_skew(self, sig=.05, upper_bound=None, lower_bound=None):
"""
Returns the confidence interval for skewness.
Parameters
----------
sig : float
The significance level. Default is .05
upper_bound : float
Maximum value of skewness the upper limit can be.
Default is .99 confidence limit assuming normality.
lower_bound : float
Minimum value of skewness the lower limit can be.
Default is .99 confidence level assuming normality.
Returns
-------
Interval : tuple
Confidence interval for the skewness
Notes
-----
If the function raises the error 'f(a) and f(b) must have different
signs', consider expanding the lower and upper bounds
"""
nobs = self.nobs
endog = self.endog
if upper_bound is None:
upper_bound = skew(endog) + \
2.5 * ((6. * nobs * (nobs - 1.)) / \
((nobs - 2.) * (nobs + 1.) * \
(nobs + 3.))) ** .5
if lower_bound is None:
lower_bound = skew(endog) - \
2.5 * ((6. * nobs * (nobs - 1.)) / \
((nobs - 2.) * (nobs + 1.) * \
(nobs + 3.))) ** .5
self.r0 = chi2.ppf(1 - sig, 1)
llim = optimize.brentq(self._ci_limits_skew, lower_bound, skew(endog))
ulim = optimize.brentq(self._ci_limits_skew, skew(endog), upper_bound)
return llim, ulim | Returns the confidence interval for skewness.
Parameters
----------
sig : float
The significance level. Default is .05
upper_bound : float
Maximum value of skewness the upper limit can be.
Default is .99 confidence limit assuming normality.
lower_bound : float
Minimum value of skewness the lower limit can be.
Default is .99 confidence level assuming normality.
Returns
-------
Interval : tuple
Confidence interval for the skewness
Notes
-----
If the function raises the error 'f(a) and f(b) must have different
signs', consider expanding the lower and upper bounds | ci_skew | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def ci_kurt(self, sig=.05, upper_bound=None, lower_bound=None):
"""
Returns the confidence interval for kurtosis.
Parameters
----------
sig : float
The significance level. Default is .05
upper_bound : float
Maximum value of kurtosis the upper limit can be.
Default is .99 confidence limit assuming normality.
lower_bound : float
Minimum value of kurtosis the lower limit can be.
Default is .99 confidence limit assuming normality.
Returns
-------
Interval : tuple
Lower and upper confidence limit
Notes
-----
For small n, upper_bound and lower_bound may have to be
provided by the user. Consider using test_kurt to find
values close to the desired significance level.
If the function raises the error 'f(a) and f(b) must have different
signs', consider expanding the bounds.
"""
endog = self.endog
nobs = self.nobs
if upper_bound is None:
upper_bound = kurtosis(endog) + \
(2.5 * (2. * ((6. * nobs * (nobs - 1.)) / \
((nobs - 2.) * (nobs + 1.) * \
(nobs + 3.))) ** .5) * \
(((nobs ** 2.) - 1.) / ((nobs - 3.) *\
(nobs + 5.))) ** .5)
if lower_bound is None:
lower_bound = kurtosis(endog) - \
(2.5 * (2. * ((6. * nobs * (nobs - 1.)) / \
((nobs - 2.) * (nobs + 1.) * \
(nobs + 3.))) ** .5) * \
(((nobs ** 2.) - 1.) / ((nobs - 3.) *\
(nobs + 5.))) ** .5)
self.r0 = chi2.ppf(1 - sig, 1)
llim = optimize.brentq(self._ci_limits_kurt, lower_bound, \
kurtosis(endog))
ulim = optimize.brentq(self._ci_limits_kurt, kurtosis(endog), \
upper_bound)
return llim, ulim | Returns the confidence interval for kurtosis.
Parameters
----------
sig : float
The significance level. Default is .05
upper_bound : float
Maximum value of kurtosis the upper limit can be.
Default is .99 confidence limit assuming normality.
lower_bound : float
Minimum value of kurtosis the lower limit can be.
Default is .99 confidence limit assuming normality.
Returns
-------
Interval : tuple
Lower and upper confidence limit
Notes
-----
For small n, upper_bound and lower_bound may have to be
provided by the user. Consider using test_kurt to find
values close to the desired significance level.
If the function raises the error 'f(a) and f(b) must have different
signs', consider expanding the bounds. | ci_kurt | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def mv_test_mean(self, mu_array, return_weights=False):
"""
Returns -2 x log likelihood and the p-value
for a multivariate hypothesis test of the mean
Parameters
----------
mu_array : 1d array
Hypothesized values for the mean. Must have same number of
elements as columns in endog
return_weights : bool
If True, returns the weights that maximize the
likelihood of mu_array. Default is False.
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value for mu_array
"""
endog = self.endog
nobs = self.nobs
if len(mu_array) != endog.shape[1]:
raise ValueError('mu_array must have the same number of '
'elements as the columns of the data.')
mu_array = mu_array.reshape(1, endog.shape[1])
means = np.ones((endog.shape[0], endog.shape[1]))
means = mu_array * means
est_vect = endog - means
start_vals = 1. / nobs * np.ones(endog.shape[1])
eta_star = self._modif_newton(start_vals, est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1 + np.dot(eta_star, est_vect.T)
self.new_weights = 1 / nobs * 1 / denom
llr = -2 * np.sum(np.log(nobs * self.new_weights))
p_val = chi2.sf(llr, mu_array.shape[1])
if return_weights:
return llr, p_val, self.new_weights.T
else:
return llr, p_val | Returns -2 x log likelihood and the p-value
for a multivariate hypothesis test of the mean
Parameters
----------
mu_array : 1d array
Hypothesized values for the mean. Must have same number of
elements as columns in endog
return_weights : bool
If True, returns the weights that maximize the
likelihood of mu_array. Default is False.
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value for mu_array | mv_test_mean | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
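A sketch of mv_test_mean; the DescStat factory dispatches to the multivariate class when endog has more than one column (data here are illustrative):

import numpy as np
import statsmodels.api as sm

np.random.seed(0)
bivariate = np.random.standard_normal((100, 2))
desc = sm.emplike.DescStat(bivariate)
# joint H0: both means equal zero; p-value is asymptotically chi2(2)
llr, p_val = desc.mv_test_mean(np.array([0., 0.]))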
def test_corr(self, corr0, return_weights=0):
"""
Returns -2 x log-likelihood ratio and p-value for the
correlation coefficient between 2 variables
Parameters
----------
corr0 : float
Hypothesized value to be tested
return_weights : bool
If True, returns the weights that maximize
the log-likelihood at the hypothesized value
"""
nobs = self.nobs
endog = self.endog
if endog.shape[1] != 2:
raise NotImplementedError('Correlation matrix not yet implemented')
nuis0 = np.array([endog[:, 0].mean(),
endog[:, 0].var(),
endog[:, 1].mean(),
endog[:, 1].var()])
x0 = np.zeros(5)
weights0 = np.array([1. / nobs] * int(nobs))
args = (corr0, endog, nobs, x0, weights0)
llr = optimize.fmin(self._opt_correl, nuis0, args=args,
full_output=1, disp=0)[1]
p_val = chi2.sf(llr, 1)
if return_weights:
return llr, p_val, self.new_weights.T
return llr, p_val | Returns -2 x log-likelihood ratio and p-value for the
correlation coefficient between 2 variables
Parameters
----------
corr0 : float
Hypothesized value to be tested
return_weights : bool
If True, returns the weights that maximize
the log-likelihood at the hypothesized value | test_corr | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def ci_corr(self, sig=.05, upper_bound=None, lower_bound=None):
"""
Returns the confidence intervals for the correlation coefficient
Parameters
----------
sig : float
The significance level. Default is .05
upper_bound : float
Maximum value the upper confidence limit can be.
Default is 99% confidence limit assuming normality.
lower_bound : float
Minimum value the lower confidence limit can be.
Default is 99% confidence limit assuming normality.
Returns
-------
interval : tuple
Confidence interval for the correlation
"""
endog = self.endog
nobs = self.nobs
self.r0 = chi2.ppf(1 - sig, 1)
point_est = np.corrcoef(endog[:, 0], endog[:, 1])[0, 1]
if upper_bound is None:
upper_bound = min(.999, point_est + \
2.5 * ((1. - point_est ** 2.) / \
(nobs - 2.)) ** .5)
if lower_bound is None:
lower_bound = max(- .999, point_est - \
2.5 * (np.sqrt((1. - point_est ** 2.) / \
(nobs - 2.))))
llim = optimize.brenth(self._ci_limits_corr, lower_bound, point_est)
ulim = optimize.brenth(self._ci_limits_corr, point_est, upper_bound)
return llim, ulim | Returns the confidence intervals for the correlation coefficient
Parameters
----------
sig : float
The significance level. Default is .05
upper_bound : float
Maximum value the upper confidence limit can be.
Default is 99% confidence limit assuming normality.
lower_bound : float
Minimum value the lower confidence limit can be.
Default is 99% confidence limit assuming normality.
Returns
-------
interval : tuple
Confidence interval for the correlation | ci_corr | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
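A sketch combining test_corr and ci_corr on simulated correlated data (seed, sample size, and effect size are illustrative):

import numpy as np
import statsmodels.api as sm

np.random.seed(0)
x = np.random.standard_normal(200)
y = .5 * x + np.random.standard_normal(200)
desc = sm.emplike.DescStat(np.column_stack((x, y)))
llr, p_val = desc.test_corr(.4)  # H0: corr(x, y) == .4
lower, upper = desc.ci_corr()    # EL confidence interval for the correlation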
def drop_missing(Y, X=None, axis=1):
"""
Returns copies of the arrays Y and X where missing observations are dropped.
Parameters
----------
Y : array_like
X : array_like, optional
axis : int
Axis along which to look for missing observations. Default is 1, i.e.,
observations in rows.
Returns
-------
Y : ndarray
All rows of Y where neither Y nor X has a missing observation.
X : ndarray
All rows of X where neither Y nor X has a missing observation.
Notes
-----
If either Y or X is 1d, it is reshaped to be 2d.
"""
Y = np.asarray(Y)
if Y.ndim == 1:
Y = Y[:, None]
if X is not None:
X = np.array(X)
if X.ndim == 1:
X = X[:, None]
keepidx = np.logical_and(~np.isnan(Y).any(axis),
~np.isnan(X).any(axis))
return Y[keepidx], X[keepidx]
else:
keepidx = ~np.isnan(Y).any(axis)
return Y[keepidx] | Returns copies of the arrays Y and X where missing observations are dropped.
Parameters
----------
Y : array_like
X : array_like, optional
axis : int
Axis along which to look for missing observations. Default is 1, i.e.,
observations in rows.
Returns
-------
Y : ndarray
All rows of Y where neither Y nor X has a missing observation.
X : ndarray
All rows of X where neither Y nor X has a missing observation.
Notes
-----
If either Y or X is 1d, it is reshaped to be 2d. | drop_missing | python | statsmodels/statsmodels | statsmodels/tools/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/tools.py | BSD-3-Clause |
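A small sketch of drop_missing; note that 1d inputs come back with shape (nobs, 1) after the internal reshape (values are illustrative):

import numpy as np
from statsmodels.tools.tools import drop_missing

Y = np.array([1., 2., np.nan, 4.])
X = np.array([1., np.nan, 3., 4.])
# only rows 0 and 3 are free of NaNs in both arrays
Y_clean, X_clean = drop_missing(Y, X)  # each has shape (2, 1)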
def categorical(data, col=None, dictnames=False, drop=False):
"""
Construct a dummy matrix from categorical variables
.. deprecated:: 0.12
Use pandas.get_dummies instead.
Parameters
----------
data : array_like
An array, Series or DataFrame. This can be either a 1d vector of
the categorical variable or a 2d array with the column specifying
the categorical variable specified by the col argument.
col : {str, int, None}
If data is a DataFrame, col must be a column of data. If data is a
Series, col must be either the name of the Series or None. For arrays,
`col` can be an int that is the (zero-based) column index
number. `col` can only be None for a 1d array. The default is None.
dictnames : bool, optional
If True, a dictionary mapping the column number to the categorical
name is returned. Used to have information about plain arrays.
drop : bool
Whether or not keep the categorical variable in the returned matrix.
Returns
-------
dummy_matrix : array_like
A matrix of dummy (indicator/binary) float variables for the
categorical data.
dictnames : dict[int, str], optional
Mapping between column numbers and categorical names.
Notes
-----
This returns a dummy variable for *each* distinct value of the variable.
If a DataFrame is provided, the name for each new variable is the
old variable name - underscore - category name. So if a variable
'vote' had answers 'yes' or 'no', then the returned array would have two
new variables -- 'vote_yes' and 'vote_no'. There is currently
no name checking.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
Univariate examples
>>> import string
>>> string_var = [string.ascii_lowercase[0:5],
... string.ascii_lowercase[5:10],
... string.ascii_lowercase[10:15],
... string.ascii_lowercase[15:20],
... string.ascii_lowercase[20:25]]
>>> string_var *= 5
>>> string_var = np.asarray(sorted(string_var))
>>> design = sm.tools.categorical(string_var, drop=True)
Or for a numerical categorical variable
>>> instr = np.floor(np.arange(10,60, step=2)/10)
>>> design = sm.tools.categorical(instr, drop=True)
With a structured array
>>> num = np.random.randn(25,2)
>>> struct_ar = np.zeros((25,1),
... dtype=[('var1', 'f4'),('var2', 'f4'),
... ('instrument','f4'),('str_instr','a5')])
>>> struct_ar['var1'] = num[:,0][:,None]
>>> struct_ar['var2'] = num[:,1][:,None]
>>> struct_ar['instrument'] = instr[:,None]
>>> struct_ar['str_instr'] = string_var[:,None]
>>> design = sm.tools.categorical(struct_ar, col='instrument', drop=True)
Or
>>> design2 = sm.tools.categorical(struct_ar, col='str_instr', drop=True)
"""
raise NotImplementedError("categorical has been removed") | Construct a dummy matrix from categorical variables
.. deprecated:: 0.12
Use pandas.get_dummies instead.
Parameters
----------
data : array_like
An array, Series or DataFrame. This can be either a 1d vector of
the categorical variable or a 2d array with the column specifying
the categorical variable specified by the col argument.
col : {str, int, None}
If data is a DataFrame, col must be a column of data. If data is a
Series, col must be either the name of the Series or None. For arrays,
`col` can be an int that is the (zero-based) column index
number. `col` can only be None for a 1d array. The default is None.
dictnames : bool, optional
If True, a dictionary mapping the column number to the categorical
name is returned. Used to have information about plain arrays.
drop : bool
Whether or not keep the categorical variable in the returned matrix.
Returns
-------
dummy_matrix : array_like
A matrix of dummy (indicator/binary) float variables for the
categorical data.
dictnames : dict[int, str], optional
Mapping between column numbers and categorical names.
Notes
-----
This returns a dummy variable for *each* distinct value of the variable.
If a DataFrame is provided, the name for each new variable is the
old variable name - underscore - category name. So if a variable
'vote' had answers 'yes' or 'no', then the returned array would have two
new variables -- 'vote_yes' and 'vote_no'. There is currently
no name checking.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
Univariate examples
>>> import string
>>> string_var = [string.ascii_lowercase[0:5],
... string.ascii_lowercase[5:10],
... string.ascii_lowercase[10:15],
... string.ascii_lowercase[15:20],
... string.ascii_lowercase[20:25]]
>>> string_var *= 5
>>> string_var = np.asarray(sorted(string_var))
>>> design = sm.tools.categorical(string_var, drop=True)
Or for a numerical categorical variable
>>> instr = np.floor(np.arange(10,60, step=2)/10)
>>> design = sm.tools.categorical(instr, drop=True)
With a structured array
>>> num = np.random.randn(25,2)
>>> struct_ar = np.zeros((25,1),
... dtype=[('var1', 'f4'),('var2', 'f4'),
... ('instrument','f4'),('str_instr','a5')])
>>> struct_ar['var1'] = num[:,0][:,None]
>>> struct_ar['var2'] = num[:,1][:,None]
>>> struct_ar['instrument'] = instr[:,None]
>>> struct_ar['str_instr'] = string_var[:,None]
>>> design = sm.tools.categorical(struct_ar, col='instrument', drop=True)
Or
>>> design2 = sm.tools.categorical(struct_ar, col='str_instr', drop=True) | categorical | python | statsmodels/statsmodels | statsmodels/tools/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/tools.py | BSD-3-Clause |
def add_constant(data, prepend=True, has_constant='skip'):
"""
Add a column of ones to an array.
Parameters
----------
data : array_like
A column-ordered design matrix.
prepend : bool
If true, the constant is in the first column. Else the constant is
appended (last column).
has_constant : str {'raise', 'add', 'skip'}
Behavior if ``data`` already has a constant. The default will return
data without adding another constant. If 'raise', will raise an
error if any column has a constant value. Using 'add' will add a
column of 1s if a constant column is present.
Returns
-------
array_like
The original values with a constant (column of ones) as the first or
last column. Returned value type depends on input type.
Notes
-----
When the input is a pandas Series or DataFrame, the added column's name
is 'const'.
"""
if _is_using_pandas(data, None):
from statsmodels.tsa.tsatools import add_trend
return add_trend(data, trend='c', prepend=prepend, has_constant=has_constant)
# Special case for NumPy
x = np.asarray(data)
ndim = x.ndim
if ndim == 1:
x = x[:, None]
elif x.ndim > 2:
raise ValueError('Only implemented for 2-dimensional arrays')
is_nonzero_const = np.ptp(x, axis=0) == 0
is_nonzero_const &= np.all(x != 0.0, axis=0)
if is_nonzero_const.any():
if has_constant == 'skip':
return x
elif has_constant == 'raise':
if ndim == 1:
raise ValueError("data is constant.")
else:
columns = np.arange(x.shape[1])
cols = ",".join([str(c) for c in columns[is_nonzero_const]])
raise ValueError(f"Column(s) {cols} are constant.")
x = [np.ones(x.shape[0]), x]
x = x if prepend else x[::-1]
return np.column_stack(x) | Add a column of ones to an array.
Parameters
----------
data : array_like
A column-ordered design matrix.
prepend : bool
If true, the constant is in the first column. Else the constant is
appended (last column).
has_constant : str {'raise', 'add', 'skip'}
Behavior if ``data`` already has a constant. The default will return
data without adding another constant. If 'raise', will raise an
error if any column has a constant value. Using 'add' will add a
column of 1s if a constant column is present.
Returns
-------
array_like
The original values with a constant (column of ones) as the first or
last column. Returned value type depends on input type.
Notes
-----
When the input is a pandas Series or DataFrame, the added column's name
is 'const'. | add_constant | python | statsmodels/statsmodels | statsmodels/tools/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/tools.py | BSD-3-Clause |
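An illustrative sketch of add_constant on a plain ndarray (values are arbitrary):

import numpy as np
import statsmodels.api as sm

x = np.arange(1., 6.)
X = sm.add_constant(x)                     # ones prepended, shape (5, 2)
X_app = sm.add_constant(x, prepend=False)  # ones as the last column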
def isestimable(c, d):
"""
True if (Q, P) contrast `c` is estimable for (N, P) design `d`.
From a Q x P contrast matrix `C` and an N x P design matrix `D`, checks if
the contrast `C` is estimable by looking at the rank of ``vstack([C,D])``
and verifying it is the same as the rank of `D`.
Parameters
----------
c : array_like
A contrast matrix with shape (Q, P). If 1 dimensional assume shape is
(1, P).
d : array_like
The design matrix, (N, P).
Returns
-------
bool
True if the contrast `c` is estimable on design `d`.
Examples
--------
>>> d = np.array([[1, 1, 1, 0, 0, 0],
... [0, 0, 0, 1, 1, 1],
... [1, 1, 1, 1, 1, 1]]).T
>>> isestimable([1, 0, 0], d)
False
>>> isestimable([1, -1, 0], d)
True
"""
c = array_like(c, 'c', maxdim=2)
d = array_like(d, 'd', ndim=2)
c = c[None, :] if c.ndim == 1 else c
if c.shape[1] != d.shape[1]:
raise ValueError('Contrast should have %d columns' % d.shape[1])
new = np.vstack([c, d])
if np.linalg.matrix_rank(new) != np.linalg.matrix_rank(d):
return False
return True | True if (Q, P) contrast `c` is estimable for (N, P) design `d`.
From a Q x P contrast matrix `C` and an N x P design matrix `D`, checks if
the contrast `C` is estimable by looking at the rank of ``vstack([C,D])``
and verifying it is the same as the rank of `D`.
Parameters
----------
c : array_like
A contrast matrix with shape (Q, P). If 1 dimensional assume shape is
(1, P).
d : array_like
The design matrix, (N, P).
Returns
-------
bool
True if the contrast `c` is estimable on design `d`.
Examples
--------
>>> d = np.array([[1, 1, 1, 0, 0, 0],
... [0, 0, 0, 1, 1, 1],
... [1, 1, 1, 1, 1, 1]]).T
>>> isestimable([1, 0, 0], d)
False
>>> isestimable([1, -1, 0], d)
True | isestimable | python | statsmodels/statsmodels | statsmodels/tools/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/tools.py | BSD-3-Clause |
def pinv_extended(x, rcond=1e-15):
"""
Return the pinv of an array X as well as the singular values
used in computation.
Code adapted from numpy.
"""
x = np.asarray(x)
x = x.conjugate()
u, s, vt = np.linalg.svd(x, False)
s_orig = np.copy(s)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond * np.maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = np.dot(np.transpose(vt), np.multiply(s[:, np.newaxis],
np.transpose(u)))
return res, s_orig | Return the pinv of an array X as well as the singular values
used in computation.
Code adapted from numpy. | pinv_extended | python | statsmodels/statsmodels | statsmodels/tools/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/tools.py | BSD-3-Clause |
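A quick sketch showing that pinv_extended agrees with numpy's pinv while also exposing the singular values (the matrix is illustrative):

import numpy as np
from statsmodels.tools.tools import pinv_extended

A = np.array([[1., 2.], [3., 4.], [5., 6.]])
pinv, singular_values = pinv_extended(A)
np.testing.assert_allclose(pinv, np.linalg.pinv(A))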
def recipr(x):
"""
Reciprocal of an array with entries less than or equal to 0 set to 0.
Parameters
----------
x : array_like
The input array.
Returns
-------
ndarray
The array with 0-filled reciprocals.
"""
x = np.asarray(x)
out = np.zeros_like(x, dtype=np.float64)
nans = np.isnan(x.flat)
pos = ~nans
pos[pos] = pos[pos] & (x.flat[pos] > 0)
out.flat[pos] = 1.0 / x.flat[pos]
out.flat[nans] = np.nan
return out | Reciprocal of an array with entries less than or equal to 0 set to 0.
Parameters
----------
x : array_like
The input array.
Returns
-------
ndarray
The array with 0-filled reciprocals. | recipr | python | statsmodels/statsmodels | statsmodels/tools/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/tools.py | BSD-3-Clause |
def recipr0(x):
"""
Reciprocal of an array with entries equal to 0 set to 0.
Parameters
----------
x : array_like
The input array.
Returns
-------
ndarray
The array with 0-filled reciprocals.
"""
x = np.asarray(x)
out = np.zeros_like(x, dtype=np.float64)
nans = np.isnan(x.flat)
non_zero = ~nans
non_zero[non_zero] = non_zero[non_zero] & (x.flat[non_zero] != 0)
out.flat[non_zero] = 1.0 / x.flat[non_zero]
out.flat[nans] = np.nan
return out | Reciprocal of an array with entries equal to 0 set to 0.
Parameters
----------
x : array_like
The input array.
Returns
-------
ndarray
The array with 0-filled reciprocals. | recipr0 | python | statsmodels/statsmodels | statsmodels/tools/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/tools.py | BSD-3-Clause |
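A sketch contrasting the two helpers (input values are illustrative): recipr zeroes out all non-positive entries, while recipr0 zeroes out only exact zeros:

import numpy as np
from statsmodels.tools.tools import recipr, recipr0

x = np.array([2., 0., -4., np.nan])
recipr(x)   # array([0.5, 0., 0., nan])
recipr0(x)  # array([0.5, 0., -0.25, nan])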
def clean0(matrix):
"""
Erase columns of zeros: can save some time in pseudoinverse.
Parameters
----------
matrix : ndarray
The array to clean.
Returns
-------
ndarray
The cleaned array.
"""
colsum = np.add.reduce(matrix**2, 0)
val = [matrix[:, i] for i in np.flatnonzero(colsum)]
return np.array(np.transpose(val)) | Erase columns of zeros: can save some time in pseudoinverse.
Parameters
----------
matrix : ndarray
The array to clean.
Returns
-------
ndarray
The cleaned array. | clean0 | python | statsmodels/statsmodels | statsmodels/tools/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/tools.py | BSD-3-Clause |
def fullrank(x, r=None):
"""
Return an array whose column span is the same as x.
Parameters
----------
x : ndarray
The array to adjust, 2d.
r : int, optional
The rank of x. If not provided, determined by `np.linalg.matrix_rank`.
Returns
-------
ndarray
The array adjusted to have full rank.
Notes
-----
If the rank of x is known it can be specified as r -- no check
is made to ensure that this really is the rank of x.
"""
if r is None:
r = np.linalg.matrix_rank(x)
v, d, u = np.linalg.svd(x, full_matrices=False)
order = np.argsort(d)
order = order[::-1]
value = []
for i in range(r):
value.append(v[:, order[i]])
return np.asarray(np.transpose(value)).astype(np.float64) | Return an array whose column span is the same as x.
Parameters
----------
x : ndarray
The array to adjust, 2d.
r : int, optional
The rank of x. If not provided, determined by `np.linalg.matrix_rank`.
Returns
-------
ndarray
The array adjusted to have full rank.
Notes
-----
If the rank of x is known it can be specified as r -- no check
is made to ensure that this really is the rank of x. | fullrank | python | statsmodels/statsmodels | statsmodels/tools/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/tools.py | BSD-3-Clause |
def unsqueeze(data, axis, oldshape):
"""
Unsqueeze a collapsed array.
Parameters
----------
data : ndarray
The data to unsqueeze.
axis : int
The axis to unsqueeze.
oldshape : tuple[int]
The original shape before the squeeze or reduce operation.
Returns
-------
ndarray
The unsqueezed array.
Examples
--------
>>> from numpy import mean
>>> from numpy.random import standard_normal
>>> x = standard_normal((3,4,5))
>>> m = mean(x, axis=1)
>>> m.shape
(3, 5)
>>> m = unsqueeze(m, 1, x.shape)
>>> m.shape
(3, 1, 5)
>>>
"""
newshape = list(oldshape)
newshape[axis] = 1
return data.reshape(newshape) | Unsqueeze a collapsed array.
Parameters
----------
data : ndarray
The data to unsqueeze.
axis : int
The axis to unsqueeze.
oldshape : tuple[int]
The original shape before the squeeze or reduce operation.
Returns
-------
ndarray
The unsqueezed array.
Examples
--------
>>> from numpy import mean
>>> from numpy.random import standard_normal
>>> x = standard_normal((3,4,5))
>>> m = mean(x, axis=1)
>>> m.shape
(3, 5)
>>> m = unsqueeze(m, 1, x.shape)
>>> m.shape
(3, 1, 5)
>>> | unsqueeze | python | statsmodels/statsmodels | statsmodels/tools/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/tools.py | BSD-3-Clause |
def nan_dot(A, B):
"""
Returns np.dot(A, B) with the convention that
nan * 0 = 0 and nan * x = nan if x != 0.
Parameters
----------
A, B : ndarray
"""
# Find out who should be nan due to nan * nonzero
should_be_nan_1 = np.dot(np.isnan(A), (B != 0))
should_be_nan_2 = np.dot((A != 0), np.isnan(B))
should_be_nan = should_be_nan_1 + should_be_nan_2
# Multiply after setting all nan to 0
# This is what happens if there were no nan * nonzero conflicts
C = np.dot(np.nan_to_num(A), np.nan_to_num(B))
C[should_be_nan] = np.nan
return C | Returns np.dot(A, B) with the convention that
nan * 0 = 0 and nan * x = nan if x != 0.
Parameters
----------
A, B : ndarray | nan_dot | python | statsmodels/statsmodels | statsmodels/tools/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/tools.py | BSD-3-Clause |
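A sketch of the nan_dot convention (matrices are illustrative): nan times an exact zero contributes nothing, so only products of nan with nonzero entries propagate:

import numpy as np
from statsmodels.tools.tools import nan_dot

A = np.array([[np.nan, 0.], [1., 2.]])
B = np.eye(2)
nan_dot(A, B)  # array([[nan, 0.], [1., 2.]]); only the (0, 0) entry is nan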
def maybe_unwrap_results(results):
"""
Gets raw results back from wrapped results.
Can be used in plotting functions or other post-estimation type
routines.
"""
return getattr(results, '_results', results) | Gets raw results back from wrapped results.
Can be used in plotting functions or other post-estimation type
routines. | maybe_unwrap_results | python | statsmodels/statsmodels | statsmodels/tools/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/tools.py | BSD-3-Clause |
def _ensure_2d(x, ndarray=False):
"""
Parameters
----------
x : ndarray, Series, DataFrame or None
Input to verify dimensions, and to transform as necessary
ndarray : bool
Flag indicating whether to always return a NumPy array. Setting False
will return a pandas DataFrame when the input is a Series or a
DataFrame.
Returns
-------
out : ndarray, DataFrame or None
array or DataFrame with 2 dimensions. One dimensional arrays are
returned as nobs by 1. None is returned if x is None.
names : list of str or None
list containing variable names when the input is a pandas datatype.
Returns None if the input is an ndarray.
Notes
-----
Accepts None for simplicity
"""
if x is None:
return x
is_pandas = _is_using_pandas(x, None)
if x.ndim == 2:
if is_pandas:
return x, x.columns
else:
return x, None
elif x.ndim > 2:
raise ValueError('x must be 1 or 2-dimensional.')
name = x.name if is_pandas else None
if ndarray:
return np.asarray(x)[:, None], name
else:
return pd.DataFrame(x), name | Parameters
----------
x : ndarray, Series, DataFrame or None
Input to verify dimensions, and to transform as necessary
ndarray : bool
Flag indicating whether to always return a NumPy array. Setting False
will return a pandas DataFrame when the input is a Series or a
DataFrame.
Returns
-------
out : ndarray, DataFrame or None
array or DataFrame with 2 dimensions. One dimensional arrays are
returned as nobs by 1. None is returned if x is None.
names : list of str or None
list containing variable names when the input is a pandas datatype.
Returns None if the input is an ndarray.
Notes
-----
Accepts None for simplicity | _ensure_2d | python | statsmodels/statsmodels | statsmodels/tools/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/tools.py | BSD-3-Clause |
def matrix_rank(m, tol=None, method="qr"):
"""
Matrix rank calculation using QR or SVD
Parameters
----------
m : array_like
A 2-d array-like object to test
tol : float, optional
The tolerance to use when testing the matrix rank. If not provided
an appropriate value is selected.
method : {"ip", "qr", "svd"}
The method used. "ip" uses the inner-product of a normalized version
of m and then computes the rank using NumPy's matrix_rank.
"qr" uses a QR decomposition and is the default. "svd" defers to
NumPy's matrix_rank.
Returns
-------
int
The rank of m.
Notes
-----
When using a QR factorization, the rank is determined by the number of
elements on the leading diagonal of the R matrix that are above tol
in absolute value.
"""
m = array_like(m, "m", ndim=2)
if method == "ip":
m = m[:, np.any(m != 0, axis=0)]
m = m / np.sqrt((m ** 2).sum(0))
m = m.T @ m
return np.linalg.matrix_rank(m, tol=tol, hermitian=True)
elif method == "qr":
r, = scipy.linalg.qr(m, mode="r")
abs_diag = np.abs(np.diag(r))
if tol is None:
tol = abs_diag[0] * m.shape[1] * np.finfo(float).eps
return int((abs_diag > tol).sum())
else:
return np.linalg.matrix_rank(m, tol=tol) | Matrix rank calculation using QR or SVD
Parameters
----------
m : array_like
A 2-d array-like object to test
tol : float, optional
The tolerance to use when testing the matrix rank. If not provided
an appropriate value is selected.
method : {"ip", "qr", "svd"}
The method used. "ip" uses the inner-product of a normalized version
of m and then computes the rank using NumPy's matrix_rank.
"qr" uses a QR decomposition and is the default. "svd" defers to
NumPy's matrix_rank.
Returns
-------
int
The rank of m.
Notes
-----
When using a QR factorization, the rank is determined by the number of
elements on the leading diagonal of the R matrix that are above tol
in absolute value. | matrix_rank | python | statsmodels/statsmodels | statsmodels/tools/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/tools.py | BSD-3-Clause |
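A sketch comparing the QR and SVD paths of matrix_rank on a rank-deficient design (the matrix is illustrative):

import numpy as np
from statsmodels.tools.tools import matrix_rank

m = np.column_stack((np.ones(5), np.arange(5.), np.arange(5.)))  # rank 2
matrix_rank(m)                 # 2, via the default QR method
matrix_rank(m, method='svd')   # 2, deferring to np.linalg.matrix_rank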
def _generate_url(func, stable):
"""
Parse inputs and return a correctly formatted URL, or raise ValueError
if the input is not understandable
"""
url = BASE_URL
if stable:
url += 'stable/'
else:
url += 'devel/'
if func is None:
return url
elif isinstance(func, str):
url += 'search.html?'
url += urlencode({'q': func})
url += '&check_keywords=yes&area=default'
else:
try:
func_name = func.__name__
func_module = func.__module__
if not func_module.startswith('statsmodels.'):
raise ValueError('Function must be from statsmodels')
url += 'generated/'
url += func_module + '.' + func_name + '.html'
except AttributeError:
raise ValueError('Input not understood')
return url | Parse inputs and return a correctly formatted URL, or raise ValueError
if the input is not understandable | _generate_url | python | statsmodels/statsmodels | statsmodels/tools/web.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/web.py | BSD-3-Clause |
def webdoc(func=None, stable=None):
"""
Opens a browser and displays online documentation
Parameters
----------
func : {str, callable}
Either a string to search the documentation or a function
stable : bool
Flag indicating whether to use the stable documentation (True) or
the development documentation (False). If not provided, opens
the stable documentation if the current version of statsmodels is a
release
Examples
--------
>>> import statsmodels.api as sm
Documentation site
>>> sm.webdoc()
Search for glm in docs
>>> sm.webdoc('glm')
Go to current generated help for OLS
>>> sm.webdoc(sm.OLS, stable=False)
Notes
-----
By default, opens the stable documentation if the current version of
statsmodels is a release. Otherwise opens the development documentation.
Uses the default system browser.
"""
# respect an explicit stable flag; otherwise use stable docs for releases
if stable is None:
stable = 'dev' not in __version__
url_or_error = _generate_url(func, stable)
webbrowser.open(url_or_error)
return None | Opens a browser and displays online documentation
Parameters
----------
func : {str, callable}
Either a string to search the documentation or a function
stable : bool
Flag indicating whether to use the stable documentation (True) or
the development documentation (False). If not provided, opens
the stable documentation if the current version of statsmodels is a
release
Examples
--------
>>> import statsmodels.api as sm
Documentation site
>>> sm.webdoc()
Search for glm in docs
>>> sm.webdoc('glm')
Go to current generated help for OLS
>>> sm.webdoc(sm.OLS, stable=False)
Notes
-----
By default, opens the stable documentation if the current version of
statsmodels is a release. Otherwise opens the development documentation.
Uses the default system browser. | webdoc | python | statsmodels/statsmodels | statsmodels/tools/web.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/web.py | BSD-3-Clause |
def bunch_factory(attribute, columns):
"""
Generates a special purpose Bunch class
Parameters
----------
attribute: str
Attribute to access when splitting
columns: List[str]
List of names to use when splitting the columns of attribute
Notes
-----
After the class is initialized as a Bunch, the columns of attribute
are split so that the Bunch has the keys in columns and
bunch[columns[i]] = bunch[attribute][:, i]
"""
class FactoryBunch(Bunch):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not hasattr(self, attribute):
raise AttributeError('{} is required and must be passed to '
'the constructor'.format(attribute))
for i, att in enumerate(columns):
self[att] = getattr(self, attribute)[:, i]
return FactoryBunch | Generates a special purpose Bunch class
Parameters
----------
attribute: str
Attribute to access when splitting
columns: List[str]
List of names to use when splitting the columns of attribute
Notes
-----
After the class is initialized as a Bunch, the columns of attribute
are split so that the Bunch has the keys in columns and
bunch[columns[i]] = bunch[attribute][:, i] | bunch_factory | python | statsmodels/statsmodels | statsmodels/tools/testing.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/testing.py | BSD-3-Clause |
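A sketch of bunch_factory; the attribute and column names here are made up for illustration:

import numpy as np
from statsmodels.tools.testing import bunch_factory

ParamsBunch = bunch_factory('params', ['const', 'slope'])
b = ParamsBunch(params=np.array([[1., 2.], [3., 4.]]))
b.const  # array([1., 3.]), the first column of b.params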
def logdet_symm(m, check_symm=False):
"""
Return log(det(m)) asserting positive definiteness of m.
Parameters
----------
m : array_like
2d array that is positive-definite (and symmetric)
Returns
-------
logdet : float
The log-determinant of m.
"""
from scipy import linalg
if check_symm:
if not np.all(m == m.T): # would be nice to short-circuit check
raise ValueError("m is not symmetric.")
c, _ = linalg.cho_factor(m, lower=True)
return 2*np.sum(np.log(c.diagonal())) | Return log(det(m)) asserting positive definiteness of m.
Parameters
----------
m : array_like
2d array that is positive-definite (and symmetric)
Returns
-------
logdet : float
The log-determinant of m. | logdet_symm | python | statsmodels/statsmodels | statsmodels/tools/linalg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/linalg.py | BSD-3-Clause |
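A sketch checking logdet_symm against numpy's slogdet on a small positive-definite matrix (values are illustrative):

import numpy as np
from statsmodels.tools.linalg import logdet_symm

m = np.array([[2., 1.], [1., 2.]])  # symmetric, positive definite, det == 3
logdet_symm(m)            # log(3), approximately 1.0986
np.linalg.slogdet(m)[1]   # same value, for comparison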
def stationary_solve(r, b):
"""
Solve a linear system for a Toeplitz correlation matrix.
A Toeplitz correlation matrix represents the covariance of a
stationary series with unit variance.
Parameters
----------
r : array_like
A vector describing the coefficient matrix. r[0] is the first
band next to the diagonal, r[1] is the second band, etc.
b : array_like
        The right-hand side for which we are solving, i.e. we solve
        Tx = b and return x, where T is the Toeplitz coefficient matrix.
Returns
-------
The solution to the linear system.
"""
db = r[0:1]
dim = b.ndim
if b.ndim == 1:
b = b[:, None]
x = b[0:1, :]
for j in range(1, len(b)):
rf = r[0:j][::-1]
a = (b[j, :] - np.dot(rf, x)) / (1 - np.dot(rf, db[::-1]))
z = x - np.outer(db[::-1], a)
x = np.concatenate((z, a[None, :]), axis=0)
if j == len(b) - 1:
break
rn = r[j]
a = (rn - np.dot(rf, db)) / (1 - np.dot(rf, db[::-1]))
z = db - a*db[::-1]
db = np.concatenate((z, np.r_[a]))
if dim == 1:
x = x[:, 0]
return x | Solve a linear system for a Toeplitz correlation matrix.
A Toeplitz correlation matrix represents the covariance of a
stationary series with unit variance.
Parameters
----------
r : array_like
A vector describing the coefficient matrix. r[0] is the first
band next to the diagonal, r[1] is the second band, etc.
b : array_like
    The right-hand side for which we are solving, i.e. we solve
    Tx = b and return x, where T is the Toeplitz coefficient matrix.
Returns
-------
The solution to the linear system. | stationary_solve | python | statsmodels/statsmodels | statsmodels/tools/linalg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/linalg.py | BSD-3-Clause |
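Verification sketch: r holds the off-diagonal bands, so the full coefficient matrix is toeplitz([1, r[0], r[1], ...]). The AR(1)-style bands below are illustrative.

import numpy as np
from scipy.linalg import toeplitz
from statsmodels.tools.linalg import stationary_solve

n = 6
r = 0.5 ** np.arange(1, n)             # bands of a stationary correlation
b = np.arange(1.0, n + 1)
x = stationary_solve(r, b)
T = toeplitz(np.r_[1.0, r])            # rebuild the full Toeplitz matrix
assert np.allclose(T @ x, b)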
def transf_constraints(constraints):
"""use QR to get transformation matrix to impose constraint
Parameters
----------
constraints : ndarray, 2-D
        restriction matrix with one constraint per row
Returns
-------
transf : ndarray
transformation matrix to reparameterize so that constraint is
imposed
Notes
-----
    This is currently an internal helper function for GAM.
API not stable and will most likely change.
The code for this function was taken from patsy spline handling, and
corresponds to the reparameterization used by Wood in R's mgcv package.
See Also
--------
statsmodels.base._constraints.TransformRestriction : class to impose
constraints by reparameterization used by `_fit_constrained`.
"""
from scipy import linalg
m = constraints.shape[0]
q, _ = linalg.qr(np.transpose(constraints))
transf = q[:, m:]
return transf | use QR to get transformation matrix to impose constraint
Parameters
----------
constraints : ndarray, 2-D
    restriction matrix with one constraint per row
Returns
-------
transf : ndarray
transformation matrix to reparameterize so that constraint is
imposed
Notes
-----
This is currently an internal helper function for GAM.
API not stable and will most likely change.
The code for this function was taken from patsy spline handling, and
corresponds to the reparameterization used by Wood in R's mgcv package.
See Also
--------
statsmodels.base._constraints.TransformRestriction : class to impose
constraints by reparameterization used by `_fit_constrained`. | transf_constraints | python | statsmodels/statsmodels | statsmodels/tools/linalg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/linalg.py | BSD-3-Clause |
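Sketch: impose a sum-to-zero constraint on three parameters; any params = transf @ free automatically satisfies the restriction.

import numpy as np
from statsmodels.tools.linalg import transf_constraints

C = np.array([[1.0, 1.0, 1.0]])        # constraint: p0 + p1 + p2 = 0
transf = transf_constraints(C)         # shape (3, 2)
params = transf @ np.array([0.7, -1.3])
assert np.allclose(C @ params, 0.0)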
def matrix_sqrt(mat, inverse=False, full=False, nullspace=False,
threshold=1e-15):
"""matrix square root for symmetric matrices
Usage is for decomposing a covariance function S into a square root R
such that
R' R = S if inverse is False, or
R' R = pinv(S) if inverse is True
Parameters
----------
mat : array_like, 2-d square
symmetric square matrix for which square root or inverse square
root is computed.
There is no checking for whether the matrix is symmetric.
A warning is issued if some singular values are negative, i.e.
below the negative of the threshold.
inverse : bool
If False (default), then the matrix square root is returned.
If inverse is True, then the matrix square root of the inverse
matrix is returned.
full : bool
        If full is False (default), then the square root has a reduced
        number of rows if the matrix is singular, i.e. has singular values
        below the threshold.
    nullspace : bool
        If nullspace is True, then the matrix square root of the null space
        of the matrix is returned.
threshold : float
Singular values below the threshold are dropped.
Returns
-------
msqrt : ndarray
matrix square root or square root of inverse matrix.
"""
# see also scipy.linalg null_space
u, s, v = np.linalg.svd(mat)
if np.any(s < -threshold):
import warnings
warnings.warn('some singular values are negative')
if not nullspace:
mask = s > threshold
s[s < threshold] = 0
else:
mask = s < threshold
s[s > threshold] = 0
sqrt_s = np.sqrt(s[mask])
if inverse:
sqrt_s = 1 / np.sqrt(s[mask])
if full:
b = np.dot(u[:, mask], np.dot(np.diag(sqrt_s), v[mask]))
else:
b = np.dot(np.diag(sqrt_s), v[mask])
return b | matrix square root for symmetric matrices
Usage is for decomposing a covariance function S into a square root R
such that
R' R = S if inverse is False, or
R' R = pinv(S) if inverse is True
Parameters
----------
mat : array_like, 2-d square
symmetric square matrix for which square root or inverse square
root is computed.
There is no checking for whether the matrix is symmetric.
A warning is issued if some singular values are negative, i.e.
below the negative of the threshold.
inverse : bool
If False (default), then the matrix square root is returned.
If inverse is True, then the matrix square root of the inverse
matrix is returned.
full : bool
    If full is False (default), then the square root has a reduced
    number of rows if the matrix is singular, i.e. has singular values
    below the threshold.
nullspace : bool
    If nullspace is True, then the matrix square root of the null space
of the matrix is returned.
threshold : float
Singular values below the threshold are dropped.
Returns
-------
msqrt : ndarray
matrix square root or square root of inverse matrix. | matrix_sqrt | python | statsmodels/statsmodels | statsmodels/tools/linalg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/linalg.py | BSD-3-Clause |
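Sketch: for symmetric positive semidefinite S the returned R satisfies R' R = S, and R' R = pinv(S) with inverse=True (checked here up to the rank tolerance; the random rank-deficient matrix is illustrative).

import numpy as np
from statsmodels.tools.linalg import matrix_sqrt

rng = np.random.default_rng(1)
a = rng.standard_normal((5, 3))
s = a @ a.T                            # rank 3, two singular values ~ 0
r = matrix_sqrt(s)                     # reduced form, shape (3, 5)
assert np.allclose(r.T @ r, s)
r_inv = matrix_sqrt(s, inverse=True)
assert np.allclose(r_inv.T @ r_inv, np.linalg.pinv(s))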
def add_indep(x, varnames, dtype=None):
'''
construct array with independent columns
    x is either an iterable (list, tuple) or an instance of ndarray or a subclass
of it. If x is an ndarray, then each column is assumed to represent a
variable with observations in rows.
'''
# TODO: this needs tests for subclasses
if isinstance(x, np.ndarray) and x.ndim == 2:
x = x.T
nvars_orig = len(x)
nobs = len(x[0])
if not dtype:
dtype = np.asarray(x[0]).dtype
xout = np.zeros((nobs, nvars_orig), dtype=dtype)
count = 0
rank_old = 0
varnames_new = []
varnames_dropped = []
for (xi, ni) in zip(x, varnames):
xout[:, count] = xi
rank_new = np.linalg.matrix_rank(xout)
if rank_new > rank_old:
varnames_new.append(ni)
rank_old = rank_new
count += 1
else:
varnames_dropped.append(ni)
return xout[:, :count], varnames_new | construct array with independent columns
x is either an iterable (list, tuple) or an instance of ndarray or a subclass
of it. If x is an ndarray, then each column is assumed to represent a
variable with observations in rows. | add_indep | python | statsmodels/statsmodels | statsmodels/tools/catadd.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/catadd.py | BSD-3-Clause |
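Sketch: the third column below is the sum of the first two, so it is dropped and only the names of the kept columns are returned.

import numpy as np
from statsmodels.tools.catadd import add_indep

x1 = np.array([1.0, 0.0, 1.0, 0.0])
x2 = np.array([0.0, 1.0, 0.0, 1.0])
xout, names = add_indep([x1, x2, x1 + x2], ['a', 'b', 'a_plus_b'])
print(names)        # ['a', 'b']
print(xout.shape)   # (4, 2)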
def deprecated_alias(old_name, new_name, remove_version=None, msg=None,
warning=FutureWarning):
"""
Deprecate attribute in favor of alternative name.
Parameters
----------
old_name : str
Old, deprecated name
new_name : str
New name
remove_version : str, optional
Version that the alias will be removed
msg : str, optional
Message to show. Default is
`old_name` is a deprecated alias for `new_name`
warning : Warning, optional
Warning class to give. Default is FutureWarning.
Notes
-----
Older or less-used classes may not conform to statsmodels naming
conventions. `deprecated_alias` lets us bring them into conformance
without breaking backward-compatibility.
Example
-------
Instances of the `Foo` class have a `nvars` attribute, but it _should_
be called `neqs`:
class Foo:
nvars = deprecated_alias('nvars', 'neqs')
def __init__(self, neqs):
self.neqs = neqs
>>> foo = Foo(3)
>>> foo.nvars
__main__:1: FutureWarning: nvars is a deprecated alias for neqs
3
"""
if msg is None:
msg = f'{old_name} is a deprecated alias for {new_name}'
if remove_version is not None:
msg += ', will be removed in version %s' % remove_version
def fget(self):
warnings.warn(msg, warning, stacklevel=2)
return getattr(self, new_name)
def fset(self, value):
warnings.warn(msg, warning, stacklevel=2)
setattr(self, new_name, value)
res = property(fget=fget, fset=fset)
return res | Deprecate attribute in favor of alternative name.
Parameters
----------
old_name : str
Old, deprecated name
new_name : str
New name
remove_version : str, optional
Version that the alias will be removed
msg : str, optional
Message to show. Default is
`old_name` is a deprecated alias for `new_name`
warning : Warning, optional
Warning class to give. Default is FutureWarning.
Notes
-----
Older or less-used classes may not conform to statsmodels naming
conventions. `deprecated_alias` lets us bring them into conformance
without breaking backward-compatibility.
Example
-------
Instances of the `Foo` class have a `nvars` attribute, but it _should_
be called `neqs`:
class Foo:
nvars = deprecated_alias('nvars', 'neqs')
def __init__(self, neqs):
self.neqs = neqs
>>> foo = Foo(3)
>>> foo.nvars
__main__:1: FutureWarning: nvars is a deprecated alias for neqs
3 | deprecated_alias | python | statsmodels/statsmodels | statsmodels/tools/decorators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/decorators.py | BSD-3-Clause |
def show_versions(show_dirs=True):
"""
List the versions of statsmodels and any installed dependencies
Parameters
----------
show_dirs : bool
Flag indicating to show module locations
"""
    if not show_dirs:
        _show_versions_only()
        return  # short listing only; skip the detailed output below
print("\nINSTALLED VERSIONS")
print("------------------")
print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
uname = platform.uname()
sysname = uname.system
release = uname.release
version = uname.version
machine = uname.machine
print(f"OS: {sysname} {release} {version} {machine}")
print("byteorder: %s" % sys.byteorder)
print("LC_ALL: %s" % os.environ.get("LC_ALL", "None"))
print("LANG: %s" % os.environ.get("LANG", "None"))
try:
import statsmodels
has_sm = True
except ImportError:
has_sm = False
print("\nstatsmodels\n===========\n")
if has_sm:
print(
"Installed: {} ({})".format(
safe_version(statsmodels), dirname(statsmodels.__file__)
)
)
else:
print("Not installed")
print("\nRequired Dependencies\n=====================\n")
try:
import Cython
print("cython: {} ({})".format(safe_version(Cython), dirname(Cython.__file__)))
except ImportError:
print("cython: Not installed")
try:
import numpy
print("numpy: {} ({})".format(safe_version(numpy), dirname(numpy.__file__)))
except ImportError:
print("numpy: Not installed")
try:
import scipy
print("scipy: {} ({})".format(safe_version(scipy), dirname(scipy.__file__)))
except ImportError:
print("scipy: Not installed")
try:
import pandas
print(
"pandas: {} ({})".format(
safe_version(pandas, "__version__"),
dirname(pandas.__file__),
)
)
except ImportError:
print("pandas: Not installed")
try:
import dateutil
print(
" dateutil: {} ({})".format(
safe_version(dateutil), dirname(dateutil.__file__)
)
)
except ImportError:
print(" dateutil: not installed")
try:
import patsy
print("patsy: {} ({})".format(safe_version(patsy), dirname(patsy.__file__)))
except ImportError:
print("patsy: Not installed")
print("\nOptional Dependencies\n=====================\n")
try:
import matplotlib as mpl
print("matplotlib: {} ({})".format(safe_version(mpl), dirname(mpl.__file__)))
print(" backend: %s " % mpl.rcParams["backend"])
except ImportError:
print("matplotlib: Not installed")
try:
from cvxopt import info
print(
"cvxopt: {} ({})".format(
safe_version(info, "version"), dirname(info.__file__)
)
)
except ImportError:
print("cvxopt: Not installed")
try:
import joblib
print("joblib: {} ({})".format(safe_version(joblib), dirname(joblib.__file__)))
except ImportError:
print("joblib: Not installed")
print("\nDeveloper Tools\n================\n")
try:
import IPython
print(
"IPython: {} ({})".format(safe_version(IPython), dirname(IPython.__file__))
)
except ImportError:
print("IPython: Not installed")
try:
import jinja2
print(
" jinja2: {} ({})".format(safe_version(jinja2), dirname(jinja2.__file__))
)
except ImportError:
print(" jinja2: Not installed")
try:
import sphinx
print("sphinx: {} ({})".format(safe_version(sphinx), dirname(sphinx.__file__)))
except ImportError:
print("sphinx: Not installed")
try:
import pygments
print(
" pygments: {} ({})".format(
safe_version(pygments), dirname(pygments.__file__)
)
)
except ImportError:
print(" pygments: Not installed")
try:
import pytest
print(f"pytest: {safe_version(pytest)} ({dirname(pytest.__file__)})")
except ImportError:
print("pytest: Not installed")
try:
import virtualenv
print(
"virtualenv: {} ({})".format(
safe_version(virtualenv), dirname(virtualenv.__file__)
)
)
except ImportError:
print("virtualenv: Not installed")
print("\n") | List the versions of statsmodels and any installed dependencies
Parameters
----------
show_dirs : bool
Flag indicating to show module locations | show_versions | python | statsmodels/statsmodels | statsmodels/tools/print_version.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/print_version.py | BSD-3-Clause |
def dedent_lines(lines):
"""Deindent a list of lines maximally"""
return textwrap.dedent("\n".join(lines)).split("\n") | Deindent a list of lines maximally | dedent_lines | python | statsmodels/statsmodels | statsmodels/tools/docstring.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/docstring.py | BSD-3-Clause |
def strip_blank_lines(line):
"""Remove leading and trailing blank lines from a list of lines"""
while line and not line[0].strip():
del line[0]
while line and not line[-1].strip():
del line[-1]
return line | Remove leading and trailing blank lines from a list of lines | strip_blank_lines | python | statsmodels/statsmodels | statsmodels/tools/docstring.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/docstring.py | BSD-3-Clause |
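Sketch of the two line-level helpers used by the docstring parser:

from statsmodels.tools.docstring import dedent_lines, strip_blank_lines

print(strip_blank_lines(['', '    a', '      b', '']))  # ['    a', '      b']
print(dedent_lines(['    a', '      b']))               # ['a', '  b']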
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\n'.
"""
if isinstance(data, list):
self._str = data
else:
self._str = data.split("\n") # store string as list of lines
self.reset() | Parameters
----------
data : str
String with lines separated by '\n'. | __init__ | python | statsmodels/statsmodels | statsmodels/tools/docstring.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/docstring.py | BSD-3-Clause |
def parse_item_name(text):
"""Match ':role:`name`' or 'name'."""
m = self._func_rgx.match(text)
if not m:
        raise ParseError(f"{text} is not an item name")
role = m.group("role")
name = m.group("name") if role else m.group("name2")
return name, role, m.end() | Match ':role:`name`' or 'name'. | _parse_see_also.parse_item_name | python | statsmodels/statsmodels | statsmodels/tools/docstring.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/docstring.py | BSD-3-Clause |
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'."""
m = self._func_rgx.match(text)
if not m:
            raise ParseError(f"{text} is not an item name")
role = m.group("role")
name = m.group("name") if role else m.group("name2")
return name, role, m.end()
rest = []
for line in content:
if not line.strip():
continue
line_match = self._line_rgx.match(line)
description = None
if line_match:
description = line_match.group("desc")
if line_match.group("trailing") and description:
self._error_location(
"Unexpected comma or period after function list at "
"index %d of line "
'"%s"' % (line_match.end("trailing"), line)
)
if not description and line.startswith(" "):
rest.append(line.strip())
elif line_match:
funcs = []
text = line_match.group("allfuncs")
while True:
if not text.strip():
break
name, role, match_end = parse_item_name(text)
funcs.append((name, role))
text = text[match_end:].strip()
if text and text[0] == ",":
text = text[1:].strip()
rest = list(filter(None, [description]))
items.append((funcs, rest))
else:
            raise ParseError(f"{line} is not an item name")
return items | func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3 | _parse_see_also | python | statsmodels/statsmodels | statsmodels/tools/docstring.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/docstring.py | BSD-3-Clause |
def _parse_index(self, section, content):
"""
.. index: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split("::")
if len(section) > 1:
out["default"] = strip_each_in(section[1].split(","))[0]
for line in content:
line = line.split(":")
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(","))
return out | .. index: default
:refguide: something, else, and more | _parse_index | python | statsmodels/statsmodels | statsmodels/tools/docstring.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/docstring.py | BSD-3-Clause |
def _parse_summary(self):
"""Grab signature (if given) and summary"""
if self._is_at_section():
return
# If several signatures present, take the last one
while True:
summary = self._doc.read_to_next_empty_line()
summary_str = " ".join([s.strip() for s in summary]).strip()
compiled = re.compile(r"^([\w., ]+=)?\s*[\w\.]+\(.*\)$")
if compiled.match(summary_str):
self["Signature"] = summary_str
if not self._is_at_section():
continue
break
if summary is not None:
self["Summary"] = summary
if not self._is_at_section():
self["Extended Summary"] = self._read_to_next_section() | Grab signature (if given) and summary | _parse_summary | python | statsmodels/statsmodels | statsmodels/tools/docstring.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/docstring.py | BSD-3-Clause |
def remove_parameters(self, parameters):
"""
Parameters
----------
parameters : str, list[str]
The names of the parameters to remove.
"""
if self._docstring is None:
        # Protection against python -OO execution (docstrings are stripped)
return
if isinstance(parameters, str):
parameters = [parameters]
repl = [
param
for param in self._ds["Parameters"]
if param.name not in parameters
]
if len(repl) + len(parameters) != len(self._ds["Parameters"]):
raise ValueError("One or more parameters were not found.")
self._ds["Parameters"] = repl | Parameters
----------
parameters : str, list[str]
The names of the parameters to remove. | remove_parameters | python | statsmodels/statsmodels | statsmodels/tools/docstring.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/docstring.py | BSD-3-Clause |
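Sketch: drop a parameter from a parsed docstring via the Docstring wrapper defined in this module (the example docstring is illustrative):

from statsmodels.tools.docstring import Docstring

doc = """
Summary line.

Parameters
----------
x : int
    First parameter.
y : int
    Second parameter.
"""
ds = Docstring(doc)
ds.remove_parameters('y')
assert 'y : int' not in str(ds)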
def insert_parameters(self, after, parameters):
"""
Parameters
----------
after : {None, str}
        If None, insert the parameters before the first parameter in the
docstring.
parameters : Parameter, list[Parameter]
        A Parameter or a list of Parameters.
"""
if self._docstring is None:
        # Protection against python -OO execution (docstrings are stripped)
return
if isinstance(parameters, Parameter):
parameters = [parameters]
if after is None:
self._ds["Parameters"] = parameters + self._ds["Parameters"]
else:
loc = -1
for i, param in enumerate(self._ds["Parameters"]):
if param.name == after:
loc = i + 1
break
if loc < 0:
raise ValueError()
params = self._ds["Parameters"][:loc] + parameters
params += self._ds["Parameters"][loc:]
self._ds["Parameters"] = params | Parameters
----------
after : {None, str}
    If None, insert the parameters before the first parameter in the
docstring.
parameters : Parameter, list[Parameter]
    A Parameter or a list of Parameters. | insert_parameters | python | statsmodels/statsmodels | statsmodels/tools/docstring.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/docstring.py | BSD-3-Clause
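Sketch: insert a new parameter after 'x'. Parameter is the (name, type, desc) triple used by the parser, with desc given as a list of lines; the example docstring is illustrative.

from statsmodels.tools.docstring import Docstring, Parameter

doc = """
Summary line.

Parameters
----------
x : int
    First parameter.
"""
ds = Docstring(doc)
ds.insert_parameters('x', Parameter('y', 'float', ['Second parameter.']))
assert 'y : float' in str(ds)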
def replace_block(self, block_name, block):
"""
Parameters
----------
block_name : str
Name of the block to replace, e.g., 'Summary'.
block : object
The replacement block. The structure of the replacement block must
match how the block is stored by NumpyDocString.
"""
if self._docstring is None:
        # Protection against python -OO execution (docstrings are stripped)
return
block_name = " ".join(map(str.capitalize, block_name.split(" ")))
if block_name not in self._ds:
raise ValueError(
"{} is not a block in the docstring".format(block_name)
)
if not isinstance(block, list) and isinstance(
self._ds[block_name], list
):
block = [block]
self._ds[block_name] = block | Parameters
----------
block_name : str
Name of the block to replace, e.g., 'Summary'.
block : object
The replacement block. The structure of the replacement block must
match how the block is stored by NumpyDocString. | replace_block | python | statsmodels/statsmodels | statsmodels/tools/docstring.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/docstring.py | BSD-3-Clause |
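Sketch: block names are re-capitalized internally, so 'summary' and 'Summary' address the same block.

from statsmodels.tools.docstring import Docstring

ds = Docstring("Old summary.\n")
ds.replace_block('summary', ['New summary.'])
assert 'New summary.' in str(ds)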
def remove_parameters(docstring, parameters):
"""
Parameters
----------
docstring : str
The docstring to modify.
parameters : str, list[str]
The names of the parameters to remove.
Returns
-------
str
The modified docstring.
"""
if docstring is None:
return
ds = Docstring(docstring)
ds.remove_parameters(parameters)
return str(ds) | Parameters
----------
docstring : str
The docstring to modify.
parameters : str, list[str]
The names of the parameters to remove.
Returns
-------
str
The modified docstring. | remove_parameters | python | statsmodels/statsmodels | statsmodels/tools/docstring.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/docstring.py | BSD-3-Clause |
def indent(text, prefix, predicate=None):
"""
Non-protected indent
Parameters
----------
text : {None, str}
If None, function always returns ""
prefix : str
Prefix to add to the start of each line
predicate : callable, optional
If provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
    Returns
    -------
    str
        The indented text ("" if text is None).
    """
if text is None:
return ""
return textwrap.indent(text, prefix, predicate=predicate) | Non-protected indent
Parameters
----------
text : {None, str}
If None, function always returns ""
prefix : str
Prefix to add to the start of each line
predicate : callable, optional
If provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
Returns
-------
str
    The indented text ("" if text is None). | indent | python | statsmodels/statsmodels | statsmodels/tools/docstring.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/docstring.py | BSD-3-Clause
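Sketch: unlike bare textwrap.indent, the wrapper tolerates text=None.

from statsmodels.tools.docstring import indent

assert indent(None, '> ') == ''
assert indent('a\nb', '> ') == '> a\n> b'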
def transform(self, data):
"""standardize the data using the stored transformation
"""
# could use scipy.stats.zscore instead
if self.mean is None:
return np.asarray(data) / self.scale
else:
return (np.asarray(data) - self.mean) / self.scale | standardize the data using the stored transformation | transform | python | statsmodels/statsmodels | statsmodels/tools/transform_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/transform_model.py | BSD-3-Clause |
def transform_params(self, params):
"""Transform parameters of the standardized model to the original model
Parameters
----------
params : ndarray
parameters estimated with the standardized model
Returns
-------
params_new : ndarray
parameters transformed to the parameterization of the original
model
"""
params_new = params / self.scale
if self.const_idx != 'n':
params_new[self.const_idx] -= (params_new * self.mean).sum()
return params_new | Transform parameters of the standardized model to the original model
Parameters
----------
params : ndarray
parameters estimated with the standardized model
Returns
-------
params_new : ndarray
parameters transformed to the parameterization of the original
model | transform_params | python | statsmodels/statsmodels | statsmodels/tools/transform_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/transform_model.py | BSD-3-Clause |
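Hedged sketch: map least-squares parameters estimated on standardized exog back to the original scale. This assumes the enclosing class is StandardizeTransform (the class defined in statsmodels/tools/transform_model.py) and that its constructor takes the exog array as its first argument, detecting the constant column automatically.

import numpy as np
from statsmodels.tools.transform_model import StandardizeTransform

rng = np.random.default_rng(2)
x = np.column_stack([np.ones(100),
                     2.0 * rng.standard_normal(100),
                     5.0 * rng.standard_normal(100)])
y = x @ np.array([1.0, 0.5, -0.2]) + 0.1 * rng.standard_normal(100)

transf = StandardizeTransform(x)       # constructor signature assumed
params_std = np.linalg.lstsq(transf.transform(x), y, rcond=None)[0]
print(transf.transform_params(params_std))
print(np.linalg.lstsq(x, y, rcond=None)[0])   # should agree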
def interpret_data(data, colnames=None, rownames=None):
"""
Convert passed data structure to form required by estimation classes
Parameters
----------
data : array_like
colnames : sequence or None
May be part of data structure
rownames : sequence or None
Returns
-------
(values, colnames, rownames) : (homogeneous ndarray, list)
"""
if isinstance(data, np.ndarray):
values = np.asarray(data)
if colnames is None:
colnames = ['Y_%d' % i for i in range(values.shape[1])]
elif is_data_frame(data):
# XXX: hack
data = data.dropna()
values = data.values
colnames = data.columns
rownames = data.index
else: # pragma: no cover
raise TypeError('Cannot handle input type {typ}'
.format(typ=type(data).__name__))
if not isinstance(colnames, list):
colnames = list(colnames)
# sanity check
if len(colnames) != values.shape[1]:
raise ValueError('length of colnames does not match number '
'of columns in data')
if rownames is not None and len(rownames) != len(values):
raise ValueError('length of rownames does not match number '
'of rows in data')
return values, colnames, rownames | Convert passed data structure to form required by estimation classes
Parameters
----------
data : array_like
colnames : sequence or None
May be part of data structure
rownames : sequence or None
Returns
-------
(values, colnames, rownames) : (homogeneous ndarray, list) | interpret_data | python | statsmodels/statsmodels | statsmodels/tools/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/data.py | BSD-3-Clause |
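Sketch with a pandas DataFrame; rows containing NaN are dropped before the values, column names, and row index are extracted.

import numpy as np
import pandas as pd
from statsmodels.tools.data import interpret_data

df = pd.DataFrame({'y1': [1.0, 2.0, np.nan], 'y2': [3.0, 4.0, 5.0]})
values, colnames, rownames = interpret_data(df)
print(values.shape)   # (2, 2) -- the NaN row is dropped
print(colnames)       # ['y1', 'y2']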
def _is_recarray(data):
"""
Returns true if data is a recarray
"""
if NP_LT_2:
return isinstance(data, np.core.recarray)
else:
return isinstance(data, np.rec.recarray) | Returns true if data is a recarray | _is_recarray | python | statsmodels/statsmodels | statsmodels/tools/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/data.py | BSD-3-Clause |
def _as_array_with_name(obj, default_name):
"""
    Call np.asarray() on obj and attempt to get the name if it is a Series.
Parameters
----------
    obj : pd.Series or array_like
        Object to convert to an array
default_name: str
The default name to return in case the object isn't a pd.Series or has
no name attribute.
Returns
-------
array_and_name: tuple[np.ndarray, str]
        The data cast to np.ndarray and the series name or None
"""
if is_series(obj):
return (np.asarray(obj), obj.name)
    return (np.asarray(obj), default_name) | Call np.asarray() on obj and attempt to get the name if it is a Series.
Parameters
----------
obj : pd.Series or array_like
    Object to convert to an array
default_name: str
The default name to return in case the object isn't a pd.Series or has
no name attribute.
Returns
-------
array_and_name: tuple[np.ndarray, str]
The data cast to np.ndarray and the series name or None | _as_array_with_name | python | statsmodels/statsmodels | statsmodels/tools/data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/data.py | BSD-3-Clause
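Sketch: the name is taken from the Series when available, otherwise the supplied default is returned.

import pandas as pd
from statsmodels.tools.data import _as_array_with_name

arr, name = _as_array_with_name(pd.Series([1, 2], name='endog'), 'y')
assert name == 'endog'
arr, name = _as_array_with_name([1, 2], 'y')
assert name == 'y'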