code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def _deriv_score_obs_dendog(self, params):
"""derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog. This
is given by `score_factor0[:, None] * exog` where
`score_factor0` is the score_factor without the residual.
"""
linpred = self.predict(params, which="linear")
pdf_ = self.pdf(linpred)
# clip to get rid of invalid divide complaint
cdf_ = np.clip(self.cdf(linpred), FLOAT_EPS, 1 - FLOAT_EPS)
deriv = pdf_ / cdf_ / (1 - cdf_) # deriv factor
return deriv[:, None] * self.exog | derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog. This
is given by `score_factor0[:, None] * exog` where
`score_factor0` is the score_factor without the residual. | _deriv_score_obs_dendog | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def pdf(self, eXB):
"""
NotImplemented
"""
raise NotImplementedError | NotImplemented | pdf | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def cdf(self, X):
"""
Multinomial logit cumulative distribution function.
Parameters
----------
X : ndarray
The linear predictor of the model XB.
Returns
-------
cdf : ndarray
The cdf evaluated at `X`.
Notes
-----
In the multinomial logit model:
.. math:: \\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}
"""
eXB = np.column_stack((np.ones(len(X)), np.exp(X)))
return eXB/eXB.sum(1)[:,None] | Multinomial logit cumulative distribution function.
Parameters
----------
X : ndarray
The linear predictor of the model XB.
Returns
-------
cdf : ndarray
The cdf evaluated at `X`.
Notes
-----
In the multinomial logit model:
.. math:: \\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)} | cdf | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
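A minimal numerical sketch of the formula above, assuming only numpy and hypothetical linear predictors: a column of ones (exp(0)) is prepended for the reference category before normalizing, so each row of choice probabilities sums to one.

```python
import numpy as np

# hypothetical linear predictors x_i' beta_j for the J-1 = 2 non-reference alternatives
XB = np.array([[0.2, -0.5],
               [1.0, 0.3]])
# prepend exp(0) = 1 for the reference category, then normalize row-wise
eXB = np.column_stack((np.ones(len(XB)), np.exp(XB)))
probs = eXB / eXB.sum(1)[:, None]
assert np.allclose(probs.sum(1), 1.0)  # each row of probabilities sums to one
```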
def loglike(self, params):
"""
Log-likelihood of the multinomial logit model.
Parameters
----------
params : array_like
The parameters of the multinomial logit model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math::
\\ln L=\\sum_{i=1}^{n}\\sum_{j=0}^{J}d_{ij}\\ln
\\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}
{\\sum_{k=0}^{J}
\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)
where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0
if not.
"""
params = params.reshape(self.K, -1, order='F')
d = self.wendog
logprob = np.log(self.cdf(np.dot(self.exog,params)))
return np.sum(d * logprob) | Log-likelihood of the multinomial logit model.
Parameters
----------
params : array_like
The parameters of the multinomial logit model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math::
\\ln L=\\sum_{i=1}^{n}\\sum_{j=0}^{J}d_{ij}\\ln
\\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}
{\\sum_{k=0}^{J}
\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)
where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0
if not. | loglike | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def loglikeobs(self, params):
"""
Log-likelihood of the multinomial logit model for each observation.
Parameters
----------
params : array_like
The parameters of the multinomial logit model.
Returns
-------
loglike : array_like
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
-----
.. math::
\\ln L_{i}=\\sum_{j=0}^{J}d_{ij}\\ln
\\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}
{\\sum_{k=0}^{J}
\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)
for observations :math:`i=1,...,n`
where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0
if not.
"""
params = params.reshape(self.K, -1, order='F')
d = self.wendog
logprob = np.log(self.cdf(np.dot(self.exog,params)))
return d * logprob | Log-likelihood of the multinomial logit model for each observation.
Parameters
----------
params : array_like
The parameters of the multinomial logit model.
Returns
-------
loglike : array_like
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
-----
.. math::
\\ln L_{i}=\\sum_{j=0}^{J}d_{ij}\\ln
\\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}
{\\sum_{k=0}^{J}
\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)
for observations :math:`i=1,...,n`
where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0
if not. | loglikeobs | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def score(self, params):
"""
Score matrix for multinomial logit model log-likelihood
Parameters
----------
params : ndarray
The parameters of the multinomial logit model.
Returns
-------
score : ndarray, (K * (J-1),)
The 2-d score vector, i.e. the first derivative of the
loglikelihood function, of the multinomial logit model evaluated at
`params`.
Notes
-----
.. math:: \\frac{\\partial\\ln L}{\\partial\\beta_{j}}=\\sum_{i}\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i}
for :math:`j=1,...,J`
In the multinomial model the score matrix is K x J-1 but is returned
as a flattened array to work with the solvers.
"""
params = params.reshape(self.K, -1, order='F')
firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog,
params))[:,1:]
#NOTE: might need to switch terms if params is reshaped
return np.dot(firstterm.T, self.exog).flatten() | Score matrix for multinomial logit model log-likelihood
Parameters
----------
params : ndarray
The parameters of the multinomial logit model.
Returns
-------
score : ndarray, (K * (J-1),)
The 2-d score vector, i.e. the first derivative of the
loglikelihood function, of the multinomial logit model evaluated at
`params`.
Notes
-----
.. math:: \\frac{\\partial\\ln L}{\\partial\\beta_{j}}=\\sum_{i}\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i}
for :math:`j=1,...,J`
In the multinomial model the score matrix is K x J-1 but is returned
as a flattened array to work with the solvers. | score | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
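As a sanity check on the flattened score described above, the analytic gradient can be compared against a numerical gradient of `loglike`. This is a hedged sketch with simulated data; the `MNLogit` attributes `J` and `K` and `approx_fprime` are assumed to behave as in recent statsmodels releases.

```python
import numpy as np
import statsmodels.api as sm
from statsmodels.tools.numdiff import approx_fprime

rng = np.random.default_rng(0)
exog = sm.add_constant(rng.normal(size=(200, 2)))   # K = 3 regressors
endog = rng.integers(0, 3, size=200)                # J = 3 alternatives (simulated)
mod = sm.MNLogit(endog, exog)
params = np.zeros(mod.K * (mod.J - 1))              # flattened K x (J-1) parameter vector
num_grad = approx_fprime(params, mod.loglike, centered=True)
assert np.allclose(mod.score(params), num_grad, atol=1e-4)
```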
def loglike_and_score(self, params):
"""
Returns log likelihood and score, efficiently reusing calculations.
Note that both of these returned quantities will need to be negated
before being minimized by the maximum likelihood fitting machinery.
"""
params = params.reshape(self.K, -1, order='F')
cdf_dot_exog_params = self.cdf(np.dot(self.exog, params))
loglike_value = np.sum(self.wendog * np.log(cdf_dot_exog_params))
firstterm = self.wendog[:, 1:] - cdf_dot_exog_params[:, 1:]
score_array = np.dot(firstterm.T, self.exog).flatten()
return loglike_value, score_array | Returns log likelihood and score, efficiently reusing calculations.
Note that both of these returned quantities will need to be negated
before being minimized by the maximum likelihood fitting machinery. | loglike_and_score | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def score_obs(self, params):
"""
Jacobian matrix for multinomial logit model log-likelihood
Parameters
----------
params : ndarray
The parameters of the multinomial logit model.
Returns
-------
jac : array_like
The derivative of the loglikelihood for each observation evaluated
at `params` .
Notes
-----
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta_{j}}=\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i}
for :math:`j=1,...,J`, for observations :math:`i=1,...,n`
In the multinomial model the score vector is K x (J-1) but is returned
as a flattened array. The Jacobian has the observations in rows and
the flattened array of derivatives in columns.
"""
params = params.reshape(self.K, -1, order='F')
firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog,
params))[:,1:]
#NOTE: might need to switch terms if params is reshaped
return (firstterm[:,:,None] * self.exog[:,None,:]).reshape(self.exog.shape[0], -1) | Jacobian matrix for multinomial logit model log-likelihood
Parameters
----------
params : ndarray
The parameters of the multinomial logit model.
Returns
-------
jac : array_like
The derivative of the loglikelihood for each observation evaluated
at `params` .
Notes
-----
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta_{j}}=\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i}
for :math:`j=1,...,J`, for observations :math:`i=1,...,n`
In the multinomial model the score vector is K x (J-1) but is returned
as a flattened array. The Jacobian has the observations in rows and
the flattened array of derivatives in columns. | score_obs | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def hessian(self, params):
"""
Multinomial logit Hessian matrix of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (J*K, J*K)
The Hessian, second derivative of loglikelihood function with
respect to the flattened parameters, evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta_{j}\\partial\\beta_{l}}=-\\sum_{i=1}^{n}\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\left[\\boldsymbol{1}\\left(j=l\\right)-\\frac{\\exp\\left(\\beta_{l}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right]x_{i}x_{l}^{\\prime}
where
:math:`\\boldsymbol{1}\\left(j=l\\right)` equals 1 if `j` = `l` and 0
otherwise.
The actual Hessian matrix has J**2 * K x K elements. Our Hessian
is reshaped to be square (J*K, J*K) so that the solvers can use it.
This implementation does not take advantage of the symmetry of
the Hessian and could probably be refactored for speed.
"""
params = params.reshape(self.K, -1, order='F')
X = self.exog
pr = self.cdf(np.dot(X,params))
partials = []
J = self.J
K = self.K
for i in range(J-1):
for j in range(J-1): # this loop assumes we drop the first col.
if i == j:
partials.append(\
-np.dot(((pr[:,i+1]*(1-pr[:,j+1]))[:,None]*X).T,X))
else:
partials.append(-np.dot(((pr[:,i+1]*-pr[:,j+1])[:,None]*X).T,X))
H = np.array(partials)
# the developer's notes on multinomial should clear this math up
H = np.transpose(H.reshape(J-1, J-1, K, K), (0, 2, 1, 3)).reshape((J-1)*K, (J-1)*K)
return H | Multinomial logit Hessian matrix of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (J*K, J*K)
The Hessian, second derivative of loglikelihood function with
respect to the flattened parameters, evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta_{j}\\partial\\beta_{l}}=-\\sum_{i=1}^{n}\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\left[\\boldsymbol{1}\\left(j=l\\right)-\\frac{\\exp\\left(\\beta_{l}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right]x_{i}x_{l}^{\\prime}
where
:math:`\\boldsymbol{1}\\left(j=l\\right)` equals 1 if `j` = `l` and 0
otherwise.
The actual Hessian matrix has J**2 * K x K elements. Our Hessian
is reshaped to be square (J*K, J*K) so that the solvers can use it.
This implementation does not take advantage of the symmetry of
the Hessian and could probably be refactored for speed. | hessian | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
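The final reshape in `hessian` rearranges a (J-1) x (J-1) grid of K x K blocks into one square matrix. A small standalone sketch of that index pattern, with arbitrary block values:

```python
import numpy as np

J, K = 3, 2
blocks = np.arange((J - 1) ** 2 * K * K, dtype=float).reshape(J - 1, J - 1, K, K)
# same transpose/reshape pattern as in the method above
H = np.transpose(blocks, (0, 2, 1, 3)).reshape((J - 1) * K, (J - 1) * K)
# block (i, j) of the square matrix is blocks[i, j], e.g. the (0, 1) block:
assert np.array_equal(H[0:K, K:2 * K], blocks[0, 1])
```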
def _score_nbin(self, params, Q=0):
"""
Score vector for NB2 model
"""
if self._transparams: # lnalpha came in during fit
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
a1 = 1/alpha * mu**Q
prob = a1 / (a1 + mu) # a1 aka "size" in _ll_nbin
if Q == 1: # nb1
# Q == 1 --> a1 = mu / alpha --> prob = 1 / (alpha + 1)
dgpart = digamma(y + a1) - digamma(a1)
dparams = exog * a1 * (np.log(prob) +
dgpart)
dalpha = ((alpha * (y - mu * np.log(prob) -
mu*(dgpart + 1)) -
mu * (np.log(prob) +
dgpart))/
(alpha**2*(alpha + 1))).sum()
elif Q == 0: # nb2
dgpart = digamma(y + a1) - digamma(a1)
dparams = exog*a1 * (y-mu)/(mu+a1)
da1 = -alpha**-2
dalpha = (dgpart + np.log(a1)
- np.log(a1+mu) - (y-mu)/(a1+mu)).sum() * da1
#multiply above by constant outside sum to reduce rounding error
if self._transparams:
return np.r_[dparams.sum(0), dalpha*alpha]
else:
return np.r_[dparams.sum(0), dalpha] | Score vector for NB2 model | _score_nbin | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def _hessian_nb1(self, params):
"""
Hessian of NB1 model.
"""
if self._transparams: # lnalpha came in during fit
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
a1 = mu/alpha
dgpart = digamma(y + a1) - digamma(a1)
prob = 1 / (1 + alpha) # equiv: a1 / (a1 + mu)
# for dl/dparams dparams
dim = exog.shape[1]
hess_arr = np.empty((dim+1,dim+1))
#const_arr = a1*mu*(a1+y)/(mu+a1)**2
# not all of dparams
dparams = exog / alpha * (np.log(prob) +
dgpart)
dmudb = exog*mu
xmu_alpha = exog * a1
trigamma = (special.polygamma(1, a1 + y) -
special.polygamma(1, a1))
for i in range(dim):
for j in range(dim):
if j > i:
continue
hess_arr[i,j] = np.squeeze(
np.sum(
dparams[:,i,None] * dmudb[:,j,None] +
xmu_alpha[:,i,None] * xmu_alpha[:,j,None] * trigamma,
axis=0
)
)
tri_idx = np.triu_indices(dim, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
# for dl/dparams dalpha
# da1 = -alpha**-2
dldpda = np.sum(-a1 * dparams + exog * a1 *
(-trigamma*mu/alpha**2 - prob), axis=0)
hess_arr[-1,:-1] = dldpda
hess_arr[:-1,-1] = dldpda
log_alpha = np.log(prob)
alpha3 = alpha**3
alpha2 = alpha**2
mu2 = mu**2
dada = ((alpha3*mu*(2*log_alpha + 2*dgpart + 3) -
2*alpha3*y +
4*alpha2*mu*(log_alpha + dgpart) +
alpha2 * (2*mu - y) +
2*alpha*mu2*trigamma + mu2 * trigamma + alpha2 * mu2 * trigamma +
2*alpha*mu*(log_alpha + dgpart)
)/(alpha**4*(alpha2 + 2*alpha + 1)))
hess_arr[-1,-1] = dada.sum()
return hess_arr | Hessian of NB1 model. | _hessian_nb1 | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def _hessian_nb2(self, params):
"""
Hessian of NB2 model.
"""
if self._transparams: # lnalpha came in during fit
alpha = np.exp(params[-1])
else:
alpha = params[-1]
a1 = 1/alpha
params = params[:-1]
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
prob = a1 / (a1 + mu)
dgpart = digamma(a1 + y) - digamma(a1)
# for dl/dparams dparams
dim = exog.shape[1]
hess_arr = np.empty((dim+1,dim+1))
const_arr = a1*mu*(a1+y)/(mu+a1)**2
for i in range(dim):
for j in range(dim):
if j > i:
continue
hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] *
const_arr, axis=0).squeeze()
tri_idx = np.triu_indices(dim, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
# for dl/dparams dalpha
da1 = -alpha**-2
dldpda = -np.sum(mu*exog*(y-mu)*a1**2/(mu+a1)**2 , axis=0)
hess_arr[-1,:-1] = dldpda
hess_arr[:-1,-1] = dldpda
# for dl/dalpha dalpha
#NOTE: polygamma(1,x) is the trigamma function
da2 = 2*alpha**-3
dalpha = da1 * (dgpart +
np.log(prob) - (y - mu)/(a1+mu))
dada = (da2 * dalpha/da1 + da1**2 * (special.polygamma(1, a1+y) -
special.polygamma(1, a1) + 1/a1 - 1/(a1 + mu) +
(y - mu)/(mu + a1)**2)).sum()
hess_arr[-1,-1] = dada
return hess_arr | Hessian of NB2 model. | _hessian_nb2 | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def get_distribution(self, params, exog=None, exposure=None, offset=None):
"""get frozen instance of distribution
"""
mu = self.predict(params, exog=exog, exposure=exposure, offset=offset)
if self.loglike_method == 'geometric':
# distr = stats.geom(1 / (1 + mu[:, None]), loc=-1)
distr = stats.geom(1 / (1 + mu), loc=-1)
else:
if self.loglike_method == 'nb2':
p = 2
elif self.loglike_method == 'nb1':
p = 1
alpha = params[-1]
q = 2 - p
size = 1. / alpha * mu**q
prob = size / (size + mu)
# distr = nbinom(size[:, None], prob[:, None])
distr = nbinom(size, prob)
return distr | get frozen instance of distribution | get_distribution | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
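A hedged usage sketch with simulated nb2-type data: the frozen `nbinom` distribution returned by `get_distribution` should reproduce the fitted conditional mean. It assumes the fitted `params` carry `alpha` on the original scale, as in recent statsmodels releases.

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(1)
exog = sm.add_constant(rng.normal(size=(500, 1)))
mu_true = np.exp(0.5 + 0.3 * exog[:, 1])
endog = rng.negative_binomial(1.0, 1.0 / (1.0 + mu_true))   # nb2-type data, alpha ~ 1
res = sm.NegativeBinomial(endog, exog, loglike_method="nb2").fit(disp=0)
distr = res.model.get_distribution(res.params)
assert np.allclose(distr.mean(), res.predict())
```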
def loglike(self, params):
"""
Loglikelihood of Generalized Negative Binomial (NB-P) model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
"""
return np.sum(self.loglikeobs(params)) | Loglikelihood of Generalized Negative Binomial (NB-P) model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes. | loglike | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def loglikeobs(self, params):
"""
Loglikelihood for observations of Generalized Negative Binomial (NB-P) model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes
"""
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = self.parameterization
y = self.endog
mu = self.predict(params)
mu_p = mu**(2 - p)
a1 = mu_p / alpha
a2 = mu + a1
llf = (gammaln(y + a1) - gammaln(y + 1) - gammaln(a1) +
a1 * np.log(a1) + y * np.log(mu) -
(y + a1) * np.log(a2))
return llf | Loglikelihood for observations of Generalized Negative Binomial (NB-P) model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes | loglikeobs | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
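The expression above is the negative binomial log-pmf with `size = mu**(2 - p) / alpha` and `prob = size / (size + mu)`. A hedged numerical sketch (simulated data, arbitrary parameter values) checking it against `scipy.stats.nbinom`:

```python
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.discrete.discrete_model import NegativeBinomialP

rng = np.random.default_rng(2)
exog = sm.add_constant(rng.normal(size=(200, 1)))
mu_true = np.exp(0.2 + 0.4 * exog[:, 1])
endog = rng.negative_binomial(1.0, 1.0 / (1.0 + mu_true))
p, alpha = 2, 0.7
params = np.array([0.2, 0.4, alpha])        # [const, slope, alpha], arbitrary values
mod = NegativeBinomialP(endog, exog, p=p)
mu = mod.predict(params[:-1])
size = mu ** (2 - p) / alpha
prob = size / (size + mu)
assert np.allclose(mod.loglikeobs(params), stats.nbinom.logpmf(endog, size, prob))
```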
def score_obs(self, params):
"""
Generalized Negative Binomial (NB-P) model score (gradient) vector of the log-likelihood for each observation.
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score_obs : ndarray, 2-D
The score vector of the model for each observation, i.e. the first
derivative of the loglikelihood function, evaluated at `params`
"""
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = 2 - self.parameterization
y = self.endog
mu = self.predict(params)
mu_p = mu**p
a1 = mu_p / alpha
a2 = mu + a1
a3 = y + a1
a4 = p * a1 / mu
dgpart = digamma(a3) - digamma(a1)
dgterm = dgpart + np.log(a1 / a2) + 1 - a3 / a2
# TODO: better name/interpretation for dgterm?
dparams = (a4 * dgterm -
a3 / a2 +
y / mu)
dparams = (self.exog.T * mu * dparams).T
dalpha = -a1 / alpha * dgterm
return np.concatenate((dparams, np.atleast_2d(dalpha).T),
axis=1) | Generalized Negative Binomial (NB-P) model score (gradient) vector of the log-likelihood for each observation.
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score_obs : ndarray, 2-D
The score vector of the model for each observation, i.e. the first
derivative of the loglikelihood function, evaluated at `params` | score_obs | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def score(self, params):
"""
Generalized Negative Binomial (NB-P) model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
"""
score = np.sum(self.score_obs(params), axis=0)
if self._transparams:
# chain rule for the log-transformed dispersion: d/d(log alpha) = alpha * d/d(alpha)
score[-1] *= np.exp(params[-1])
return score
else:
return score | Generalized Negative Binomial (NB-P) model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params` | score | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
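A hedged gradient check for the NB-P score (simulated data; it assumes `_transparams` is False on a freshly constructed model so that `params[-1]` is alpha itself):

```python
import numpy as np
import statsmodels.api as sm
from statsmodels.discrete.discrete_model import NegativeBinomialP
from statsmodels.tools.numdiff import approx_fprime

rng = np.random.default_rng(3)
exog = sm.add_constant(rng.normal(size=(300, 1)))
mu_true = np.exp(0.2 + 0.4 * exog[:, 1])
endog = rng.negative_binomial(1.0, 1.0 / (1.0 + mu_true))
mod = NegativeBinomialP(endog, exog, p=2)
params = np.array([0.1, 0.3, 0.8])          # [const, slope, alpha], arbitrary values
num_grad = approx_fprime(params, mod.loglike, centered=True)
assert np.allclose(mod.score(params), num_grad, rtol=1e-4, atol=1e-4)
```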
def score_factor(self, params, endog=None):
"""
Generalized Negative Binomial (NB-P) model score (gradient) vector of the log-likelihood for each observation.
Parameters
----------
params : array-like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
"""
params = np.asarray(params)
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = 2 - self.parameterization
y = self.endog if endog is None else endog
mu = self.predict(params)
mu_p = mu**p
a1 = mu_p / alpha
a2 = mu + a1
a3 = y + a1
a4 = p * a1 / mu
dgpart = digamma(a3) - digamma(a1)
dparams = ((a4 * dgpart -
a3 / a2) +
y / mu + a4 * (1 - a3 / a2 + np.log(a1 / a2)))
dparams = (mu * dparams).T
dalpha = (-a1 / alpha * (dgpart +
np.log(a1 / a2) +
1 - a3 / a2))
return dparams, dalpha | Generalized Negative Binomial (NB-P) model score (gradient) vector of the log-likelihood for each observation.
Parameters
----------
params : array-like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params` | score_factor | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def hessian(self, params):
"""
Generalized Negative Binomial (NB-P) model hessian matrix of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hessian : ndarray, 2-D
The hessian matrix of the model.
"""
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = 2 - self.parameterization
y = self.endog
exog = self.exog
mu = self.predict(params)
mu_p = mu**p
a1 = mu_p / alpha
a2 = mu + a1
a3 = y + a1
a4 = p * a1 / mu
prob = a1 / a2
lprob = np.log(prob)
dgpart = digamma(a3) - digamma(a1)
pgpart = polygamma(1, a3) - polygamma(1, a1)
dim = exog.shape[1]
hess_arr = np.zeros((dim + 1, dim + 1))
coeff = mu**2 * (((1 + a4)**2 * a3 / a2**2 -
a3 / a2 * (p - 1) * a4 / mu -
y / mu**2 -
2 * a4 * (1 + a4) / a2 +
p * a4 / mu * (lprob + dgpart + 2) -
a4 / mu * (lprob + dgpart + 1) +
a4**2 * pgpart) +
(-(1 + a4) * a3 / a2 +
y / mu +
a4 * (lprob + dgpart + 1)) / mu)
for i in range(dim):
hess_arr[i, :-1] = np.sum(self.exog[:, :].T * self.exog[:, i] * coeff, axis=1)
hess_arr[-1,:-1] = (self.exog[:, :].T * mu * a1 *
((1 + a4) * (1 - a3 / a2) / a2 -
p * (lprob + dgpart + 2) / mu +
p / mu * (a3 + p * a1) / a2 -
a4 * pgpart) / alpha).sum(axis=1)
da2 = (a1 * (2 * lprob +
2 * dgpart + 3 -
2 * a3 / a2
+ a1 * pgpart
- 2 * prob +
prob * a3 / a2) / alpha**2)
hess_arr[-1, -1] = da2.sum()
tri_idx = np.triu_indices(dim + 1, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr | Generalized Negative Binomial (NB-P) model hessian matrix of the log-likelihood | hessian | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hessian : ndarray, 2-D
The hessian matrix of the model. | hessian | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def hessian_factor(self, params):
"""
Generalized Negative Binomial (NB-P) model hessian matrix of the log-likelihood
Parameters
----------
params : array-like
The parameters of the model
Returns
-------
hessian : ndarray, 2-D
The hessian matrix of the model.
"""
params = np.asarray(params)
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = 2 - self.parameterization
y = self.endog
mu = self.predict(params)
mu_p = mu**p
a1 = mu_p / alpha
a2 = mu + a1
a3 = y + a1
a4 = p * a1 / mu
a5 = a4 * p / mu
dgpart = digamma(a3) - digamma(a1)
coeff = mu**2 * (((1 + a4)**2 * a3 / a2**2 -
a3 * (a5 - a4 / mu) / a2 -
y / mu**2 -
2 * a4 * (1 + a4) / a2 +
a5 * (np.log(a1) - np.log(a2) + dgpart + 2) -
a4 * (np.log(a1) - np.log(a2) + dgpart + 1) / mu -
a4**2 * (polygamma(1, a1) - polygamma(1, a3))) +
(-(1 + a4) * a3 / a2 +
y / mu +
a4 * (np.log(a1) - np.log(a2) + dgpart + 1)) / mu)
hfbb = coeff
hfba = (mu * a1 *
((1 + a4) * (1 - a3 / a2) / a2 -
p * (np.log(a1 / a2) + dgpart + 2) / mu +
p * (a3 / mu + a4) / a2 +
a4 * (polygamma(1, a1) - polygamma(1, a3))) / alpha)
hfaa = (a1 * (2 * np.log(a1 / a2) +
2 * dgpart + 3 -
2 * a3 / a2 - a1 * polygamma(1, a1) +
a1 * polygamma(1, a3) - 2 * a1 / a2 +
a1 * a3 / a2**2) / alpha**2)
return hfbb, hfba, hfaa | Generalized Negative Binomial (NB-P) model hessian matrix of the log-likelihood | hessian_factor | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
Parameters
----------
params : array-like
The parameters of the model
Returns
-------
hessian : ndarray, 2-D
The hessian matrix of the model. | hessian_factor | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def fit(self, start_params=None, method='bfgs', maxiter=35,
full_output=1, disp=1, callback=None, use_transparams=False,
cov_type='nonrobust', cov_kwds=None, use_t=None,
optim_kwds_prelim=None, **kwargs):
# TODO: Fix doc string
"""
use_transparams : bool
This parameter enables an internal transformation to impose
non-negativity. True to enable. Default is False.
use_transparams=True imposes the no-underdispersion (alpha > 0)
constraint. If use_transparams=True and method is "newton" or
"ncg", the transformation is ignored.
"""
if use_transparams and method not in ['newton', 'ncg']:
self._transparams = True
else:
if use_transparams:
warnings.warn('Parameter "use_transparams" is ignored',
RuntimeWarning)
self._transparams = False
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
kwds_prelim = {'disp': 0, 'skip_hessian': True, 'warn_convergence': False}
if optim_kwds_prelim is not None:
kwds_prelim.update(optim_kwds_prelim)
mod_poi = Poisson(self.endog, self.exog, offset=offset)
with warnings.catch_warnings():
warnings.simplefilter("always")
res_poi = mod_poi.fit(**kwds_prelim)
start_params = res_poi.params
a = self._estimate_dispersion(res_poi.predict(), res_poi.resid,
df_resid=res_poi.df_resid)
start_params = np.append(start_params, max(0.05, a))
if callback is None:
# work around perfect separation callback #3895
def callback(*x):
return x
mlefit = super().fit(start_params=start_params,
maxiter=maxiter, method=method, disp=disp,
full_output=full_output, callback=callback,
**kwargs)
if optim_kwds_prelim is not None:
mlefit.mle_settings["optim_kwds_prelim"] = optim_kwds_prelim
if use_transparams and method not in ["newton", "ncg"]:
self._transparams = False
mlefit._results.params[-1] = np.exp(mlefit._results.params[-1])
nbinfit = NegativeBinomialPResults(self, mlefit._results)
result = NegativeBinomialPResultsWrapper(nbinfit)
if cov_kwds is None:
cov_kwds = {}
result._get_robustcov_results(cov_type=cov_type,
use_self=True, use_t=use_t, **cov_kwds)
return result | use_transparams : bool
This parameter enables an internal transformation to impose
non-negativity. True to enable. Default is False.
use_transparams=True imposes the no-underdispersion (alpha > 0)
constraint. If use_transparams=True and method is "newton" or
"ncg", the transformation is ignored. | fit | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def _deriv_score_obs_dendog(self, params):
"""derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog.
"""
from statsmodels.tools.numdiff import _approx_fprime_cs_scalar
def f(y):
if y.ndim == 2 and y.shape[1] == 1:
y = y[:, 0]
sf = self.score_factor(params, endog=y)
return np.column_stack(sf)
dsf = _approx_fprime_cs_scalar(self.endog[:, None], f)
# deriv is 2d vector
d1 = dsf[:, :1] * self.exog
d2 = dsf[:, 1:2]
return np.column_stack((d1, d2)) | derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog. | _deriv_score_obs_dendog | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def _var(self, mu, params=None):
"""variance implied by the distribution
internal use, will be refactored or removed
"""
alpha = params[-1]
p = self.parameterization # no `-1` as in GPP
var_ = mu * (1 + alpha * mu**(p - 1))
return var_ | variance implied by the distribution
internal use, will be refactored or removed | _var | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def _prob_nonzero(self, mu, params):
"""Probability that count is not zero
internal use in Censored model, will be refactored or removed
"""
alpha = params[-1]
p = self.parameterization
# P(Y=0) = (size / (size + mu))**size with size = mu**(2 - p) / alpha
prob_nz = 1 - (1 + alpha * mu**(p - 1))**(- mu**(2 - p) / alpha)
return prob_nz | Probability that count is not zero
internal use in Censored model, will be refactored or removed | _prob_nonzero | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def get_distribution(self, params, exog=None, exposure=None, offset=None):
"""get frozen instance of distribution
"""
mu = self.predict(params, exog=exog, exposure=exposure, offset=offset)
size, prob = self.convert_params(params, mu)
# distr = nbinom(size[:, None], prob[:, None])
distr = nbinom(size, prob)
return distr | get frozen instance of distribution | get_distribution | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
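A hedged check tying `_var` and `get_distribution` together (simulated data, arbitrary parameter values): the implied variance `mu * (1 + alpha * mu**(p - 1))` should equal the variance of the frozen `nbinom` distribution.

```python
import numpy as np
import statsmodels.api as sm
from statsmodels.discrete.discrete_model import NegativeBinomialP

rng = np.random.default_rng(5)
exog = sm.add_constant(rng.normal(size=(100, 1)))
mu_true = np.exp(0.2 + 0.3 * exog[:, 1])
endog = rng.negative_binomial(2.0, 2.0 / (2.0 + mu_true))
mod = NegativeBinomialP(endog, exog, p=2)
params = np.array([0.2, 0.3, 0.5])          # [const, slope, alpha], arbitrary values
mu = mod.predict(params[:-1])
distr = mod.get_distribution(params)
assert np.allclose(mod._var(mu, params), distr.var())
```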
def prsquared(self):
"""
McFadden's pseudo-R-squared. `1 - (llf / llnull)`
"""
return 1 - self.llf/self.llnull | McFadden's pseudo-R-squared. `1 - (llf / llnull)` | prsquared | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def llr(self):
"""
Likelihood ratio chi-squared statistic; `-2*(llnull - llf)`
"""
return -2*(self.llnull - self.llf) | Likelihood ratio chi-squared statistic; `-2*(llnull - llf)` | llr | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def llr_pvalue(self):
"""
The chi-squared probability of getting a log-likelihood ratio
statistic greater than llr. llr has a chi-squared distribution
with degrees of freedom `df_model`.
"""
return stats.distributions.chi2.sf(self.llr, self.df_model) | The chi-squared probability of getting a log-likelihood ratio
statistic greater than llr. llr has a chi-squared distribution
with degrees of freedom `df_model`. | llr_pvalue | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def set_null_options(self, llnull=None, attach_results=True, **kwargs):
"""
Set the fit options for the Null (constant-only) model.
This resets the cache for related attributes which is potentially
fragile. This only sets the option, the null model is estimated
when llnull is accessed, if llnull is not yet in cache.
Parameters
----------
llnull : {None, float}
If llnull is not None, then the value will be directly assigned to
the cached attribute "llnull".
attach_results : bool
Sets an internal flag whether the results instance of the null
model should be attached. By default without calling this method,
the null model results are not attached and only the loglikelihood
value llnull is stored.
**kwargs
Additional keyword arguments used as fit keyword arguments for the
null model. These override the model default values.
Notes
-----
Modifies attributes of this instance, and so has no return.
"""
# reset cache, note we need to add here anything that depends on
# llnull or the null model. If something is missing, then the attribute
# might be incorrect.
self._cache.pop('llnull', None)
self._cache.pop('llr', None)
self._cache.pop('llr_pvalue', None)
self._cache.pop('prsquared', None)
if hasattr(self, 'res_null'):
del self.res_null
if llnull is not None:
self._cache['llnull'] = llnull
self._attach_nullmodel = attach_results
self._optim_kwds_null = kwargs | Set the fit options for the Null (constant-only) model.
This resets the cache for related attributes which is potentially
fragile. This only sets the option, the null model is estimated
when llnull is accessed, if llnull is not yet in cache.
Parameters
----------
llnull : {None, float}
If llnull is not None, then the value will be directly assigned to
the cached attribute "llnull".
attach_results : bool
Sets an internal flag whether the results instance of the null
model should be attached. By default without calling this method,
the null model results are not attached and only the loglikelihood
value llnull is stored.
**kwargs
Additional keyword arguments used as fit keyword arguments for the
null model. These override the model default values.
Notes
-----
Modifies attributes of this instance, and so has no return. | set_null_options | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def llnull(self):
"""
Value of the constant-only loglikelihood
"""
model = self.model
kwds = model._get_init_kwds().copy()
for key in getattr(model, '_null_drop_keys', []):
del kwds[key]
# TODO: what parameters to pass to fit?
mod_null = model.__class__(model.endog, np.ones(self.nobs), **kwds)
# TODO: consider catching and warning on convergence failure?
# in the meantime, try hard to converge. see
# TestPoissonConstrained1a.test_smoke
optim_kwds = getattr(self, '_optim_kwds_null', {}).copy()
if 'start_params' in optim_kwds:
# user provided
sp_null = optim_kwds.pop('start_params')
elif hasattr(model, '_get_start_params_null'):
# get moment estimates if available
sp_null = model._get_start_params_null()
else:
sp_null = None
opt_kwds = dict(method='bfgs', warn_convergence=False, maxiter=10000,
disp=0)
opt_kwds.update(optim_kwds)
if optim_kwds:
res_null = mod_null.fit(start_params=sp_null, **opt_kwds)
else:
# this should be a reasonably robust method across versions
res_null = mod_null.fit(start_params=sp_null, method='nm',
warn_convergence=False,
maxiter=10000, disp=0)
res_null = mod_null.fit(start_params=res_null.params, method='bfgs',
warn_convergence=False,
maxiter=10000, disp=0)
if getattr(self, '_attach_nullmodel', False) is not False:
self.res_null = res_null
return res_null.llf | Value of the constant-only loglikelihood | llnull | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def fittedvalues(self):
"""
Linear predictor XB.
"""
return np.dot(self.model.exog, self.params[:self.model.exog.shape[1]]) | Linear predictor XB. | fittedvalues | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def resid_response(self):
"""
Response residuals. The response residuals are defined as
`endog - fittedvalues`
"""
return self.model.endog - self.predict() | Response residuals. The response residuals are defined as
`endog - fittedvalues` | resid_response | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def resid_pearson(self):
"""
Pearson residuals defined as response residuals divided by standard
deviation implied by the model.
"""
var_ = self.predict(which="var")
return self.resid_response / np.sqrt(var_) | Pearson residuals defined as response residuals divided by standard
deviation implied by the model. | resid_pearson | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
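A hedged illustration with a Poisson fit on simulated data (statsmodels >= 0.14 assumed for `which="var"` support): since the implied Poisson variance equals the mean, the Pearson residuals reduce to `(y - mu) / sqrt(mu)`.

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(6)
exog = sm.add_constant(rng.normal(size=(300, 1)))
endog = rng.poisson(np.exp(0.1 + 0.5 * exog[:, 1]))
res = sm.Poisson(endog, exog).fit(disp=0)
mu = res.predict()
assert np.allclose(res.resid_pearson, (endog - mu) / np.sqrt(mu))
```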
def aic(self):
"""
Akaike information criterion. `-2*(llf - p)` where `p` is the number
of regressors including the intercept.
"""
k_extra = getattr(self.model, 'k_extra', 0)
return -2*(self.llf - (self.df_model + 1 + k_extra)) | Akaike information criterion. `-2*(llf - p)` where `p` is the number
of regressors including the intercept. | aic | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def bic(self):
"""
Bayesian information criterion. `-2*llf + ln(nobs)*p` where `p` is the
number of regressors including the intercept.
"""
k_extra = getattr(self.model, 'k_extra', 0)
return -2*self.llf + np.log(self.nobs)*(self.df_model + 1 + k_extra) | Bayesian information criterion. `-2*llf + ln(nobs)*p` where `p` is the
number of regressors including the intercept. | bic | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def info_criteria(self, crit, dk_params=0):
"""Return an information criterion for the model.
Parameters
----------
crit : string
One of 'aic', 'bic', 'tic' or 'gbic'.
dk_params : int or float
Correction to the number of parameters used in the information
criterion.
Returns
-------
Value of information criterion.
Notes
-----
Tic and gbic
References
----------
Burnham KP, Anderson KR (2002). Model Selection and Multimodel
Inference; Springer New York.
"""
crit = crit.lower()
k_extra = getattr(self.model, 'k_extra', 0)
k_params = self.df_model + 1 + k_extra + dk_params
if crit == "aic":
return -2 * self.llf + 2 * k_params
elif crit == "bic":
nobs = self.df_model + self.df_resid + 1
bic = -2*self.llf + k_params*np.log(nobs)
return bic
elif crit == "tic":
return pinfer.tic(self)
elif crit == "gbic":
return pinfer.gbic(self)
else:
raise ValueError("Name of information criterion not recognized.") | Return an information criterion for the model.
Parameters
----------
crit : string
One of 'aic', 'bic', 'tic' or 'gbic'.
dk_params : int or float
Correction to the number of parameters used in the information
criterion.
Returns
-------
Value of information criterion.
Notes
-----
Tic and gbic
References
----------
Burnham KP, Anderson KR (2002). Model Selection and Multimodel
Inference; Springer New York. | info_criteria | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
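A hedged consistency check on simulated Poisson data: with `dk_params=0`, `info_criteria` should agree with the cached `aic` and `bic` attributes defined above.

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(7)
exog = sm.add_constant(rng.normal(size=(200, 1)))
endog = rng.poisson(np.exp(0.2 + 0.4 * exog[:, 1]))
res = sm.Poisson(endog, exog).fit(disp=0)
assert np.isclose(res.info_criteria("aic"), res.aic)
assert np.isclose(res.info_criteria("bic"), res.bic)
```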
def get_prediction(self, exog=None,
transform=True, which="mean", linear=None,
row_labels=None, average=False,
agg_weights=None, y_values=None,
**kwargs):
"""
Compute prediction results when endpoint transformation is valid.
Parameters
----------
exog : array_like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
which : str
Which statistic is to be predicted. Default is "mean".
The available statistics and options depend on the model.
see the model.predict docstring
linear : bool
Linear has been replaced by the `which` keyword and will be
deprecated.
If linear is True, then `which` is ignored and the linear
prediction is returned.
row_labels : list of str or None
If row_labels are provided, then they will replace the generated
labels.
average : bool
If average is True, then the mean prediction is computed, that is,
predictions are computed for individual exog and then the average
over observation is used.
If average is False, then the results are the predictions for all
observations, i.e. same length as ``exog``.
agg_weights : ndarray, optional
Aggregation weights, only used if average is True.
The weights are not normalized.
y_values : None or nd_array
Some predictive statistics like which="prob" are computed at
values of the response variable. If y_values is not None, then
it will be used instead of the default set of y_values.
**Warning:** ``which="prob"`` for count models currently computes
the pmf for all y=k up to max(endog). This can be a large array if
the observed endog values are large.
This will likely change so that the set of y_values will be chosen
to limit the array size.
**kwargs :
Some models can take additional keyword arguments, such as offset,
exposure or additional exog in multi-part models like zero inflated
models.
See the predict method of the model for the details.
Returns
-------
prediction_results : PredictionResults
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and
summary dataframe for the prediction.
Notes
-----
Status: new in 0.14, experimental
"""
if linear is True:
# compatibility with old keyword
which = "linear"
pred_kwds = kwargs
# y_values is explicit so we can add it to the docstring
if y_values is not None:
pred_kwds["y_values"] = y_values
res = pred.get_prediction(
self,
exog=exog,
which=which,
transform=transform,
row_labels=row_labels,
average=average,
agg_weights=agg_weights,
pred_kwds=pred_kwds
)
return res | Compute prediction results when endpoint transformation is valid.
Parameters
----------
exog : array_like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
which : str
Which statistic is to be predicted. Default is "mean".
The available statistics and options depend on the model.
see the model.predict docstring
linear : bool
Linear has been replaced by the `which` keyword and will be
deprecated.
If linear is True, then `which` is ignored and the linear
prediction is returned.
row_labels : list of str or None
If row_labels are provided, then they will replace the generated
labels.
average : bool
If average is True, then the mean prediction is computed, that is,
predictions are computed for individual exog and then the average
over observation is used.
If average is False, then the results are the predictions for all
observations, i.e. same length as ``exog``.
agg_weights : ndarray, optional
Aggregation weights, only used if average is True.
The weights are not normalized.
y_values : None or nd_array
Some predictive statistics like which="prob" are computed at
values of the response variable. If y_values is not None, then
it will be used instead of the default set of y_values.
**Warning:** ``which="prob"`` for count models currently computes
the pmf for all y=k up to max(endog). This can be a large array if
the observed endog values are large.
This will likely change so that the set of y_values will be chosen
to limit the array size.
**kwargs :
Some models can take additional keyword arguments, such as offset,
exposure or additional exog in multi-part models like zero inflated
models.
See the predict method of the model for the details.
Returns
-------
prediction_results : PredictionResults
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and
summary dataframe for the prediction.
Notes
-----
Status: new in 0.14, experimental | get_prediction | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
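A hedged usage sketch of the prediction results object (Poisson fit on simulated data; the `which`/`average` keywords assume the statsmodels 0.14 API described above):

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(8)
exog = sm.add_constant(rng.normal(size=(200, 1)))
endog = rng.poisson(np.exp(0.2 + 0.4 * exog[:, 1]))
res = sm.Poisson(endog, exog).fit(disp=0)
pred = res.get_prediction(which="mean", average=True)
print(pred.summary_frame(alpha=0.05))   # average predicted mean with confidence interval
```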
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is all
only margeff will be available from the returned object.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semi-elasticity -- dy/d(lnx)
- 'eydx' - estimate semi-elasticity -- d(lny)/dx
Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables. For interpretations of these methods
see notes below.
atexog : array_like, optional
Optionally, you can provide the exogenous variables over which to
get the marginal effects. This should be a dictionary with the key
as the zero-indexed column number and the value of the dictionary.
Default is None for all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
DiscreteMargins : marginal effects instance
Returns an object that holds the marginal effects, standard
errors, confidence intervals, etc. See
`statsmodels.discrete.discrete_margins.DiscreteMargins` for more
information.
Notes
-----
Interpretations of methods:
- 'dydx' - change in `endog` for a change in `exog`.
- 'eyex' - proportional change in `endog` for a proportional change
in `exog`.
- 'dyex' - change in `endog` for a proportional change in `exog`.
- 'eydx' - proportional change in `endog` for a change in `exog`.
When using after Poisson, returns the expected number of events per
period, assuming that the model is loglinear.
"""
if getattr(self.model, "offset", None) is not None:
raise NotImplementedError("Margins with offset are not available.")
from statsmodels.discrete.discrete_margins import DiscreteMargins
return DiscreteMargins(self, (at, method, atexog, dummy, count)) | Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is all
only margeff will be available from the returned object.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semi-elasticity -- dy/d(lnx)
- 'eydx' - estimate semi-elasticity -- d(lny)/dx
Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables. For interpretations of these methods
see notes below.
atexog : array_like, optional
Optionally, you can provide the exogenous variables over which to
get the marginal effects. This should be a dictionary with the key
as the zero-indexed column number and the value of the dictionary.
Default is None for all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
DiscreteMargins : marginal effects instance
Returns an object that holds the marginal effects, standard
errors, confidence intervals, etc. See
`statsmodels.discrete.discrete_margins.DiscreteMargins` for more
information.
Notes
-----
Interpretations of methods:
- 'dydx' - change in `endog` for a change in `exog`.
- 'eyex' - proportional change in `endog` for a proportional change
in `exog`.
- 'dyex' - change in `endog` for a proportional change in `exog`.
- 'eydx' - proportional change in `endog` for a change in `exog`.
When using after Poisson, returns the expected number of events per
period, assuming that the model is loglinear. | get_margeff | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
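A hedged usage sketch of average marginal effects on a fitted Logit (simulated data, arbitrary coefficients):

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(9)
exog = sm.add_constant(rng.normal(size=(500, 2)))
prob = 1.0 / (1.0 + np.exp(-exog @ np.array([0.2, 0.8, -0.5])))
endog = (rng.random(500) < prob).astype(float)
res = sm.Logit(endog, exog).fit(disp=0)
margeff = res.get_margeff(at="overall", method="dydx")
print(margeff.summary())
```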
def get_influence(self):
"""
Get an instance of MLEInfluence with influence and outlier measures
Returns
-------
infl : MLEInfluence instance
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence
"""
from statsmodels.stats.outliers_influence import MLEInfluence
return MLEInfluence(self) | Get an instance of MLEInfluence with influence and outlier measures
Returns
-------
infl : MLEInfluence instance
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence | get_influence | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def summary(self, yname=None, xname=None, title=None, alpha=.05,
yname_list=None):
"""
Summarize the Regression Results.
Parameters
----------
yname : str, optional
The name of the endog variable in the tables. The default is `y`.
xname : list[str], optional
The names for the exogenous variables, default is "var_xx".
Must match the number of parameters in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
Returns
-------
Summary
Class that holds the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : Class that hold summary results.
"""
top_left = [('Dep. Variable:', None),
('Model:', [self.model.__class__.__name__]),
('Method:', [self.method]),
('Date:', None),
('Time:', None),
('converged:', ["%s" % self.mle_retvals['converged']]),
]
top_right = [('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None),
('Pseudo R-squ.:', ["%#6.4g" % self.prsquared]),
('Log-Likelihood:', None),
('LL-Null:', ["%#8.5g" % self.llnull]),
('LLR p-value:', ["%#6.4g" % self.llr_pvalue])
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
# boiler plate
from statsmodels.iolib.summary import Summary
smry = Summary()
yname, yname_list = self._get_endog_name(yname, yname_list)
# for top of table
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
# for parameters, etc
smry.add_table_params(self, yname=yname_list, xname=xname, alpha=alpha,
use_t=self.use_t)
if hasattr(self, 'constraints'):
smry.add_extra_txt(['Model has been estimated subject to linear '
'equality constraints.'])
return smry | Summarize the Regression Results.
Parameters
----------
yname : str, optional
The name of the endog variable in the tables. The default is `y`.
xname : list[str], optional
The names for the exogenous variables, default is "var_xx".
Must match the number of parameters in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
Returns
-------
Summary
Class that holds the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : Class that hold summary results. | summary | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def summary2(self, yname=None, xname=None, title=None, alpha=.05,
float_format="%.4f"):
"""
Experimental function to summarize regression results.
Parameters
----------
yname : str
Name of the dependent variable (optional).
xname : list[str], optional
List of strings of length equal to the number of parameters
Names of the independent variables (optional).
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
float_format : str
The print format for floats in parameters summary.
Returns
-------
Summary
Instance that contains the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : Class that holds summary results.
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
if hasattr(self, 'constraints'):
smry.add_text('Model has been estimated subject to linear '
'equality constraints.')
return smry | Experimental function to summarize regression results.
Parameters
----------
yname : str
Name of the dependent variable (optional).
xname : list[str], optional
List of strings of length equal to the number of parameters
Names of the independent variables (optional).
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
float_format : str
The print format for floats in parameters summary.
Returns
-------
Summary
Instance that contains the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : Class that holds summary results. | summary2 | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def resid(self):
"""
Residuals
Notes
-----
The residuals for Count models are defined as
.. math:: y - p
where :math:`p = \\exp(X\\beta)`. Any exposure and offset variables
are also handled.
"""
return self.model.endog - self.predict() | Residuals
Notes
-----
The residuals for Count models are defined as
.. math:: y - p
where :math:`p = \\exp(X\\beta)`. Any exposure and offset variables
are also handled. | resid | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def get_diagnostic(self, y_max=None):
"""
Get instance of class with specification and diagnostic methods.
experimental, API of Diagnostic classes will change
Returns
-------
CountDiagnostic instance
The instance has methods to perform specification and diagnostic
tests and plots
See Also
--------
statsmodels.discrete.diagnostic.CountDiagnostic
"""
from statsmodels.discrete.diagnostic import CountDiagnostic
return CountDiagnostic(self, y_max=y_max) | Get instance of class with specification and diagnostic methods.
experimental, API of Diagnostic classes will change
Returns
-------
CountDiagnostic instance
The instance has methods to perform specification and diagnostic
tests and plots
See Also
--------
statsmodels.discrete.diagnostic.CountDiagnostic | get_diagnostic | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def lnalpha(self):
"""Natural log of alpha"""
return np.log(self.params[-1]) | Natural log of alpha | lnalpha | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def lnalpha_std_err(self):
"""Natural log of standardized error"""
return self.bse[-1] / self.params[-1] | Natural log of standardized error | lnalpha_std_err | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
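A small numeric check (values are made up) of the delta-method identity behind the ratio above, se(ln alpha) ~ se(alpha) / alpha:
import numpy as np
alpha_hat = 0.5   # hypothetical dispersion estimate
se_alpha = 0.1    # hypothetical standard error of alpha_hat
# Delta method: Var(ln a) ~= Var(a) / a**2, so se(ln a) ~= se(a) / a,
# which is what lnalpha_std_err returns as bse[-1] / params[-1].
print(np.log(alpha_hat), se_alpha / alpha_hat)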
def predict_prob(self, n=None, exog=None, exposure=None, offset=None,
transform=True):
"""
Return predicted probability of each count level for each observation
Parameters
----------
n : array_like or int
The counts for which you want the probabilities. If n is None
then the probabilities for each count from 0 to max(y) are
given.
Returns
-------
ndarray
A nobs x n array where len(`n`) columns are indexed by the count
n. If n is None, then column 0 is the probability that each
observation is 0, column 1 is the probability that each
observation is 1, etc.
"""
if n is not None:
counts = np.atleast_2d(n)
else:
counts = np.atleast_2d(np.arange(0, np.max(self.model.endog)+1))
mu = self.predict(exog=exog, exposure=exposure, offset=offset,
transform=transform, which="mean")[:,None]
# uses broadcasting
return stats.poisson.pmf(counts, mu) | Return predicted probability of each count level for each observation
Parameters
----------
n : array_like or int
The counts for which you want the probabilities. If n is None
then the probabilities for each count from 0 to max(y) are
given.
Returns
-------
ndarray
A nobs x n array where len(`n`) columns are indexed by the count
n. If n is None, then column 0 is the probability that each
observation is 0, column 1 is the probability that each
observation is 1, etc. | predict_prob | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
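A minimal sketch (made-up means) of the broadcasting step used above: stats.poisson.pmf(counts, mu) yields one row per observation and one column per count level.
import numpy as np
from scipy import stats
mu = np.array([[1.2], [0.7], [2.5]])      # hypothetical predicted means, shape (nobs, 1)
counts = np.atleast_2d(np.arange(0, 5))   # count levels 0..4, shape (1, 5)
probs = stats.poisson.pmf(counts, mu)     # broadcasts to (nobs, 5)
print(probs.shape)                        # (3, 5)
print(probs.sum(axis=1))                  # close to 1 once the grid covers most of the mass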
def resid_pearson(self):
"""
Pearson residuals
Notes
-----
Pearson residuals are defined to be
.. math:: r_j = \\frac{(y - M_jp_j)}{\\sqrt{M_jp_j(1-p_j)}}
where :math:`p_j=cdf(X\\beta)` and :math:`M_j` is the total number of
observations sharing the covariate pattern :math:`j`.
For now :math:`M_j` is always set to 1.
"""
# Pearson residuals
p = self.predict() # fittedvalues is still linear
return (self.model.endog - p)/np.sqrt(p) | Pearson residuals
Notes
-----
Pearson residuals are defined to be
.. math:: r_j = \\frac{(y - M_jp_j)}{\\sqrt{M_jp_j(1-p_j)}}
where :math:`p_j=cdf(X\\beta)` and :math:`M_j` is the total number of
observations sharing the covariate pattern :math:`j`.
For now :math:`M_j` is always set to 1. | resid_pearson | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def get_influence(self):
"""
Get an instance of MLEInfluence with influence and outlier measures
Returns
-------
infl : MLEInfluence instance
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence
"""
from statsmodels.stats.outliers_influence import MLEInfluence
return MLEInfluence(self) | Get an instance of MLEInfluence with influence and outlier measures
Returns
-------
infl : MLEInfluence instance
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence | get_influence | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def get_diagnostic(self, y_max=None):
"""
Get instance of class with specification and diagnostic methods
experimental, API of Diagnostic classes will change
Returns
-------
PoissonDiagnostic instance
The instance has methods to perform specification and diagnostic
tests and plots
See Also
--------
statsmodels.discrete.diagnostic.PoissonDiagnostic
"""
from statsmodels.discrete.diagnostic import PoissonDiagnostic
return PoissonDiagnostic(self, y_max=y_max) | Get instance of class with specification and diagnostic methods
experimental, API of Diagnostic classes will change
Returns
-------
PoissonDiagnostic instance
The instance has methods to perform specification and diagnostic
tests and plots
See Also
--------
statsmodels.discrete.diagnostic.PoissonDiagnostic | get_diagnostic | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def pred_table(self, threshold=.5):
"""
Prediction table
Parameters
----------
threshold : scalar
Number between 0 and 1. Threshold above which a prediction is
considered 1 and below which a prediction is considered 0.
Notes
-----
pred_table[i,j] refers to the number of times "i" was observed and
the model predicted "j". Correct predictions are along the diagonal.
"""
model = self.model
actual = model.endog
pred = np.array(self.predict() > threshold, dtype=float)
bins = np.array([0, 0.5, 1])
return np.histogram2d(actual, pred, bins=bins)[0] | Prediction table
Parameters
----------
threshold : scalar
Number between 0 and 1. Threshold above which a prediction is
considered 1 and below which a prediction is considered 0.
Notes
-----
pred_table[i,j] refers to the number of times "i" was observed and
the model predicted "j". Correct predictions are along the diagonal. | pred_table | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def resid_dev(self):
"""
Deviance residuals
Notes
-----
Deviance residuals are defined
.. math:: d_j = \\pm\\left(2\\left[Y_j\\ln\\left(\\frac{Y_j}{M_jp_j}\\right) + (M_j - Y_j)\\ln\\left(\\frac{M_j-Y_j}{M_j(1-p_j)}\\right)\\right]\\right)^{1/2}
where
:math:`p_j = cdf(X\\beta)` and :math:`M_j` is the total number of
observations sharing the covariate pattern :math:`j`.
For now :math:`M_j` is always set to 1.
"""
#These are the deviance residuals
#model = self.model
endog = self.model.endog
#exog = model.exog
# M = # of individuals that share a covariate pattern
# so M[i] = 2 for i = two share a covariate pattern
M = 1
p = self.predict()
#Y_0 = np.where(exog == 0)
#Y_M = np.where(exog == M)
#NOTE: Common covariate patterns are not yet handled
res = -(1-endog)*np.sqrt(2*M*np.abs(np.log(1-p))) + \
endog*np.sqrt(2*M*np.abs(np.log(p)))
return res | Deviance residuals
Notes
-----
Deviance residuals are defined
.. math:: d_j = \\pm\\left(2\\left[Y_j\\ln\\left(\\frac{Y_j}{M_jp_j}\\right) + (M_j - Y_j)\\ln\\left(\\frac{M_j-Y_j}{M_j(1-p_j)}\\right)\\right]\\right)^{1/2}
where
:math:`p_j = cdf(X\\beta)` and :math:`M_j` is the total number of
observations sharing the covariate pattern :math:`j`.
For now :math:`M_j` is always set to 1. | resid_dev | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
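With M_j = 1 the formula above simplifies to plus or minus sqrt(2 |ln p|), depending on the outcome; a toy computation (illustrative probabilities only):
import numpy as np
endog = np.array([1.0, 0.0, 1.0, 0.0])   # hypothetical binary outcomes
p = np.array([0.8, 0.3, 0.4, 0.9])       # hypothetical fitted probabilities cdf(X beta)
M = 1
# d_j = +sqrt(2 |ln p_j|) when y_j = 1, and -sqrt(2 |ln(1 - p_j)|) when y_j = 0
resid_dev = (endog * np.sqrt(2 * M * np.abs(np.log(p)))
             - (1 - endog) * np.sqrt(2 * M * np.abs(np.log(1 - p))))
print(resid_dev)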
def resid_pearson(self):
"""
Pearson residuals
Notes
-----
Pearson residuals are defined to be
.. math:: r_j = \\frac{(y - M_jp_j)}{\\sqrt{M_jp_j(1-p_j)}}
where :math:`p_j=cdf(X\\beta)` and :math:`M_j` is the total number of
observations sharing the covariate pattern :math:`j`.
For now :math:`M_j` is always set to 1.
"""
# Pearson residuals
#model = self.model
endog = self.model.endog
#exog = model.exog
# M = # of individuals that share a covariate pattern
# so M[i] = 2 for i = two share a covariate pattern
# use unique row pattern?
M = 1
p = self.predict()
return (endog - M*p)/np.sqrt(M*p*(1-p)) | Pearson residuals
Notes
-----
Pearson residuals are defined to be
.. math:: r_j = \\frac{(y - M_jp_j)}{\\sqrt{M_jp_j(1-p_j)}}
where :math:`p_j=cdf(X\\beta)` and :math:`M_j` is the total number of
observations sharing the covariate pattern :math:`j`.
For now :math:`M_j` is always set to 1. | resid_pearson | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def resid_response(self):
"""
The response residuals
Notes
-----
Response residuals are defined to be
.. math:: y - p
where :math:`p=cdf(X\\beta)`.
"""
return self.model.endog - self.predict() | The response residuals
Notes
-----
Response residuals are defined to be
.. math:: y - p
where :math:`p=cdf(X\\beta)`. | resid_response | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def resid_generalized(self):
"""
Generalized residuals
Notes
-----
The generalized residuals for the Logit model are defined
.. math:: y - p
where :math:`p=cdf(X\\beta)`. This is the same as the `resid_response`
for the Logit model.
"""
# Generalized residuals
return self.model.endog - self.predict() | Generalized residuals
Notes
-----
The generalized residuals for the Logit model are defined
.. math:: y - p
where :math:`p=cdf(X\\beta)`. This is the same as the `resid_response`
for the Logit model. | resid_generalized | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def get_influence(self):
"""
Get an instance of MLEInfluence with influence and outlier measures
Returns
-------
infl : MLEInfluence instance
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence
"""
from statsmodels.stats.outliers_influence import MLEInfluence
return MLEInfluence(self) | Get an instance of MLEInfluence with influence and outlier measures
Returns
-------
infl : MLEInfluence instance
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence | get_influence | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def resid_generalized(self):
"""
Generalized residuals
Notes
-----
The generalized residuals for the Probit model are defined
.. math:: y\\frac{\\phi(X\\beta)}{\\Phi(X\\beta)}-(1-y)\\frac{\\phi(X\\beta)}{1-\\Phi(X\\beta)}
"""
# generalized residuals
model = self.model
endog = model.endog
XB = self.predict(which="linear")
pdf = model.pdf(XB)
cdf = model.cdf(XB)
return endog * pdf/cdf - (1-endog)*pdf/(1-cdf) | Generalized residuals
Notes
-----
The generalized residuals for the Probit model are defined
.. math:: y\\frac{\\phi(X\\beta)}{\\Phi(X\\beta)}-(1-y)\\frac{\\phi(X\\beta)}{1-\\Phi(X\\beta)} | resid_generalized | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
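A numeric sketch of the two branches of the Probit generalized residual at a single linear predictor value (the value 0.3 is arbitrary):
from scipy import stats
xb = 0.3
pdf, cdf = stats.norm.pdf(xb), stats.norm.cdf(xb)
print(pdf / cdf)          # contribution when y = 1
print(-pdf / (1 - cdf))   # contribution when y = 0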
def _get_endog_name(self, yname, yname_list, all=False):
"""
If all is False, the first variable name is dropped
"""
model = self.model
if yname is None:
yname = model.endog_names
if yname_list is None:
ynames = model._ynames_map
ynames = self._maybe_convert_ynames_int(ynames)
# use range below to ensure sortedness
ynames = [ynames[key] for key in range(int(model.J))]
ynames = ['='.join([yname, name]) for name in ynames]
if not all:
yname_list = ynames[1:] # assumes first variable is dropped
else:
yname_list = ynames
return yname, yname_list | If all is False, the first variable name is dropped | _get_endog_name | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def pred_table(self):
"""
Returns the J x J prediction table.
Notes
-----
pred_table[i,j] refers to the number of times "i" was observed and
the model predicted "j". Correct predictions are along the diagonal.
"""
ju = self.model.J - 1 # highest index
# these are the actual, predicted indices
#idx = lzip(self.model.endog, self.predict().argmax(1))
bins = np.concatenate(([0], np.linspace(0.5, ju - 0.5, ju), [ju]))
return np.histogram2d(self.model.endog, self.predict().argmax(1),
bins=bins)[0] | Returns the J x J prediction table.
Notes
-----
pred_table[i,j] refers to the number of times "i" was observed and
the model predicted "j". Correct predictions are along the diagonal. | pred_table | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def get_prediction(self):
"""Not implemented for Multinomial
"""
raise NotImplementedError | Not implemented for Multinomial | get_prediction | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def resid_misclassified(self):
"""
Residuals indicating which observations are misclassified.
Notes
-----
The residuals for the multinomial model are defined as
.. math:: argmax(y_i) \\neq argmax(p_i)
where :math:`argmax(y_i)` is the index of the category for the
endogenous variable and :math:`argmax(p_i)` is the index of the
predicted probabilities for each category. That is, the residual
is a binary indicator that is 0 if the category with the highest
predicted probability is the same as that of the observed variable
and 1 otherwise.
"""
# it's 0 or 1 - 0 for correct prediction and 1 for a missed one
return (self.model.wendog.argmax(1) !=
self.predict().argmax(1)).astype(float) | Residuals indicating which observations are misclassified.
Notes
-----
The residuals for the multinomial model are defined as
.. math:: argmax(y_i) \\neq argmax(p_i)
where :math:`argmax(y_i)` is the index of the category for the
endogenous variable and :math:`argmax(p_i)` is the index of the
predicted probabilities for each category. That is, the residual
is a binary indicator that is 0 if the category with the highest
predicted probability is the same as that of the observed variable
and 1 otherwise. | resid_misclassified | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
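A toy example (hypothetical one-hot outcomes and predicted probabilities) of the misclassification indicator defined above:
import numpy as np
wendog = np.array([[1, 0, 0],
                   [0, 1, 0],
                   [0, 0, 1]])                 # one-hot observed categories
probs = np.array([[0.6, 0.3, 0.1],
                  [0.2, 0.5, 0.3],
                  [0.5, 0.4, 0.1]])            # hypothetical predicted probabilities
print((wendog.argmax(1) != probs.argmax(1)).astype(float))   # [0. 0. 1.]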
def summary2(self, alpha=0.05, float_format="%.4f"):
"""Experimental function to summarize regression results
Parameters
----------
alpha : float
significance level for the confidence intervals
float_format : str
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_dict(summary2.summary_model(self))
# One data frame per value of endog
eqn = self.params.shape[1]
confint = self.conf_int(alpha)
for i in range(eqn):
coefs = summary2.summary_params((self, self.params[:, i],
self.bse[:, i],
self.tvalues[:, i],
self.pvalues[:, i],
confint[i]),
alpha=alpha)
# Header must show value of endog
level_str = self.model.endog_names + ' = ' + str(i)
coefs[level_str] = coefs.index
coefs = coefs.iloc[:, [-1, 0, 1, 2, 3, 4, 5]]
smry.add_df(coefs, index=False, header=True,
float_format=float_format)
smry.add_title(results=self)
return smry | Experimental function to summarize regression results
Parameters
----------
alpha : float
significance level for the confidence intervals
float_format : str
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results | summary2 | python | statsmodels/statsmodels | statsmodels/discrete/discrete_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/discrete_model.py | BSD-3-Clause |
def test_logit_formula():
"""Test that ConditionalLogit uses the right environment for formulas."""
def times_two(x):
return 2 * x
groups = np.repeat([0, 1], 50)
exog = np.linspace(-2, 2, len(groups))
error = np.linspace(-1, 1, len(groups)) # Needed for within-group variance
logit_link = 1 / (1 + np.exp(exog + groups)) + error
endog = (logit_link > 0.5).astype(int)
data = pd.DataFrame({"exog": exog, "groups": groups, "endog": endog})
result_direct = ConditionalLogit(endog, times_two(exog), groups=groups).fit()
result_formula = ConditionalLogit.from_formula(
"endog ~ 0 + times_two(exog)", groups="groups", data=data
).fit()
assert_allclose(result_direct.params, result_formula.params)
assert_allclose(result_direct.bse, result_formula.bse) | Test that ConditionalLogit uses the right environment for formulas. | test_logit_formula | python | statsmodels/statsmodels | statsmodels/discrete/tests/test_conditional.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/tests/test_conditional.py | BSD-3-Clause |
def __init__(self):
"""r
Results are from Stata 11 (checked vs R nnet package).
"""
self.nobs = 944 | Results are from Stata 11 (checked vs R nnet package). | __init__ | python | statsmodels/statsmodels | statsmodels/discrete/tests/results/results_discrete.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/tests/results/results_discrete.py | BSD-3-Clause |
def __init__(self):
"""
Special results for L1 models
Uses the Spector data and a script to generate the baseline results
"""
pass | Special results for L1 models
Uses the Spector data and a script to generate the baseline results | __init__ | python | statsmodels/statsmodels | statsmodels/discrete/tests/results/results_discrete.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/tests/results/results_discrete.py | BSD-3-Clause |
def logit():
"""
Results generated with:
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = 3 * np.array([0, 1, 1, 1])
res2 = sm.Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, trim_mode='size',
size_trim_tol=1e-5, acc=1e-10, maxiter=1000)
"""
obj = Namespace()
nan = np.nan
obj.params = [-4.10271595, 0., 0.15493781, 0.]
obj.conf_int = [
[-9.15205122, 0.94661932],
[nan, nan],
[-0.06539482, 0.37527044],
[nan, nan]]
obj.bse = [2.5762388, nan, 0.11241668, nan]
obj.nnz_params = 2
obj.aic = 42.091439368583671
obj.bic = 45.022911174183122
obj.cov_params = [
[6.63700638, nan, -0.28636261, nan],
[nan, nan, nan, nan],
[-0.28636261, nan, 0.01263751, nan],
[nan, nan, nan, nan]]
return obj | Results generated with:
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = 3 * np.array([0, 1, 1, 1])
res2 = sm.Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, trim_mode='size',
size_trim_tol=1e-5, acc=1e-10, maxiter=1000) | logit | python | statsmodels/statsmodels | statsmodels/discrete/tests/results/results_discrete.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/tests/results/results_discrete.py | BSD-3-Clause |
def sweep():
"""
Results generated with
params = np.zeros((3, 4))
alphas = np.array(
[[0.1, 0.1, 0.1, 0.1],
[0.4, 0.4, 0.5, 0.5], [0.5, 0.5, 1, 1]])
model = sm.Logit(data.endog, data.exog)
for i in range(3):
alpha = alphas[i, :]
res2 = model.fit_regularized(method="l1", alpha=alpha,
disp=0, acc=1e-10,
maxiter=1000, trim_mode='off')
params[i, :] = res2.params
print(params)
"""
obj = Namespace()
obj.params = [
[-10.37593611, 2.27080968, 0.06670638, 2.05723691],
[-5.32670811, 1.18216019, 0.01402395, 1.45178712],
[-3.92630318, 0.90126958, -0., 1.09498178]]
return obj | Results generated with
params = np.zeros((3, 4))
alphas = np.array(
[[0.1, 0.1, 0.1, 0.1],
[0.4, 0.4, 0.5, 0.5], [0.5, 0.5, 1, 1]])
model = sm.Logit(data.endog, data.exog)
for i in range(3):
alpha = alphas[i, :]
res2 = model.fit_regularized(method="l1", alpha=alpha,
disp=0, acc=1e-10,
maxiter=1000, trim_mode='off')
params[i, :] = res2.params
print(params) | sweep | python | statsmodels/statsmodels | statsmodels/discrete/tests/results/results_discrete.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/tests/results/results_discrete.py | BSD-3-Clause |
def probit():
"""
Results generated with
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0.1, 0.2, 0.3, 10])
res2 = sm.Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, trim_mode='auto',
auto_trim_tol=0.02, acc=1e-10, maxiter=1000)
"""
obj = Namespace()
nan = np.nan
obj.params = [-5.40476992, 1.25018458, 0.04744558, 0.]
obj.conf_int = [
[-9.44077951, -1.36876033],
[0.03716721, 2.46320194],
[-0.09727571, 0.19216687],
[np.nan, np.nan]]
obj.bse = [2.05922641, 0.61889778, 0.07383875, np.nan]
obj.nnz_params = 3
obj.aic = 38.399773877542927
obj.bic = 42.796981585942106
obj.cov_params = [
[4.24041339, -0.83432592, -0.06827915, nan],
[-0.83432592, 0.38303447, -0.01700249, nan],
[-0.06827915, -0.01700249, 0.00545216, nan],
[nan, nan, nan, nan]]
return obj | Results generated with
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0.1, 0.2, 0.3, 10])
res2 = sm.Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, trim_mode='auto',
auto_trim_tol=0.02, acc=1e-10, maxiter=1000) | probit | python | statsmodels/statsmodels | statsmodels/discrete/tests/results/results_discrete.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/tests/results/results_discrete.py | BSD-3-Clause |
def mnlogit():
"""
Results generated with
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
alpha = 10 * np.ones((mlogit_mod.J - 1, mlogit_mod.K))
alpha[-1, :] = 0
mlogit_l1_res = mlogit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02,
acc=1e-10)
"""
obj = Namespace()
obj.params = [
[0.00100163, -0.05864195, -0.06147822, -0.04769671, -0.05222987,
-0.09522432],
[0., 0.03186139, 0.12048999, 0.83211915, 0.92330292,
1.5680646],
[-0.0218185, -0.01988066, -0.00808564, -0.00487463, -0.01400173,
-0.00562079],
[0., 0.03306875, 0., 0.02362861, 0.05486435,
0.14656966],
[0., 0.04448213, 0.03252651, 0.07661761, 0.07265266,
0.0967758],
[0.90993803, -0.50081247, -2.08285102, -5.26132955, -4.86783179,
-9.31537963]]
obj.conf_int = [
[[-0.0646223, 0.06662556],
[np.nan, np.nan],
[-0.03405931, -0.00957768],
[np.nan, np.nan],
[np.nan, np.nan],
[0.26697895, 1.55289711]],
[[-0.1337913, 0.01650741],
[-0.14477255, 0.20849532],
[-0.03500303, -0.00475829],
[-0.11406121, 0.18019871],
[0.00479741, 0.08416684],
[-1.84626136, 0.84463642]],
[[-0.17237962, 0.04942317],
[-0.15146029, 0.39244026],
[-0.02947379, 0.01330252],
[np.nan, np.nan],
[-0.02501483, 0.09006785],
[-3.90379391, -0.26190812]],
[[-0.12938296, 0.03398954],
[0.62612955, 1.03810876],
[-0.02046322, 0.01071395],
[-0.13738534, 0.18464256],
[0.03017236, 0.12306286],
[-6.91227465, -3.61038444]],
[[-0.12469773, 0.02023799],
[0.742564, 1.10404183],
[-0.02791975, -0.00008371],
[-0.08491561, 0.19464431],
[0.0332926, 0.11201273],
[-6.29331126, -3.44235233]],
[[-0.17165567, -0.01879296],
[1.33994079, 1.79618841],
[-0.02027503, 0.00903345],
[-0.00267819, 0.29581751],
[0.05343135, 0.14012026],
[-11.10419107, -7.52656819]]]
obj.bse = [
[0.03348221, 0.03834221, 0.05658338, 0.04167742, 0.03697408,
0.03899631],
[np.nan, 0.09012101, 0.13875269, 0.10509867, 0.09221543,
0.11639184],
[0.00624543, 0.00771564, 0.01091253, 0.00795351, 0.00710116,
0.00747679],
[np.nan, 0.07506769, np.nan, 0.08215148, 0.07131762,
0.07614826],
[np.nan, 0.02024768, 0.02935837, 0.02369699, 0.02008204,
0.02211492],
[0.32804638, 0.68646613, 0.92906957, 0.84233441, 0.72729881,
0.91267567]]
obj.nnz_params = 32
obj.aic = 3019.4391360294126
obj.bic = 3174.6431733460686
return obj | Results generated with
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
alpha = 10 * np.ones((mlogit_mod.J - 1, mlogit_mod.K))
alpha[-1, :] = 0
mlogit_l1_res = mlogit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02,
acc=1e-10) | mnlogit | python | statsmodels/statsmodels | statsmodels/discrete/tests/results/results_discrete.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/tests/results/results_discrete.py | BSD-3-Clause |
def _est_loc_linear(self, bw, endog, exog, data_predict):
"""
Local linear estimator of g(x) in the regression ``y = g(x) + e``.
Parameters
----------
bw : array_like
Vector of bandwidth value(s).
endog : 1D array_like
The dependent variable.
exog : 1D or 2D array_like
The independent variable(s).
data_predict : 1D array_like of length K, where K is the number of variables.
The point at which the density is estimated.
Returns
-------
D_x : array_like
The value of the conditional mean at `data_predict`.
Notes
-----
See p. 81 in [1] and p.38 in [2] for the formulas.
Unlike other methods, this one requires that `data_predict` be 1D.
"""
nobs, k_vars = exog.shape
ker = gpke(bw, data=exog, data_predict=data_predict,
var_type=self.var_type,
ckertype=self.ckertype,
ukertype=self.ukertype,
okertype=self.okertype,
tosum=False) / float(nobs)
# Create the matrix on p.492 in [7], after the multiplication w/ K_h,ij
# See also p. 38 in [2]
#ix_cont = np.arange(self.k_vars) # Use all vars instead of continuous only
# Note: because ix_cont was defined here such that it selected all
# columns, I removed the indexing with it from exog/data_predict.
# Convert ker to a 2-D array to make matrix operations below work
ker = ker[:, np.newaxis]
M12 = exog - data_predict
M22 = np.dot(M12.T, M12 * ker)
M12 = (M12 * ker).sum(axis=0)
M = np.empty((k_vars + 1, k_vars + 1))
M[0, 0] = ker.sum()
M[0, 1:] = M12
M[1:, 0] = M12
M[1:, 1:] = M22
ker_endog = ker * endog
V = np.empty((k_vars + 1, 1))
V[0, 0] = ker_endog.sum()
V[1:, 0] = ((exog - data_predict) * ker_endog).sum(axis=0)
mean_mfx = np.dot(np.linalg.pinv(M), V)
mean = mean_mfx[0]
mfx = mean_mfx[1:, :]
return mean, mfx | Local linear estimator of g(x) in the regression ``y = g(x) + e``.
Parameters
----------
bw : array_like
Vector of bandwidth value(s).
endog : 1D array_like
The dependent variable.
exog : 1D or 2D array_like
The independent variable(s).
data_predict : 1D array_like of length K, where K is the number of variables.
The point at which the density is estimated.
Returns
-------
D_x : array_like
The value of the conditional mean at `data_predict`.
Notes
-----
See p. 81 in [1] and p.38 in [2] for the formulas.
Unlike other methods, this one requires that `data_predict` be 1D. | _est_loc_linear | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
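A standalone numeric sketch of the local linear solve described above, for a single evaluation point and one continuous regressor with a Gaussian kernel (data, bandwidth and names are illustrative, not the class API):
import numpy as np
rng = np.random.default_rng(0)
x = rng.uniform(-2, 2, size=(200, 1))
y = np.sin(x[:, 0]) + 0.1 * rng.standard_normal(200)
x0 = np.array([0.5])                  # evaluation point
bw = 0.3                              # assumed bandwidth
ker = np.exp(-0.5 * ((x - x0) / bw) ** 2).prod(axis=1)[:, None] / len(x)
M12 = x - x0
M = np.empty((2, 2))
M[0, 0] = ker.sum()
M[0, 1:] = (M12 * ker).sum(axis=0)
M[1:, 0] = (M12 * ker).sum(axis=0)
M[1:, 1:] = M12.T @ (M12 * ker)
V = np.empty((2, 1))
V[0, 0] = (ker[:, 0] * y).sum()
V[1:, 0] = (M12 * ker * y[:, None]).sum(axis=0)
beta = np.linalg.pinv(M) @ V
print(beta[0])    # local estimate of g(x0), close to sin(0.5)
print(beta[1:])   # local estimate of g'(x0), close to cos(0.5)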
def _est_loc_constant(self, bw, endog, exog, data_predict):
"""
Local constant estimator of g(x) in the regression
y = g(x) + e
Parameters
----------
bw : array_like
Array of bandwidth value(s).
endog : 1D array_like
The dependent variable.
exog : 1D or 2D array_like
The independent variable(s).
data_predict : 1D or 2D array_like
The point(s) at which the density is estimated.
Returns
-------
G : ndarray
The value of the conditional mean at `data_predict`.
B_x : ndarray
The marginal effects.
"""
ker_x = gpke(bw, data=exog, data_predict=data_predict,
var_type=self.var_type,
ckertype=self.ckertype,
ukertype=self.ukertype,
okertype=self.okertype,
tosum=False)
ker_x = np.reshape(ker_x, np.shape(endog))
G_numer = (ker_x * endog).sum(axis=0)
G_denom = ker_x.sum(axis=0)
G = G_numer / G_denom
nobs = exog.shape[0]
f_x = G_denom / float(nobs)
ker_xc = gpke(bw, data=exog, data_predict=data_predict,
var_type=self.var_type,
ckertype='d_gaussian',
#okertype='wangryzin_reg',
tosum=False)
ker_xc = ker_xc[:, np.newaxis]
d_mx = -(endog * ker_xc).sum(axis=0) / float(nobs) #* np.prod(bw[:, ix_cont]))
d_fx = -ker_xc.sum(axis=0) / float(nobs) #* np.prod(bw[:, ix_cont]))
B_x = d_mx / f_x - G * d_fx / f_x
B_x = (G_numer * d_fx - G_denom * d_mx) / (G_denom**2)
#B_x = (f_x * d_mx - m_x * d_fx) / (f_x ** 2)
return G, B_x | Local constant estimator of g(x) in the regression
y = g(x) + e
Parameters
----------
bw : array_like
Array of bandwidth value(s).
endog : 1D array_like
The dependent variable.
exog : 1D or 2D array_like
The independent variable(s).
data_predict : 1D or 2D array_like
The point(s) at which the density is estimated.
Returns
-------
G : ndarray
The value of the conditional mean at `data_predict`.
B_x : ndarray
The marginal effects. | _est_loc_constant | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
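The local constant (Nadaraya-Watson) estimate used above is a kernel-weighted average; a self-contained sketch with a Gaussian kernel and made-up data:
import numpy as np
rng = np.random.default_rng(1)
x = rng.uniform(-2, 2, size=200)
y = np.sin(x) + 0.1 * rng.standard_normal(200)
grid = np.linspace(-1.5, 1.5, 5)
bw = 0.3                                       # assumed bandwidth
ker = np.exp(-0.5 * ((x[:, None] - grid[None, :]) / bw) ** 2)
g_hat = (ker * y[:, None]).sum(axis=0) / ker.sum(axis=0)
print(np.c_[grid, g_hat, np.sin(grid)])        # estimate vs. true function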
def aic_hurvich(self, bw, func=None):
"""
Computes the AIC Hurvich criteria for the estimation of the bandwidth.
Parameters
----------
bw : str or array_like
See the ``bw`` parameter of `KernelReg` for details.
Returns
-------
aic : ndarray
The AIC Hurvich criteria, one element for each variable.
func : None
Unused here, needed in signature because it's used in `cv_loo`.
References
----------
See ch.2 in [1] and p.35 in [2].
"""
H = np.empty((self.nobs, self.nobs))
for j in range(self.nobs):
H[:, j] = gpke(bw, data=self.exog, data_predict=self.exog[j,:],
ckertype=self.ckertype, ukertype=self.ukertype,
okertype=self.okertype, var_type=self.var_type,
tosum=False)
denom = H.sum(axis=1)
H = H / denom
gx = KernelReg(endog=self.endog, exog=self.exog, var_type=self.var_type,
reg_type=self.reg_type, bw=bw,
defaults=EstimatorSettings(efficient=False)).fit()[0]
gx = np.reshape(gx, (self.nobs, 1))
sigma = ((self.endog - gx)**2).sum(axis=0) / float(self.nobs)
frac = (1 + np.trace(H) / float(self.nobs)) / \
(1 - (np.trace(H) + 2) / float(self.nobs))
#siga = np.dot(self.endog.T, (I - H).T)
#sigb = np.dot((I - H), self.endog)
#sigma = np.dot(siga, sigb) / float(self.nobs)
aic = np.log(sigma) + frac
return aic | Computes the AIC Hurvich criteria for the estimation of the bandwidth.
Parameters
----------
bw : str or array_like
See the ``bw`` parameter of `KernelReg` for details.
Returns
-------
aic : ndarray
The AIC Hurvich criteria, one element for each variable.
func : None
Unused here, needed in signature because it's used in `cv_loo`.
References
----------
See ch.2 in [1] and p.35 in [2]. | aic_hurvich | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
def fit(self, data_predict=None):
"""
Returns the mean and marginal effects at the `data_predict` points.
Parameters
----------
data_predict : array_like, optional
Points at which to return the mean and marginal effects. If not
given, ``data_predict == exog``.
Returns
-------
mean : ndarray
The regression result for the mean (i.e. the actual curve).
mfx : ndarray
The marginal effects, i.e. the partial derivatives of the mean.
"""
func = self.est[self.reg_type]
if data_predict is None:
data_predict = self.exog
else:
data_predict = _adjust_shape(data_predict, self.k_vars)
N_data_predict = np.shape(data_predict)[0]
mean = np.empty((N_data_predict,))
mfx = np.empty((N_data_predict, self.k_vars))
for i in range(N_data_predict):
mean_mfx = func(self.bw, self.endog, self.exog,
data_predict=data_predict[i, :])
mean[i] = np.squeeze(mean_mfx[0])
mfx_c = np.squeeze(mean_mfx[1])
mfx[i, :] = mfx_c
return mean, mfx | Returns the mean and marginal effects at the `data_predict` points.
Parameters
----------
data_predict : array_like, optional
Points at which to return the mean and marginal effects. If not
given, ``data_predict == exog``.
Returns
-------
mean : ndarray
The regression result for the mean (i.e. the actual curve).
mfx : ndarray
The marginal effects, i.e. the partial derivatives of the mean. | fit | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
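A hedged end-to-end usage sketch of the public interface (synthetic data; the least-squares cross-validation bandwidth selection below can be slow on large samples):
import numpy as np
import statsmodels.api as sm
rng = np.random.default_rng(2)
x = rng.uniform(-2, 2, size=200)
y = np.sin(x) + 0.2 * rng.standard_normal(200)
kr = sm.nonparametric.KernelReg(endog=y, exog=x, var_type='c',
                                reg_type='ll', bw='cv_ls')
mean, mfx = kr.fit(data_predict=np.linspace(-1.5, 1.5, 7))
print(mean)   # estimated conditional mean on the grid
print(mfx)    # estimated marginal effects (derivatives) on the grid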
def sig_test(self, var_pos, nboot=50, nested_res=25, pivot=False):
"""
Significance test for the variables in the regression.
Parameters
----------
var_pos : sequence
The position of the variable in exog to be tested.
Returns
-------
sig : str
The level of significance:
- `*` : at 90% confidence level
- `**` : at 95% confidence level
- `***` : at 99% confidence level
- "Not Significant" : if not significant
"""
var_pos = np.asarray(var_pos)
ix_cont, ix_ord, ix_unord = _get_type_pos(self.var_type)
if np.any(ix_cont[var_pos]):
if np.any(ix_ord[var_pos]) or np.any(ix_unord[var_pos]):
raise ValueError("Discrete variable in hypothesis. Must be continuous")
Sig = TestRegCoefC(self, var_pos, nboot, nested_res, pivot)
else:
Sig = TestRegCoefD(self, var_pos, nboot)
return Sig.sig | Significance test for the variables in the regression.
Parameters
----------
var_pos : sequence
The position of the variable in exog to be tested.
Returns
-------
sig : str
The level of significance:
- `*` : at 90% confidence level
- `**` : at 95% confidence level
- `***` : at 99% confidence level
- "Not Significant" : if not significant | sig_test | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
def __repr__(self):
"""Provide something sane to print."""
rpr = "KernelReg instance\n"
rpr += "Number of variables: k_vars = " + str(self.k_vars) + "\n"
rpr += "Number of samples: N = " + str(self.nobs) + "\n"
rpr += "Variable types: " + self.var_type + "\n"
rpr += "BW selection method: " + self._bw_method + "\n"
rpr += "Estimator type: " + self.reg_type + "\n"
return rpr | Provide something sane to print. | __repr__ | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
def _get_class_vars_type(self):
"""Helper method to be able to pass needed vars to _compute_subset."""
class_type = 'KernelReg'
class_vars = (self.var_type, self.k_vars, self.reg_type)
return class_type, class_vars | Helper method to be able to pass needed vars to _compute_subset. | _get_class_vars_type | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
def _compute_dispersion(self, data):
"""
Computes the measure of dispersion.
The minimum of the standard deviation and interquartile range / 1.349
References
----------
See the user guide for the np package in R.
In the notes on bwscaling option in npreg, npudens, npcdens there is
a discussion on the measure of dispersion
"""
data = data[:, 1:]
return _compute_min_std_IQR(data) | Computes the measure of dispersion.
The minimum of the standard deviation and interquartile range / 1.349
References
----------
See the user guide for the np package in R.
In the notes on bwscaling option in npreg, npudens, npcdens there is
a discussion on the measure of dispersion | _compute_dispersion | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
def __repr__(self):
"""Provide something sane to print."""
rpr = "KernelCensoredReg instance\n"
rpr += "Number of variables: k_vars = " + str(self.k_vars) + "\n"
rpr += "Number of samples: nobs = " + str(self.nobs) + "\n"
rpr += "Variable types: " + self.var_type + "\n"
rpr += "BW selection method: " + self._bw_method + "\n"
rpr += "Estimator type: " + self.reg_type + "\n"
return rpr | Provide something sane to print. | __repr__ | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
def _est_loc_linear(self, bw, endog, exog, data_predict, W):
"""
Local linear estimator of g(x) in the regression ``y = g(x) + e``.
Parameters
----------
bw : array_like
Vector of bandwidth value(s)
endog : 1D array_like
The dependent variable
exog : 1D or 2D array_like
The independent variable(s)
data_predict : 1D array_like of length K, where K is
the number of variables. The point at which
the density is estimated
Returns
-------
D_x : array_like
The value of the conditional mean at data_predict
Notes
-----
See p. 81 in [1] and p.38 in [2] for the formulas
Unlike other methods, this one requires that data_predict be 1D
"""
nobs, k_vars = exog.shape
ker = gpke(bw, data=exog, data_predict=data_predict,
var_type=self.var_type,
ckertype=self.ckertype,
ukertype=self.ukertype,
okertype=self.okertype, tosum=False)
# Create the matrix on p.492 in [7], after the multiplication w/ K_h,ij
# See also p. 38 in [2]
# Convert ker to a 2-D array to make matrix operations below work
ker = W * ker[:, np.newaxis]
M12 = exog - data_predict
M22 = np.dot(M12.T, M12 * ker)
M12 = (M12 * ker).sum(axis=0)
M = np.empty((k_vars + 1, k_vars + 1))
M[0, 0] = ker.sum()
M[0, 1:] = M12
M[1:, 0] = M12
M[1:, 1:] = M22
ker_endog = ker * endog
V = np.empty((k_vars + 1, 1))
V[0, 0] = ker_endog.sum()
V[1:, 0] = ((exog - data_predict) * ker_endog).sum(axis=0)
mean_mfx = np.dot(np.linalg.pinv(M), V)
mean = mean_mfx[0]
mfx = mean_mfx[1:, :]
return mean, mfx | Local linear estimator of g(x) in the regression ``y = g(x) + e``.
Parameters
----------
bw : array_like
Vector of bandwidth value(s)
endog : 1D array_like
The dependent variable
exog : 1D or 2D array_like
The independent variable(s)
data_predict : 1D array_like of length K, where K is
the number of variables. The point at which
the density is estimated
Returns
-------
D_x : array_like
The value of the conditional mean at data_predict
Notes
-----
See p. 81 in [1] and p.38 in [2] for the formulas
Unlike other methods, this one requires that data_predict be 1D | _est_loc_linear | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
def fit(self, data_predict=None):
"""
Returns the marginal effects at the data_predict points.
"""
func = self.est[self.reg_type]
if data_predict is None:
data_predict = self.exog
else:
data_predict = _adjust_shape(data_predict, self.k_vars)
N_data_predict = np.shape(data_predict)[0]
mean = np.empty((N_data_predict,))
mfx = np.empty((N_data_predict, self.k_vars))
for i in range(N_data_predict):
mean_mfx = func(self.bw, self.endog, self.exog,
data_predict=data_predict[i, :],
W=self.W_in)
mean[i] = np.squeeze(mean_mfx[0])
mfx_c = np.squeeze(mean_mfx[1])
mfx[i, :] = mfx_c
return mean, mfx | Returns the marginal effects at the data_predict points. | fit | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
def _compute_test_stat(self, Y, X):
"""
Computes the test statistic. See p.371 in [8].
"""
lam = self._compute_lambda(Y, X)
t = lam
if self.pivot:
se_lam = self._compute_se_lambda(Y, X)
t = lam / float(se_lam)
return t | Computes the test statistic. See p.371 in [8]. | _compute_test_stat | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
def _compute_lambda(self, Y, X):
"""Computes only lambda -- the main part of the test statistic"""
n = np.shape(X)[0]
Y = _adjust_shape(Y, 1)
X = _adjust_shape(X, self.k_vars)
b = KernelReg(Y, X, self.var_type, self.model.reg_type, self.bw,
defaults = EstimatorSettings(efficient=False)).fit()[1]
b = b[:, self.test_vars]
b = np.reshape(b, (n, len(self.test_vars)))
#fct = np.std(b) # Pivot the statistic by dividing by SE
fct = 1. # Do not Pivot -- Bootstrapping works better if Pivot
lam = ((b / fct) ** 2).sum() / float(n)
return lam | Computes only lambda -- the main part of the test statistic | _compute_lambda | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
def _compute_se_lambda(self, Y, X):
"""
Calculates the SE of lambda by nested resampling
Used to pivot the statistic.
Bootstrapping works better with estimating pivotal statistics
but slows down computation significantly.
"""
n = np.shape(Y)[0]
lam = np.empty(shape=(self.nres,))
for i in range(self.nres):
ind = np.random.randint(0, n, size=(n, 1))
Y1 = Y[ind, 0]
X1 = X[ind, :]
lam[i] = self._compute_lambda(Y1, X1)
se_lambda = np.std(lam)
return se_lambda | Calculates the SE of lambda by nested resampling
Used to pivot the statistic.
Bootstrapping works better with estimating pivotal statistics
but slows down computation significantly. | _compute_se_lambda | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
def _compute_sig(self):
"""
Computes the significance value for the variable(s) tested.
The empirical distribution of the test statistic is obtained through
bootstrapping the sample. The null hypothesis is rejected if the test
statistic is larger than the 90, 95, 99 percentiles.
"""
t_dist = np.empty(shape=(self.nboot, ))
Y = self.endog
X = copy.deepcopy(self.exog)
n = np.shape(Y)[0]
X[:, self.test_vars] = np.mean(X[:, self.test_vars], axis=0)
# Calculate the restricted mean. See p. 372 in [8]
M = KernelReg(Y, X, self.var_type, self.model.reg_type, self.bw,
defaults=EstimatorSettings(efficient=False)).fit()[0]
M = np.reshape(M, (n, 1))
e = Y - M
e = e - np.mean(e) # recenter residuals
for i in range(self.nboot):
ind = np.random.randint(0, n, size=(n, 1))
e_boot = e[ind, 0]
Y_boot = M + e_boot
t_dist[i] = self._compute_test_stat(Y_boot, self.exog)
self.t_dist = t_dist
sig = "Not Significant"
if self.test_stat > mquantiles(t_dist, 0.9):
sig = "*"
if self.test_stat > mquantiles(t_dist, 0.95):
sig = "**"
if self.test_stat > mquantiles(t_dist, 0.99):
sig = "***"
return sig | Computes the significance value for the variable(s) tested.
The empirical distribution of the test statistic is obtained through
bootstrapping the sample. The null hypothesis is rejected if the test
statistic is larger than the 90, 95, 99 percentiles. | _compute_sig | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
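The same recipe in a generic, self-contained form (function and argument names are invented for illustration): fit the restricted mean m0, recenter the residuals, resample them to build the null distribution of the statistic, and read off the upper quantiles.
import numpy as np
def bootstrap_null_quantiles(m0, resid, stat_func, nboot=199, seed=0):
    """Simulate a statistic under the null by resampling recentered residuals."""
    rng = np.random.default_rng(seed)
    resid = resid - resid.mean()
    n = resid.shape[0]
    t_dist = np.empty(nboot)
    for b in range(nboot):
        idx = rng.integers(0, n, size=n)
        t_dist[b] = stat_func(m0 + resid[idx])
    return np.quantile(t_dist, [0.90, 0.95, 0.99])
# Toy usage with a trivial statistic (the sample mean), illustration only.
rng = np.random.default_rng(1)
print(bootstrap_null_quantiles(np.zeros(50), rng.standard_normal(50), np.mean))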
def _compute_test_stat(self, Y, X):
"""Computes the test statistic"""
dom_x = np.sort(np.unique(self.exog[:, self.test_vars]))
n = np.shape(X)[0]
model = KernelReg(Y, X, self.var_type, self.model.reg_type, self.bw,
defaults = EstimatorSettings(efficient=False))
X1 = copy.deepcopy(X)
X1[:, self.test_vars] = 0
m0 = model.fit(data_predict=X1)[0]
m0 = np.reshape(m0, (n, 1))
zvec = np.zeros((n, 1)) # noqa:E741
for i in dom_x[1:] :
X1[:, self.test_vars] = i
m1 = model.fit(data_predict=X1)[0]
m1 = np.reshape(m1, (n, 1))
zvec += (m1 - m0) ** 2 # noqa:E741
avg = zvec.sum(axis=0) / float(n)
return avg | Computes the test statistic | _compute_test_stat | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
def _compute_sig(self):
"""Calculates the significance level of the variable tested"""
m = self._est_cond_mean()
Y = self.endog
X = self.exog
n = np.shape(X)[0]
u = Y - m
u = u - np.mean(u) # center
fct1 = (1 - 5**0.5) / 2.
fct2 = (1 + 5**0.5) / 2.
u1 = fct1 * u
u2 = fct2 * u
r = fct2 / (5 ** 0.5)
I_dist = np.empty((self.nboot,1))
for j in range(self.nboot):
u_boot = copy.deepcopy(u2)
prob = np.random.uniform(0,1, size = (n,1))
ind = prob < r
u_boot[ind] = u1[ind]
Y_boot = m + u_boot
I_dist[j] = self._compute_test_stat(Y_boot, X)
sig = "Not Significant"
if self.test_stat > mquantiles(I_dist, 0.9):
sig = "*"
if self.test_stat > mquantiles(I_dist, 0.95):
sig = "**"
if self.test_stat > mquantiles(I_dist, 0.99):
sig = "***"
return sig | Calculates the significance level of the variable tested | _compute_sig | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
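The two constants above implement a two-point (golden-section) wild bootstrap: each residual is multiplied by (1 - sqrt(5))/2 with probability r = (1 + sqrt(5))/(2 sqrt(5)) and by (1 + sqrt(5))/2 otherwise. A quick check that this weight distribution has mean 0 and variance 1, so the simulated errors keep the residual scale:
a = (1 - 5 ** 0.5) / 2      # ~ -0.618, used with probability r
b = (1 + 5 ** 0.5) / 2      # ~  1.618, used with probability 1 - r
r = b / 5 ** 0.5
mean = r * a + (1 - r) * b
var = r * a ** 2 + (1 - r) * b ** 2 - mean ** 2
print(mean, var)            # 0.0 and 1.0 (up to floating point)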
def _est_cond_mean(self):
"""
Calculates the expected conditional mean
m(X, Z=l) for all possible l
"""
self.dom_x = np.sort(np.unique(self.exog[:, self.test_vars]))
X = copy.deepcopy(self.exog)
m=0
for i in self.dom_x:
X[:, self.test_vars] = i
m += self.model.fit(data_predict = X)[0]
m = m / float(len(self.dom_x))
m = np.reshape(m, (np.shape(self.exog)[0], 1))
return m | Calculates the expected conditional mean
m(X, Z=l) for all possible l | _est_cond_mean | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_regression.py | BSD-3-Clause |
def lowess(endog, exog, frac=2./3, it=3):
"""
LOWESS (Locally Weighted Scatterplot Smoothing)
A lowess function that outputs smoothed estimates of endog
at the given exog values from points (exog, endog)
Parameters
----------
endog : 1-D numpy array
The y-values of the observed points
exog : 1-D numpy array
The x-values of the observed points
frac : float
Between 0 and 1. The fraction of the data used
when estimating each y-value.
it : int
The number of residual-based reweightings
to perform.
Returns
-------
out: numpy array
A numpy array with two columns. The first column
is the sorted x values and the second column the
associated estimated y-values.
Notes
-----
This lowess function implements the algorithm given in the
reference below using local linear estimates.
Suppose the input data has N points. The algorithm works by
estimating the true ``y_i`` by taking the frac*N closest points
to ``(x_i,y_i)`` based on their x values and estimating ``y_i``
using a weighted linear regression. The weight for ``(x_j,y_j)``
is `_lowess_tricube` function applied to ``|x_i-x_j|``.
If ``iter > 0``, then further weighted local linear regressions
are performed, where the weights are the same as above
times the `_lowess_bisquare` function of the residuals. Each iteration
takes approximately the same amount of time as the original fit,
so these iterations are expensive. They are most useful when
the noise has extremely heavy tails, such as Cauchy noise.
Noise with less heavy-tails, such as t-distributions with ``df > 2``,
are less problematic. The weights downgrade the influence of
points with large residuals. In the extreme case, points whose
residuals are larger than 6 times the median absolute residual
are given weight 0.
Some experimentation is likely required to find a good
choice of frac and iter for a particular dataset.
References
----------
Cleveland, W.S. (1979) "Robust Locally Weighted Regression
and Smoothing Scatterplots". Journal of the American Statistical
Association 74 (368): 829-836.
Examples
--------
The below allows a comparison between how different the fits from
`lowess` for different values of frac can be.
>>> import numpy as np
>>> import statsmodels.api as sm
>>> lowess = sm.nonparametric.lowess
>>> x = np.random.uniform(low=-2*np.pi, high=2*np.pi, size=500)
>>> y = np.sin(x) + np.random.normal(size=len(x))
>>> z = lowess(y, x)
>>> w = lowess(y, x, frac=1./3)
This gives a similar comparison for when ``it`` is 0 vs not.
>>> import scipy.stats as stats
>>> x = np.random.uniform(low=-2*np.pi, high=2*np.pi, size=500)
>>> y = np.sin(x) + stats.cauchy.rvs(size=len(x))
>>> z = lowess(y, x, frac= 1./3, it=0)
>>> w = lowess(y, x, frac=1./3)
"""
x = exog
if exog.ndim != 1:
raise ValueError('exog must be a vector')
if endog.ndim != 1:
raise ValueError('endog must be a vector')
if endog.shape[0] != x.shape[0] :
raise ValueError('exog and endog must have same length')
n = exog.shape[0]
fitted = np.zeros(n)
k = int(frac * n)
index_array = np.argsort(exog)
x_copy = np.array(exog[index_array]) #, dtype ='float32')
y_copy = endog[index_array]
fitted, weights = _lowess_initial_fit(x_copy, y_copy, k, n)
for i in range(it):
_lowess_robustify_fit(x_copy, y_copy, fitted,
weights, k, n)
out = np.array([x_copy, fitted]).T
out.shape = (n,2)
return out | LOWESS (Locally Weighted Scatterplot Smoothing)
A lowess function that outputs smoothed estimates of endog
at the given exog values from points (exog, endog)
Parameters
----------
endog : 1-D numpy array
The y-values of the observed points
exog : 1-D numpy array
The x-values of the observed points
frac : float
Between 0 and 1. The fraction of the data used
when estimating each y-value.
it : int
The number of residual-based reweightings
to perform.
Returns
-------
out: numpy array
A numpy array with two columns. The first column
is the sorted x values and the second column the
associated estimated y-values.
Notes
-----
This lowess function implements the algorithm given in the
reference below using local linear estimates.
Suppose the input data has N points. The algorithm works by
estimating the true ``y_i`` by taking the frac*N closest points
to ``(x_i,y_i)`` based on their x values and estimating ``y_i``
using a weighted linear regression. The weight for ``(x_j,y_j)``
is `_lowess_tricube` function applied to ``|x_i-x_j|``.
If ``iter > 0``, then further weighted local linear regressions
are performed, where the weights are the same as above
times the `_lowess_bisquare` function of the residuals. Each iteration
takes approximately the same amount of time as the original fit,
so these iterations are expensive. They are most useful when
the noise has extremely heavy tails, such as Cauchy noise.
Noise with less heavy-tails, such as t-distributions with ``df > 2``,
are less problematic. The weights downgrade the influence of
points with large residuals. In the extreme case, points whose
residuals are larger than 6 times the median absolute residual
are given weight 0.
Some experimentation is likely required to find a good
choice of frac and iter for a particular dataset.
References
----------
Cleveland, W.S. (1979) "Robust Locally Weighted Regression
and Smoothing Scatterplots". Journal of the American Statistical
Association 74 (368): 829-836.
Examples
--------
The below allows a comparison between how different the fits from
`lowess` for different values of frac can be.
>>> import numpy as np
>>> import statsmodels.api as sm
>>> lowess = sm.nonparametric.lowess
>>> x = np.random.uniform(low=-2*np.pi, high=2*np.pi, size=500)
>>> y = np.sin(x) + np.random.normal(size=len(x))
>>> z = lowess(y, x)
>>> w = lowess(y, x, frac=1./3)
This gives a similar comparison for when ``it`` is 0 vs not.
>>> import scipy.stats as stats
>>> x = np.random.uniform(low=-2*np.pi, high=2*np.pi, size=500)
>>> y = np.sin(x) + stats.cauchy.rvs(size=len(x))
>>> z = lowess(y, x, frac= 1./3, it=0)
>>> w = lowess(y, x, frac=1./3) | lowess | python | statsmodels/statsmodels | statsmodels/nonparametric/smoothers_lowess_old.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/smoothers_lowess_old.py | BSD-3-Clause |
def _lowess_initial_fit(x_copy, y_copy, k, n):
"""
The initial weighted local linear regression for lowess.
Parameters
----------
x_copy : 1-d ndarray
The x-values/exogenous part of the data being smoothed
y_copy : 1-d ndarray
The y-values/ endogenous part of the data being smoothed
k : int
The number of data points which affect the linear fit for
each estimated point
n : int
The total number of points
Returns
-------
fitted : 1-d ndarray
The fitted y-values
weights : 2-d ndarray
An n by k array. The contribution to the weights in the
local linear fit coming from the distances between the
x-values
"""
weights = np.zeros((n,k), dtype = x_copy.dtype)
nn_indices = [0,k]
X = np.ones((k,2))
fitted = np.zeros(n)
for i in range(n):
#note: all _lowess functions are inplace, no return
left_width = x_copy[i] - x_copy[nn_indices[0]]
right_width = x_copy[nn_indices[1]-1] - x_copy[i]
width = max(left_width, right_width)
_lowess_wt_standardize(weights[i,:],
x_copy[nn_indices[0]:nn_indices[1]],
x_copy[i], width)
_lowess_tricube(weights[i,:])
weights[i,:] = np.sqrt(weights[i,:])
X[:,1] = x_copy[nn_indices[0]:nn_indices[1]]
y_i = weights[i,:] * y_copy[nn_indices[0]:nn_indices[1]]
beta = lstsq(weights[i,:].reshape(k,1) * X, y_i, rcond=-1)[0]
fitted[i] = beta[0] + beta[1]*x_copy[i]
_lowess_update_nn(x_copy, nn_indices, i+1)
return fitted, weights | The initial weighted local linear regression for lowess.
Parameters
----------
x_copy : 1-d ndarray
The x-values/exogenous part of the data being smoothed
y_copy : 1-d ndarray
The y-values/ endogenous part of the data being smoothed
k : int
The number of data points which affect the linear fit for
each estimated point
n : int
The total number of points
Returns
-------
fitted : 1-d ndarray
The fitted y-values
weights : 2-d ndarray
An n by k array. The contribution to the weights in the
local linear fit coming from the distances between the
x-values | _lowess_initial_fit | python | statsmodels/statsmodels | statsmodels/nonparametric/smoothers_lowess_old.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/smoothers_lowess_old.py | BSD-3-Clause |
def _lowess_wt_standardize(weights, new_entries, x_copy_i, width):
"""
The initial phase of creating the weights.
Subtract the current x_i and divide by the width.
Parameters
----------
weights : ndarray
The memory where (new_entries - x_copy_i)/width will be placed
new_entries : ndarray
The x-values of the k closest points to x[i]
x_copy_i : float
x[i], the i'th point in the (sorted) x values
width : float
The maximum distance between x[i] and any point in new_entries
Returns
-------
Nothing. The modifications are made to weights in place.
"""
weights[:] = new_entries
weights -= x_copy_i
weights /= width | The initial phase of creating the weights.
Subtract the current x_i and divide by the width.
Parameters
----------
weights : ndarray
The memory where (new_entries - x_copy_i)/width will be placed
new_entries : ndarray
The x-values of the k closest points to x[i]
x_copy_i : float
x[i], the i'th point in the (sorted) x values
width : float
The maximum distance between x[i] and any point in new_entries
Returns
-------
Nothing. The modifications are made to weights in place. | _lowess_wt_standardize | python | statsmodels/statsmodels | statsmodels/nonparametric/smoothers_lowess_old.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/smoothers_lowess_old.py | BSD-3-Clause |
def _lowess_robustify_fit(x_copy, y_copy, fitted, weights, k, n):
"""
Additional weighted local linear regressions, performed if
iter>0. They take into account the sizes of the residuals,
to eliminate the effect of extreme outliers.
Parameters
----------
x_copy : 1-d ndarray
The x-values/exogenous part of the data being smoothed
y_copy : 1-d ndarray
The y-values/ endogenous part of the data being smoothed
fitted : 1-d ndarray
The fitted y-values from the previous iteration
weights : 2-d ndarray
An n by k array. The contribution to the weights in the
local linear fit coming from the distances between the
x-values
k : int
The number of data points which affect the linear fit for
each estimated point
n : int
The total number of points
Returns
-------
Nothing. The fitted values are modified in place.
"""
nn_indices = [0,k]
X = np.ones((k,2))
residual_weights = np.copy(y_copy)
residual_weights.shape = (n,)
residual_weights -= fitted
residual_weights = np.absolute(residual_weights)#, out=residual_weights)
s = np.median(residual_weights)
residual_weights /= (6*s)
too_big = residual_weights>=1
_lowess_bisquare(residual_weights)
residual_weights[too_big] = 0
for i in range(n):
total_weights = weights[i,:] * np.sqrt(residual_weights[nn_indices[0]:
nn_indices[1]])
X[:,1] = x_copy[nn_indices[0]:nn_indices[1]]
y_i = total_weights * y_copy[nn_indices[0]:nn_indices[1]]
total_weights.shape = (k,1)
beta = lstsq(total_weights * X, y_i, rcond=-1)[0]
fitted[i] = beta[0] + beta[1] * x_copy[i]
_lowess_update_nn(x_copy, nn_indices, i+1) | Additional weighted local linear regressions, performed if
iter>0. They take into account the sizes of the residuals,
to eliminate the effect of extreme outliers.
Parameters
----------
x_copy : 1-d ndarray
The x-values/exogenous part of the data being smoothed
y_copy : 1-d ndarray
The y-values/ endogenous part of the data being smoothed
fitted : 1-d ndarray
The fitted y-values from the previous iteration
weights : 2-d ndarray
An n by k array. The contribution to the weights in the
local linear fit coming from the distances between the
x-values
k : int
The number of data points which affect the linear fit for
each estimated point
n : int
The total number of points
Returns
-------
Nothing. The fitted values are modified in place. | _lowess_robustify_fit | python | statsmodels/statsmodels | statsmodels/nonparametric/smoothers_lowess_old.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/smoothers_lowess_old.py | BSD-3-Clause |
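The residual-based reweighting rule used above (bisquare of the absolute residuals scaled by six times their median, with weight 0 beyond that cutoff) can be sketched on its own. This is illustrative only; ``y`` and ``fitted`` stand in for a previous lowess pass:
>>> import numpy as np
>>> rng = np.random.default_rng(1)
>>> grid = np.linspace(0.0, 6.0, 40)
>>> fitted = np.sin(grid)                               # stand-in for a previous fit
>>> y = fitted + rng.standard_t(df=1, size=40)          # heavy-tailed noise
>>> resid = np.abs(y - fitted)
>>> u = resid / (6.0 * np.median(resid))
>>> robust_w = np.where(u < 1.0, (1.0 - u**2) ** 2, 0.0)  # bisquare, zero beyond 6*median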
def _lowess_update_nn(x, cur_nn,i):
"""
Update the endpoints of the nearest neighbors to
the ith point.
Parameters
----------
x : iterable
The sorted points of x-values
cur_nn : list of length 2
The two current indices between which are the
k closest points to x[i]. (The actual value of
k is irrelevant for the algorithm.)
i : int
The index of the current value in x for which
the k closest points are desired.
Returns
-------
Nothing. It modifies cur_nn in place.
"""
while True:
if cur_nn[1]<x.size:
left_dist = x[i] - x[cur_nn[0]]
new_right_dist = x[cur_nn[1]] - x[i]
if new_right_dist < left_dist:
cur_nn[0] = cur_nn[0] + 1
cur_nn[1] = cur_nn[1] + 1
else:
break
else:
break | Update the endpoints of the nearest neighbors to
the ith point.
Parameters
----------
x : iterable
The sorted points of x-values
cur_nn : list of length 2
The two current indices between which are the
k closest points to x[i]. (The actual value of
k is irrelevant for the algorithm.)
i : int
The index of the current value in x for which
the k closest points are desired.
Returns
-------
Nothing. It modifies cur_nn in place. | _lowess_update_nn | python | statsmodels/statsmodels | statsmodels/nonparametric/smoothers_lowess_old.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/smoothers_lowess_old.py | BSD-3-Clause |
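The window update is easiest to see on a small sorted array. The loop below re-expresses the helper's rule as a standalone sketch rather than importing the private function:
>>> import numpy as np
>>> x = np.array([0.0, 0.5, 0.9, 2.0, 2.1, 2.2, 5.0])
>>> k = 3
>>> cur_nn = [0, k]                                     # window initially covers x[0:3]
>>> for i in range(x.size):
...     # slide right while the next point is closer to x[i] than the leftmost one
...     while cur_nn[1] < x.size and x[cur_nn[1]] - x[i] < x[i] - x[cur_nn[0]]:
...         cur_nn[0] += 1
...         cur_nn[1] += 1
...     print(i, cur_nn)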
def _lowess_tricube(t):
"""
The _tricube function applied to a numpy array.
The tricube function is (1-abs(t)**3)**3.
Parameters
----------
t : ndarray
Array the tricube function is applied to elementwise and
in-place.
Returns
-------
Nothing
"""
#t = (1-np.abs(t)**3)**3
t[:] = np.absolute(t) #, out=t) #numpy version?
_lowess_mycube(t)
t[:] = np.negative(t) #, out = t)
t += 1
_lowess_mycube(t) | The _tricube function applied to a numpy array.
The tricube function is (1-abs(t)**3)**3.
Parameters
----------
t : ndarray
Array the tricube function is applied to elementwise and
in-place.
Returns
-------
Nothing | _lowess_tricube | python | statsmodels/statsmodels | statsmodels/nonparametric/smoothers_lowess_old.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/smoothers_lowess_old.py | BSD-3-Clause |
def _lowess_mycube(t):
"""
Fast matrix cube
Parameters
----------
t : ndarray
Array that is cubed, elementwise and in-place
Returns
-------
Nothing
"""
#t **= 3
t2 = t*t
t *= t2 | Fast matrix cube
Parameters
----------
t : ndarray
Array that is cubed, elementwise and in-place
Returns
-------
Nothing | _lowess_mycube | python | statsmodels/statsmodels | statsmodels/nonparametric/smoothers_lowess_old.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/smoothers_lowess_old.py | BSD-3-Clause |
def _lowess_bisquare(t):
"""
The bisquare function applied to a numpy array.
The bisquare function is (1-t**2)**2.
Parameters
----------
t : ndarray
array bisquare function is applied to, element-wise and in-place.
Returns
-------
Nothing
"""
#t = (1-t**2)**2
t *= t
t[:] = np.negative(t) #, out=t)
t += 1
t *= t | The bisquare function applied to a numpy array.
The bisquare function is (1-t**2)**2.
Parameters
----------
t : ndarray
array bisquare function is applied to, element-wise and in-place.
Returns
-------
Nothing | _lowess_bisquare | python | statsmodels/statsmodels | statsmodels/nonparametric/smoothers_lowess_old.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/smoothers_lowess_old.py | BSD-3-Clause |
def pdf_kernel_asym(x, sample, bw, kernel_type, weights=None, batch_size=10):
"""Density estimate based on asymmetric kernel.
Parameters
----------
x : array_like, float
Points for which density is evaluated. ``x`` can be scalar or 1-dim.
sample : ndarray, 1-d
Sample from which kernel estimate is computed.
bw : float
Bandwidth parameter, there is currently no default value for it.
kernel_type : str or callable
Kernel name or kernel function.
Currently supported kernel names are "beta", "beta2", "gamma",
"gamma2", "bs", "invgamma", "invgauss", "lognorm", "recipinvgauss" and
"weibull".
weights : None or ndarray
If weights is not None, then the kernel for each sample point is weighted
by it. No weights corresponds to uniform weighting of each component
with 1 / nobs, where nobs is the size of `sample`.
batch_size : float
If x is a 1-dim array, then points can be evaluated in vectorized
form. To limit the amount of memory, a loop can work in batches.
The number of batches is determined so that the intermediate array
sizes are limited by
``np.size(batch) * len(sample) < batch_size * 1000``.
Default is to have at most 10000 elements in intermediate arrays.
Returns
-------
pdf : float or ndarray
Estimate of pdf at points x. ``pdf`` has the same size or shape as x.
"""
if callable(kernel_type):
kfunc = kernel_type
else:
kfunc = kernel_dict_pdf[kernel_type]
batch_size = batch_size * 1000
if np.size(x) * len(sample) < batch_size:
# no batch-loop
if np.size(x) > 1:
x = np.asarray(x)[:, None]
pdfi = kfunc(x, sample, bw)
if weights is None:
pdf = pdfi.mean(-1)
else:
pdf = pdfi @ weights
else:
# batch, designed for 1-d x
if weights is None:
weights = np.ones(len(sample)) / len(sample)
k = batch_size // len(sample)
n = len(x) // k
x_split = np.array_split(x, n)
pdf = np.concatenate([(kfunc(xi[:, None], sample, bw) @ weights)
for xi in x_split])
return pdf | Density estimate based on asymmetric kernel.
Parameters
----------
x : array_like, float
Points for which density is evaluated. ``x`` can be scalar or 1-dim.
sample : ndarray, 1-d
Sample from which kernel estimate is computed.
bw : float
Bandwidth parameter, there is currently no default value for it.
kernel_type : str or callable
Kernel name or kernel function.
Currently supported kernel names are "beta", "beta2", "gamma",
"gamma2", "bs", "invgamma", "invgauss", "lognorm", "recipinvgauss" and
"weibull".
weights : None or ndarray
If weights is not None, then the kernel for each sample point is weighted
by it. No weights corresponds to uniform weighting of each component
with 1 / nobs, where nobs is the size of `sample`.
batch_size : float
If x is a 1-dim array, then points can be evaluated in vectorized
form. To limit the amount of memory, a loop can work in batches.
The number of batches is determined so that the intermediate array
sizes are limited by
``np.size(batch) * len(sample) < batch_size * 1000``.
Default is to have at most 10000 elements in intermediate arrays.
Returns
-------
pdf : float or ndarray
Estimate of pdf at points x. ``pdf`` has the same size or shape as x. | pdf_kernel_asym | python | statsmodels/statsmodels | statsmodels/nonparametric/kernels_asymmetric.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernels_asymmetric.py | BSD-3-Clause |
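A minimal usage sketch for the function above; the import path follows the row's metadata, and the bandwidth is an arbitrary illustrative value since no default exists:
>>> import numpy as np
>>> from statsmodels.nonparametric.kernels_asymmetric import pdf_kernel_asym
>>> rng = np.random.default_rng(123)
>>> sample = rng.gamma(shape=2.0, scale=1.5, size=500)  # nonnegative data
>>> x = np.linspace(0.05, 12.0, 200)                    # evaluation grid
>>> bw = 0.5                                            # illustrative bandwidth
>>> pdf_hat = pdf_kernel_asym(x, sample, bw, "gamma")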
def cdf_kernel_asym(x, sample, bw, kernel_type, weights=None, batch_size=10):
"""Estimate of cumulative distribution based on asymmetric kernel.
Parameters
----------
x : array_like, float
Points for which density is evaluated. ``x`` can be scalar or 1-dim.
sample : ndarray, 1-d
Sample from which kernel estimate is computed.
bw : float
Bandwidth parameter, there is currently no default value for it.
kernel_type : str or callable
Kernel name or kernel function.
Currently supported kernel names are "beta", "beta2", "gamma",
"gamma2", "bs", "invgamma", "invgauss", "lognorm", "recipinvgauss" and
"weibull".
weights : None or ndarray
If weights is not None, then the kernel for each sample point is weighted
by it. No weights corresponds to uniform weighting of each component
with 1 / nobs, where nobs is the size of `sample`.
batch_size : float
If x is a 1-dim array, then points can be evaluated in vectorized
form. To limit the amount of memory, a loop can work in batches.
The number of batches is determined so that the intermediate array
sizes are limited by
``np.size(batch) * len(sample) < batch_size * 1000``.
Default is to have at most 10000 elements in intermediate arrays.
Returns
-------
cdf : float or ndarray
Estimate of cdf at points x. ``cdf`` has the same size or shape as x.
"""
if callable(kernel_type):
kfunc = kernel_type
else:
kfunc = kernel_dict_cdf[kernel_type]
batch_size = batch_size * 1000
if np.size(x) * len(sample) < batch_size:
# no batch-loop
if np.size(x) > 1:
x = np.asarray(x)[:, None]
cdfi = kfunc(x, sample, bw)
if weights is None:
cdf = cdfi.mean(-1)
else:
cdf = cdfi @ weights
else:
# batch, designed for 1-d x
if weights is None:
weights = np.ones(len(sample)) / len(sample)
k = batch_size // len(sample)
n = len(x) // k
x_split = np.array_split(x, n)
cdf = np.concatenate([(kfunc(xi[:, None], sample, bw) @ weights)
for xi in x_split])
return cdf | Estimate of cumulative distribution based on asymmetric kernel.
Parameters
----------
x : array_like, float
Points for which density is evaluated. ``x`` can be scalar or 1-dim.
sample : ndarray, 1-d
Sample from which kernel estimate is computed.
bw : float
Bandwidth parameter, there is currently no default value for it.
kernel_type : str or callable
Kernel name or kernel function.
Currently supported kernel names are "beta", "beta2", "gamma",
"gamma2", "bs", "invgamma", "invgauss", "lognorm", "recipinvgauss" and
"weibull".
weights : None or ndarray
If weights is not None, then the kernel for each sample point is weighted
by it. No weights corresponds to uniform weighting of each component
with 1 / nobs, where nobs is the size of `sample`.
batch_size : float
If x is a 1-dim array, then points can be evaluated in vectorized
form. To limit the amount of memory, a loop can work in batches.
The number of batches is determined so that the intermediate array
sizes are limited by
``np.size(batch) * len(sample) < batch_size * 1000``.
Default is to have at most 10000 elements in intermediate arrays.
Returns
-------
cdf : float or ndarray
Estimate of cdf at points x. ``cdf`` has the same size or shape as x. | cdf_kernel_asym | python | statsmodels/statsmodels | statsmodels/nonparametric/kernels_asymmetric.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernels_asymmetric.py | BSD-3-Clause |
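As a quick check of the ``weights`` handling described above (a sketch, not part of the source), passing explicit uniform weights of 1 / nobs should reproduce the unweighted estimate:
>>> import numpy as np
>>> from statsmodels.nonparametric.kernels_asymmetric import cdf_kernel_asym
>>> rng = np.random.default_rng(7)
>>> sample = rng.gamma(2.0, 1.0, size=300)
>>> x = np.linspace(0.05, 8.0, 20)
>>> bw = 0.4                                            # arbitrary illustrative bandwidth
>>> cdf_default = cdf_kernel_asym(x, sample, bw, "gamma")
>>> w = np.ones(sample.size) / sample.size              # uniform weights, 1/nobs each
>>> cdf_weighted = cdf_kernel_asym(x, sample, bw, "gamma", weights=w)
>>> assert np.allclose(cdf_default, cdf_weighted)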
def _kernel_pdf_gamma(x, sample, bw):
"""Gamma kernel for pdf, without boundary corrected part.
drops `+ 1` in shape parameter
It should be possible to use this if probability in
neighborhood of zero boundary is small.
"""
return stats.gamma.pdf(sample, x / bw, scale=bw) | Gamma kernel for pdf, without boundary corrected part.
drops `+ 1` in shape parameter
It should be possible to use this if probability in
neighborhood of zero boundary is small. | _kernel_pdf_gamma | python | statsmodels/statsmodels | statsmodels/nonparametric/kernels_asymmetric.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernels_asymmetric.py | BSD-3-Clause |
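Because `pdf_kernel_asym` also accepts a callable, this uncorrected variant can be passed directly. A sketch under the docstring's assumption that little probability mass sits near the zero boundary:
>>> import numpy as np
>>> from statsmodels.nonparametric.kernels_asymmetric import pdf_kernel_asym
>>> from statsmodels.nonparametric.kernels_asymmetric import _kernel_pdf_gamma
>>> rng = np.random.default_rng(42)
>>> sample = rng.gamma(5.0, 1.0, size=400)              # little mass near zero
>>> x = np.linspace(1.0, 12.0, 30)
>>> bw = 0.2
>>> pdf_plain = pdf_kernel_asym(x, sample, bw, _kernel_pdf_gamma)  # callable kernel
>>> pdf_named = pdf_kernel_asym(x, sample, bw, "gamma")            # named kernel keeps the `+ 1`
The two should be close away from the boundary when the bandwidth is small.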
def _kernel_cdf_gamma(x, sample, bw):
"""Gamma kernel for cdf, without boundary corrected part.
drops `+ 1` in shape parameter
It should be possible to use this if probability in
neighborhood of zero boundary is small.
"""
return stats.gamma.sf(sample, x / bw, scale=bw) | Gamma kernel for cdf, without boundary corrected part.
drops `+ 1` in shape parameter
It should be possible to use this if probability in
neighborhood of zero boundary is small. | _kernel_cdf_gamma | python | statsmodels/statsmodels | statsmodels/nonparametric/kernels_asymmetric.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernels_asymmetric.py | BSD-3-Clause |
def kernel_pdf_invgauss_(x, sample, bw):
"""Inverse gaussian kernel density, explicit formula.
Scaillet 2004
"""
pdf = (1 / np.sqrt(2 * np.pi * bw * sample**3) *
np.exp(- 1 / (2 * bw * x) * (sample / x - 2 + x / sample)))
return pdf.mean(-1) | Inverse gaussian kernel density, explicit formula.
Scaillet 2004 | kernel_pdf_invgauss_ | python | statsmodels/statsmodels | statsmodels/nonparametric/kernels_asymmetric.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernels_asymmetric.py | BSD-3-Clause |
def kernel_pdf_recipinvgauss_(x, sample, bw):
"""Reciprocal inverse gaussian kernel density, explicit formula.
Scaillet 2004
"""
    pdf = (1 / np.sqrt(2 * np.pi * bw * sample) *
           np.exp(- (x - bw) / (2 * bw) * (sample / (x - bw) - 2 +
                                           (x - bw) / sample)))
return pdf | Reciprocal inverse gaussian kernel density, explicit formula.
Scaillet 2004 | kernel_pdf_recipinvgauss_ | python | statsmodels/statsmodels | statsmodels/nonparametric/kernels_asymmetric.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernels_asymmetric.py | BSD-3-Clause |
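With the grouping in the exponent fixed as above, the kernel should be a proper density in the sample variable whenever ``x > bw``. A rough numerical sanity check (a sketch, not part of the source):
>>> import numpy as np
>>> from statsmodels.nonparametric.kernels_asymmetric import kernel_pdf_recipinvgauss_
>>> x0, bw = 2.0, 0.1                                   # evaluation point away from zero
>>> y = np.linspace(1e-4, 20.0, 20001)                  # fine grid over the sample support
>>> vals = kernel_pdf_recipinvgauss_(x0, y, bw)         # kernel values at each grid point
>>> area = np.sum(0.5 * (vals[1:] + vals[:-1]) * np.diff(y))  # trapezoid rule
>>> round(float(area), 3)                               # should be close to 1.0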