code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def test_std_err(df):
"""
Test the standard error of the mean matches result from scipy.stats.sem
"""
np.testing.assert_allclose(
Description(df["a"]).frame.loc["std_err"],
scipy.stats.sem(df["a"])
) | Test the standard error of the mean matches result from scipy.stats.sem | test_std_err | python | statsmodels/statsmodels | statsmodels/stats/tests/test_descriptivestats.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/tests/test_descriptivestats.py | BSD-3-Clause |
def _isfloat(x):
"""
    returns True if x can be converted to a float,
    returns False otherwise
"""
try:
float(x)
except Exception:
return False
    return True | returns True if x can be converted to a float,
returns False otherwise | _isfloat | python | statsmodels/statsmodels | statsmodels/stats/libqsturng/qsturng_.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/libqsturng/qsturng_.py | BSD-3-Clause |
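A quick sanity check of the helper's behavior (a minimal sketch; importing the private function by the module path shown in the `path` column is an assumption about the installed layout):

```python
# Hypothetical private import; the helper simply tests float-convertibility.
from statsmodels.stats.libqsturng.qsturng_ import _isfloat

assert _isfloat(3) and _isfloat("3.14")     # numbers and numeric strings
assert not _isfloat("abc") and not _isfloat(None)
```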
def _phi( p ):
# this function is faster than using scipy.stats.norm.isf(p)
    # but the permissiveness of the license is not explicitly listed.
# using scipy.stats.norm.isf(p) is an acceptable alternative
"""
Modified from the author's original perl code (original comments follow below)
by [email protected]. May 3, 2004.
Lower tail quantile for standard normal distribution function.
This function returns an approximation of the inverse cumulative
standard normal distribution function. I.e., given P, it returns
an approximation to the X satisfying P = Pr{Z <= X} where Z is a
random variable from the standard normal distribution.
The algorithm uses a minimax approximation by rational functions
and the result has a relative error whose absolute value is less
than 1.15e-9.
Author: Peter John Acklam
Time-stamp: 2000-07-19 18:26:14
E-mail: [email protected]
WWW URL: http://home.online.no/~pjacklam
"""
if p <= 0 or p >= 1:
# The original perl code exits here, we'll throw an exception instead
raise ValueError( "Argument to ltqnorm %f must be in open interval (0,1)" % p )
# Coefficients in rational approximations.
a = (-3.969683028665376e+01, 2.209460984245205e+02, \
-2.759285104469687e+02, 1.383577518672690e+02, \
-3.066479806614716e+01, 2.506628277459239e+00)
b = (-5.447609879822406e+01, 1.615858368580409e+02, \
-1.556989798598866e+02, 6.680131188771972e+01, \
-1.328068155288572e+01 )
c = (-7.784894002430293e-03, -3.223964580411365e-01, \
-2.400758277161838e+00, -2.549732539343734e+00, \
4.374664141464968e+00, 2.938163982698783e+00)
d = ( 7.784695709041462e-03, 3.224671290700398e-01, \
2.445134137142996e+00, 3.754408661907416e+00)
# Define break-points.
plow = 0.02425
phigh = 1 - plow
# Rational approximation for lower region:
if p < plow:
q = math.sqrt(-2*math.log(p))
return -(((((c[0]*q+c[1])*q+c[2])*q+c[3])*q+c[4])*q+c[5]) / \
((((d[0]*q+d[1])*q+d[2])*q+d[3])*q+1)
# Rational approximation for upper region:
if phigh < p:
q = math.sqrt(-2*math.log(1-p))
return (((((c[0]*q+c[1])*q+c[2])*q+c[3])*q+c[4])*q+c[5]) / \
((((d[0]*q+d[1])*q+d[2])*q+d[3])*q+1)
# Rational approximation for central region:
q = p - 0.5
r = q*q
return -(((((a[0]*r+a[1])*r+a[2])*r+a[3])*r+a[4])*r+a[5])*q / \
(((((b[0]*r+b[1])*r+b[2])*r+b[3])*r+b[4])*r+1) | Modified from the author's original perl code (original comments follow below)
by [email protected]. May 3, 2004.
Lower tail quantile for standard normal distribution function.
This function returns an approximation of the inverse cumulative
standard normal distribution function. I.e., given P, it returns
an approximation to the X satisfying P = Pr{Z <= X} where Z is a
random variable from the standard normal distribution.
The algorithm uses a minimax approximation by rational functions
and the result has a relative error whose absolute value is less
than 1.15e-9.
Author: Peter John Acklam
Time-stamp: 2000-07-19 18:26:14
E-mail: [email protected]
WWW URL: http://home.online.no/~pjacklam | _phi | python | statsmodels/statsmodels | statsmodels/stats/libqsturng/qsturng_.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/libqsturng/qsturng_.py | BSD-3-Clause |
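A minimal sketch validating the Acklam approximation against `scipy.stats.norm.ppf`, which the comments above name as an acceptable alternative (the private import path is an assumption):

```python
import numpy as np
from scipy.stats import norm
from statsmodels.stats.libqsturng.qsturng_ import _phi  # assumed path

for p in (0.01, 0.02425, 0.5, 0.975, 0.999):
    # the documented relative error bound of the approximation is 1.15e-9
    assert np.isclose(_phi(p), norm.ppf(p), rtol=1e-8, atol=1e-9)
```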
def _ptransform(p):
"""function for p-value abcissa transformation"""
return -1. / (1. + 1.5 * _phi((1. + p)/2.)) | function for p-value abcissa transformation | _ptransform | python | statsmodels/statsmodels | statsmodels/stats/libqsturng/qsturng_.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/libqsturng/qsturng_.py | BSD-3-Clause |
def _func(a, p, r, v):
"""
calculates f-hat for the coefficients in a, probability p,
sample mean difference r, and degrees of freedom v.
"""
# eq. 2.3
f = a[0]*math.log(r-1.) + \
a[1]*math.log(r-1.)**2 + \
a[2]*math.log(r-1.)**3 + \
a[3]*math.log(r-1.)**4
# eq. 2.7 and 2.8 corrections
if r == 3:
f += -0.002 / (1. + 12. * _phi(p)**2)
if v <= 4.364:
v = v if not np.isinf(v) else 1e38
f += 1. / 517. - 1. / (312. * v)
else:
v = v if not np.isinf(v) else 1e38
f += 1. / (191. * v)
return -f | calculates f-hat for the coefficients in a, probability p,
sample mean difference r, and degrees of freedom v. | _func | python | statsmodels/statsmodels | statsmodels/stats/libqsturng/qsturng_.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/libqsturng/qsturng_.py | BSD-3-Clause |
def _select_ps(p):
# There are more generic ways of doing this but profiling
# revealed that selecting these points is one of the slow
# things that is easy to change. This is about 11 times
# faster than the generic algorithm it is replacing.
#
# it is possible that different break points could yield
# better estimates, but the function this is refactoring
# just used linear distance.
"""returns the points to use for interpolating p"""
if p >= .99:
return .990, .995, .999
elif p >= .975:
return .975, .990, .995
elif p >= .95:
return .950, .975, .990
elif p >= .9125:
return .900, .950, .975
elif p >= .875:
return .850, .900, .950
elif p >= .825:
return .800, .850, .900
elif p >= .7625:
return .750, .800, .850
elif p >= .675:
return .675, .750, .800
elif p >= .500:
return .500, .675, .750
else:
return .100, .500, .675 | returns the points to use for interpolating p | _select_ps | python | statsmodels/statsmodels | statsmodels/stats/libqsturng/qsturng_.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/libqsturng/qsturng_.py | BSD-3-Clause |
def _interpolate_p(p, r, v):
"""
interpolates p based on the values in the A table for the
scalar value of r and the scalar value of v
"""
# interpolate p (v should be in table)
# if .5 < p < .75 use linear interpolation in q
# if p > .75 use quadratic interpolation in log(y + r/v)
# by -1. / (1. + 1.5 * _phi((1. + p)/2.))
    # find the 3 closest p values
p0, p1, p2 = _select_ps(p)
try:
y0 = _func(A[(p0, v)], p0, r, v) + 1.
    except Exception:
        # report the arguments that triggered the failure, then re-raise
        print(p, r, v)
raise
y1 = _func(A[(p1, v)], p1, r, v) + 1.
y2 = _func(A[(p2, v)], p2, r, v) + 1.
y_log0 = math.log(y0 + float(r)/float(v))
y_log1 = math.log(y1 + float(r)/float(v))
y_log2 = math.log(y2 + float(r)/float(v))
# If p < .85 apply only the ordinate transformation
    # if p > .85 apply the ordinate and the abscissa transformation
# In both cases apply quadratic interpolation
if p > .85:
p_t = _ptransform(p)
p0_t = _ptransform(p0)
p1_t = _ptransform(p1)
p2_t = _ptransform(p2)
# calculate derivatives for quadratic interpolation
d2 = 2*((y_log2-y_log1)/(p2_t-p1_t) - \
(y_log1-y_log0)/(p1_t-p0_t))/(p2_t-p0_t)
if (p2+p0)>=(p1+p1):
d1 = (y_log2-y_log1)/(p2_t-p1_t) - 0.5*d2*(p2_t-p1_t)
else:
d1 = (y_log1-y_log0)/(p1_t-p0_t) + 0.5*d2*(p1_t-p0_t)
d0 = y_log1
# interpolate value
y_log = (d2/2.) * (p_t-p1_t)**2. + d1 * (p_t-p1_t) + d0
# transform back to y
y = math.exp(y_log) - float(r)/float(v)
elif p > .5:
# calculate derivatives for quadratic interpolation
d2 = 2*((y_log2-y_log1)/(p2-p1) - \
(y_log1-y_log0)/(p1-p0))/(p2-p0)
if (p2+p0)>=(p1+p1):
d1 = (y_log2-y_log1)/(p2-p1) - 0.5*d2*(p2-p1)
else:
d1 = (y_log1-y_log0)/(p1-p0) + 0.5*d2*(p1-p0)
d0 = y_log1
# interpolate values
y_log = (d2/2.) * (p-p1)**2. + d1 * (p-p1) + d0
# transform back to y
y = math.exp(y_log) - float(r)/float(v)
else:
# linear interpolation in q and p
v = min(v, 1e38)
q0 = math.sqrt(2) * -y0 * \
scipy.stats.t.isf((1.+p0)/2., v)
q1 = math.sqrt(2) * -y1 * \
scipy.stats.t.isf((1.+p1)/2., v)
d1 = (q1-q0)/(p1-p0)
d0 = q0
# interpolate values
q = d1 * (p-p0) + d0
# transform back to y
y = -q / (math.sqrt(2) * scipy.stats.t.isf((1.+p)/2., v))
return y | interpolates p based on the values in the A table for the
scalar value of r and the scalar value of v | _interpolate_p | python | statsmodels/statsmodels | statsmodels/stats/libqsturng/qsturng_.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/libqsturng/qsturng_.py | BSD-3-Clause |
def _select_vs(v, p):
    # This one is about 30 times faster than
# the generic algorithm it is replacing.
"""returns the points to use for interpolating v"""
if v >= 120.:
return 60, 120, inf
elif v >= 60.:
return 40, 60, 120
elif v >= 40.:
return 30, 40, 60
elif v >= 30.:
return 24, 30, 40
elif v >= 24.:
return 20, 24, 30
elif v >= 19.5:
return 19, 20, 24
if p >= .9:
if v < 2.5:
return 1, 2, 3
else:
if v < 3.5:
return 2, 3, 4
vi = int(round(v))
return vi - 1, vi, vi + 1 | returns the points to use for interpolating v | _select_vs | python | statsmodels/statsmodels | statsmodels/stats/libqsturng/qsturng_.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/libqsturng/qsturng_.py | BSD-3-Clause |
def _interpolate_v(p, r, v):
"""
interpolates v based on the values in the A table for the
    scalar value of r and the scalar value of p
"""
# interpolate v (p should be in table)
# ordinate: y**2
# abcissa: 1./v
# find the 3 closest v values
# only p >= .9 have table values for 1 degree of freedom.
# The boolean is used to index the tuple and append 1 when
# p >= .9
v0, v1, v2 = _select_vs(v, p)
# y = f - 1.
y0_sq = (_func(A[(p,v0)], p, r, v0) + 1.)**2.
y1_sq = (_func(A[(p,v1)], p, r, v1) + 1.)**2.
y2_sq = (_func(A[(p,v2)], p, r, v2) + 1.)**2.
# if v2 is inf set to a big number so interpolation
# calculations will work
if v2 > 1e38:
v2 = 1e38
# transform v
v_, v0_, v1_, v2_ = 1./v, 1./v0, 1./v1, 1./v2
# calculate derivatives for quadratic interpolation
d2 = 2.*((y2_sq-y1_sq)/(v2_-v1_) - \
(y0_sq-y1_sq)/(v0_-v1_)) / (v2_-v0_)
if (v2_ + v0_) >= (v1_ + v1_):
d1 = (y2_sq-y1_sq) / (v2_-v1_) - 0.5*d2*(v2_-v1_)
else:
d1 = (y1_sq-y0_sq) / (v1_-v0_) + 0.5*d2*(v1_-v0_)
d0 = y1_sq
# calculate y
y = math.sqrt((d2/2.)*(v_-v1_)**2. + d1*(v_-v1_)+ d0)
return y | interpolates v based on the values in the A table for the
scalar value of r and the scalar value of p | _interpolate_v | python | statsmodels/statsmodels | statsmodels/stats/libqsturng/qsturng_.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/libqsturng/qsturng_.py | BSD-3-Clause |
def _qsturng(p, r, v):
"""scalar version of qsturng"""
    # r is interpolated through the q-to-y mapping; here we only need
    # to account for cases where p and/or v are not found in the table.
global A, p_keys, v_keys
if p < .1 or p > .999:
raise ValueError('p must be between .1 and .999')
if p < .9:
if v < 2:
raise ValueError('v must be > 2 when p < .9')
else:
if v < 1:
raise ValueError('v must be > 1 when p >= .9')
# The easy case. A tabled value is requested.
#numpy 1.4.1: TypeError: unhashable type: 'numpy.ndarray' :
p = float(p)
if isinstance(v, np.ndarray):
v = v.item()
if (p,v) in A:
y = _func(A[(p,v)], p, r, v) + 1.
elif p not in p_keys and v not in v_keys+([],[1])[p>=.90]:
# apply bilinear (quadratic) interpolation
#
# p0,v2 + o + p1,v2 + p2,v2
# r2
#
# 1
# - (p,v)
# v x
#
# r1
# p0,v1 + o + p1,v1 + p2,v1
#
#
# p0,v0 + o r0 + p1,v0 + p2,v0
#
# _ptransform(p)
#
# (p1 and v1 may be below or above (p,v). The algorithm
    # works in both cases. For diagrammatic simplicity it is
# shown as above)
#
# 1. at v0, v1, and v2 use quadratic interpolation
# to find r0, r1, r2
#
    # 2. use r0, r1, r2 and quadratic interpolation
# to find y and (p,v)
# find the 3 closest v values
v0, v1, v2 = _select_vs(v, p)
# find the 3 closest p values
p0, p1, p2 = _select_ps(p)
# calculate r0, r1, and r2
r0_sq = _interpolate_p(p, r, v0)**2
r1_sq = _interpolate_p(p, r, v1)**2
r2_sq = _interpolate_p(p, r, v2)**2
# transform v
v_, v0_, v1_, v2_ = 1./v, 1./v0, 1./v1, 1./v2
# calculate derivatives for quadratic interpolation
d2 = 2.*((r2_sq-r1_sq)/(v2_-v1_) - \
(r0_sq-r1_sq)/(v0_-v1_)) / (v2_-v0_)
if (v2_ + v0_) >= (v1_ + v1_):
d1 = (r2_sq-r1_sq) / (v2_-v1_) - 0.5*d2*(v2_-v1_)
else:
d1 = (r1_sq-r0_sq) / (v1_-v0_) + 0.5*d2*(v1_-v0_)
d0 = r1_sq
# calculate y
y = math.sqrt((d2/2.)*(v_-v1_)**2. + d1*(v_-v1_)+ d0)
elif v not in v_keys+([],[1])[p>=.90]:
y = _interpolate_v(p, r, v)
elif p not in p_keys:
y = _interpolate_p(p, r, v)
v = min(v, 1e38)
return math.sqrt(2) * -y * scipy.stats.t.isf((1. + p) / 2., v) | scalar version of qsturng | _qsturng | python | statsmodels/statsmodels | statsmodels/stats/libqsturng/qsturng_.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/libqsturng/qsturng_.py | BSD-3-Clause |
def qsturng(p, r, v):
"""Approximates the quantile p for a studentized range
distribution having v degrees of freedom and r samples
for probability p.
Parameters
----------
p : (scalar, array_like)
The cumulative probability value
p >= .1 and p <=.999
(values under .5 are not recommended)
r : (scalar, array_like)
The number of samples
r >= 2 and r <= 200
(values over 200 are permitted but not recommended)
v : (scalar, array_like)
The sample degrees of freedom
if p >= .9:
            v >= 1 and v <= inf
else:
            v >= 2 and v <= inf
Returns
-------
q : (scalar, array_like)
approximation of the Studentized Range
"""
if all(map(_isfloat, [p, r, v])):
return _qsturng(p, r, v)
return _vqsturng(p, r, v) | Approximates the quantile p for a studentized range
distribution having v degrees of freedom and r samples
for probability p.
Parameters
----------
p : (scalar, array_like)
The cumulative probability value
p >= .1 and p <=.999
(values under .5 are not recommended)
r : (scalar, array_like)
The number of samples
r >= 2 and r <= 200
(values over 200 are permitted but not recommended)
v : (scalar, array_like)
The sample degrees of freedom
if p >= .9:
v >= 1 and v <= inf
else:
v >= 2 and v <= inf
Returns
-------
q : (scalar, array_like)
approximation of the Studentized Range | qsturng | python | statsmodels/statsmodels | statsmodels/stats/libqsturng/qsturng_.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/libqsturng/qsturng_.py | BSD-3-Clause |
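A hedged usage sketch comparing the table-based approximation to SciPy's studentized range quantile (`scipy.stats.studentized_range` is available in SciPy >= 1.7; the public import path mirrors the repo path in the table above):

```python
from scipy.stats import studentized_range
from statsmodels.stats.libqsturng import qsturng

p, r, v = 0.95, 3, 10          # probability, number of samples, dof
print(qsturng(p, r, v))        # table-based approximation, roughly 3.88
print(studentized_range.ppf(p, r, v))   # exact value for comparison
```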
def _psturng(q, r, v):
"""scalar version of psturng"""
if q < 0.:
raise ValueError('q should be >= 0')
def opt_func(p, r, v):
return np.squeeze(abs(_qsturng(p, r, v) - q))
if v == 1:
if q < _qsturng(.9, r, 1):
return .1
elif q > _qsturng(.999, r, 1):
return .001
soln = 1. - fminbound(opt_func, .9, .999, args=(r,v))
return np.atleast_1d(soln)
else:
if q < _qsturng(.1, r, v):
return .9
elif q > _qsturng(.999, r, v):
return .001
soln = 1. - fminbound(opt_func, .1, .999, args=(r,v))
return np.atleast_1d(soln) | scalar version of psturng | _psturng | python | statsmodels/statsmodels | statsmodels/stats/libqsturng/qsturng_.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/libqsturng/qsturng_.py | BSD-3-Clause |
def psturng(q, r, v):
"""Evaluates the probability from 0 to q for a studentized
range having v degrees of freedom and r samples.
Parameters
----------
q : (scalar, array_like)
quantile value of Studentized Range
q >= 0.
r : (scalar, array_like)
The number of samples
r >= 2 and r <= 200
(values over 200 are permitted but not recommended)
v : (scalar, array_like)
The sample degrees of freedom
if p >= .9:
            v >= 1 and v <= inf
else:
            v >= 2 and v <= inf
Returns
-------
p : (scalar, array_like)
1. - area from zero to q under the Studentized Range
distribution. When v == 1, p is bound between .001
and .1, when v > 1, p is bound between .001 and .9.
        Values between .5 and .9 are 1st order approximations.
"""
if all(map(_isfloat, [q, r, v])):
return _psturng(q, r, v)
return _vpsturng(q, r, v) | Evaluates the probability from 0 to q for a studentized
range having v degrees of freedom and r samples.
Parameters
----------
q : (scalar, array_like)
quantile value of Studentized Range
q >= 0.
r : (scalar, array_like)
The number of samples
r >= 2 and r <= 200
(values over 200 are permitted but not recommended)
v : (scalar, array_like)
The sample degrees of freedom
if p >= .9:
v >= 1 and v <= inf
else:
v >= 2 and v <= inf
Returns
-------
p : (scalar, array_like)
1. - area from zero to q under the Studentized Range
distribution. When v == 1, p is bound between .001
and .1, when v > 1, p is bound between .001 and .9.
Values between .5 and .9 are 1st order approximations. | psturng | python | statsmodels/statsmodels | statsmodels/stats/libqsturng/qsturng_.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/libqsturng/qsturng_.py | BSD-3-Clause |
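Since `psturng` numerically inverts `qsturng` (via `fminbound` in `_psturng` above), the pair should approximately round-trip; a minimal sketch of that property:

```python
from statsmodels.stats.libqsturng import psturng, qsturng

q = qsturng(0.95, 3, 10)
# psturng returns 1 - P(Q <= q), i.e. the upper tail probability
print(psturng(q, 3, 10))       # approximately 0.05
```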
def _mom_ate(params, endog, tind, prob, weighted=True):
"""moment condition for average treatment effect
This does not include a moment condition for potential outcome mean (POM).
"""
w1 = (tind / prob)
w0 = (1. - tind) / (1. - prob)
if weighted:
w0 /= w0.mean()
w1 /= w1.mean()
wdiff = w1 - w0
return endog * wdiff - params | moment condition for average treatment effect
This does not include a moment condition for potential outcome mean (POM). | _mom_ate | python | statsmodels/statsmodels | statsmodels/treatment/treatment_effects.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/treatment/treatment_effects.py | BSD-3-Clause |
def _mom_atm(params, endog, tind, prob, weighted=True):
"""moment conditions for average treatment means (POM)
moment conditions are POM0 and POM1
"""
w1 = (tind / prob)
w0 = (1. - tind) / (1. - prob)
if weighted:
w1 /= w1.mean()
w0 /= w0.mean()
return np.column_stack((endog * w0 - params[0], endog * w1 - params[1])) | moment conditions for average treatment means (POM)
moment conditions are POM0 and POM1 | _mom_atm | python | statsmodels/statsmodels | statsmodels/treatment/treatment_effects.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/treatment/treatment_effects.py | BSD-3-Clause |
def _mom_ols(params, endog, tind, prob, weighted=True):
"""
moment condition for average treatment mean based on OLS dummy regression
moment conditions are POM0 and POM1
"""
w = tind / prob + (1-tind) / (1 - prob)
treat_ind = np.column_stack((1 - tind, tind))
mom = (w * (endog - treat_ind.dot(params)))[:, None] * treat_ind
return mom | moment condition for average treatment mean based on OLS dummy regression
moment conditions are POM0 and POM1 | _mom_ols | python | statsmodels/statsmodels | statsmodels/treatment/treatment_effects.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/treatment/treatment_effects.py | BSD-3-Clause |
def _mom_ols_te(tm, endog, tind, prob, weighted=True):
"""
moment condition for average treatment mean based on OLS dummy regression
first moment is ATE
second moment is POM0 (control)
"""
w = tind / prob + (1-tind) / (1 - prob)
treat_ind = np.column_stack((tind, np.ones(len(tind))))
mom = (w * (endog - treat_ind.dot(tm)))[:, None] * treat_ind
return mom | moment condition for average treatment mean based on OLS dummy regression
first moment is ATE
second moment is POM0 (control) | _mom_ols_te | python | statsmodels/statsmodels | statsmodels/treatment/treatment_effects.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/treatment/treatment_effects.py | BSD-3-Clause |
def ate_ipw(endog, tind, prob, weighted=True, probt=None):
"""average treatment effect based on basic inverse propensity weighting.
"""
w1 = (tind / prob)
w0 = (1. - tind) / (1. - prob)
if probt is not None:
w1 *= probt
w0 *= probt
if weighted:
w0 /= w0.mean()
w1 /= w1.mean()
wdiff = w1 - w0
return (endog * wdiff).mean(), (endog * w0).mean(), (endog * w1).mean() | average treatment effect based on basic inverse propensity weighting. | ate_ipw | python | statsmodels/statsmodels | statsmodels/treatment/treatment_effects.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/treatment/treatment_effects.py | BSD-3-Clause |
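A synthetic-data sketch tying `ate_ipw` to the `_mom_ate` moment condition above: at the IPW estimate, the sample moments should average to (numerically) zero. The private imports are an assumption about the module layout:

```python
import numpy as np
from statsmodels.treatment.treatment_effects import _mom_ate, ate_ipw

rng = np.random.default_rng(0)
n = 1000
prob = np.full(n, 0.4)                        # known selection probability
tind = (rng.random(n) < prob).astype(float)   # random treatment assignment
endog = 1.0 + 2.0 * tind + rng.standard_normal(n)

ate, pom0, pom1 = ate_ipw(endog, tind, prob)
print(ate)                                      # close to the true ATE of 2
print(_mom_ate(ate, endog, tind, prob).mean())  # ~0 by construction
```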
def from_data(cls, endog, exog, treatment, model='ols', **kwds):
"""create models from data
not yet implemented
"""
raise NotImplementedError | create models from data
not yet implemented | from_data | python | statsmodels/statsmodels | statsmodels/treatment/treatment_effects.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/treatment/treatment_effects.py | BSD-3-Clause |
def ipw(self, return_results=True, effect_group="all", disp=False):
"""Inverse Probability Weighted treatment effect estimation.
Parameters
----------
return_results : bool
If True, then a results instance is returned.
If False, just ATE, POM0 and POM1 are returned.
effect_group : {"all", 0, 1}
            ``effect_group`` determines for which population the effects are
estimated.
If effect_group is "all", then sample average treatment effect and
potential outcomes are returned.
If effect_group is 1 or "treated", then effects on treated are
returned.
If effect_group is 0, "treated" or "control", then effects on
untreated, i.e. control group, are returned.
disp : bool
Indicates whether the scipy optimizer should display the
optimization results
Returns
-------
TreatmentEffectsResults instance or tuple (ATE, POM0, POM1)
See Also
--------
TreatmentEffectsResults
"""
endog = self.model_pool.endog
tind = self.treatment
prob = self.prob_select
if effect_group == "all":
probt = None
elif effect_group in [1, "treated"]:
probt = prob
effect_group = 1 # standardize effect_group name
elif effect_group in [0, "untreated", "control"]:
probt = 1 - prob
effect_group = 0 # standardize effect_group name
elif isinstance(effect_group, np.ndarray):
probt = effect_group
effect_group = "user" # standardize effect_group name
else:
raise ValueError("incorrect option for effect_group")
res_ipw = ate_ipw(endog, tind, prob, weighted=True, probt=probt)
if not return_results:
return res_ipw
# gmm = _TEGMMGeneric1(endog, self.results_select, _mom_ols_te,
# probt=probt)
gmm = _IPWGMM(endog, self.results_select, None, teff=self,
effect_group=effect_group)
start_params = np.concatenate((res_ipw[:2],
self.results_select.params))
res_gmm = gmm.fit(start_params=start_params,
inv_weights=np.eye(len(start_params)),
optim_method='nm',
optim_args={"maxiter": 5000, "disp": disp},
maxiter=1,
)
res = TreatmentEffectResults(self, res_gmm, "IPW",
start_params=start_params,
effect_group=effect_group,
)
return res | Inverse Probability Weighted treatment effect estimation.
Parameters
----------
return_results : bool
If True, then a results instance is returned.
If False, just ATE, POM0 and POM1 are returned.
effect_group : {"all", 0, 1}
``effect_group`` determines for which population the effects are
estimated.
If effect_group is "all", then sample average treatment effect and
potential outcomes are returned.
If effect_group is 1 or "treated", then effects on treated are
returned.
If effect_group is 0, "treated" or "control", then effects on
untreated, i.e. control group, are returned.
disp : bool
Indicates whether the scipy optimizer should display the
optimization results
Returns
-------
TreatmentEffectsResults instance or tuple (ATE, POM0, POM1)
See Also
--------
TreatmentEffectsResults | ipw | python | statsmodels/statsmodels | statsmodels/treatment/treatment_effects.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/treatment/treatment_effects.py | BSD-3-Clause |
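A hedged end-to-end sketch of IPW estimation with synthetic data; the `TreatmentEffect` constructor signature used here (pooled outcome model, treatment indicator, fitted selection results) is an assumption based on the statsmodels treatment-effects API:

```python
import numpy as np
import statsmodels.api as sm
from statsmodels.treatment.treatment_effects import TreatmentEffect

rng = np.random.default_rng(0)
n = 2000
x = sm.add_constant(rng.standard_normal(n))
p_treat = 1 / (1 + np.exp(-x[:, 1]))           # selection depends on x
treat = (rng.random(n) < p_treat).astype(int)
y = 1.0 + 2.0 * treat + x[:, 1] + rng.standard_normal(n)

res_select = sm.Probit(treat, x).fit(disp=False)
teff = TreatmentEffect(sm.OLS(y, x), treat, results_select=res_select)
print(teff.ipw(return_results=False))          # (ATE, POM0, POM1), ATE ~ 2
```

The other estimators on this class shown below (`ra`, `aipw`, `aipw_wls`, `ipw_ra`) follow the same call pattern, differing only in how the outcome and selection models are combined.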
def ra(self, return_results=True, effect_group="all", disp=False):
"""
Regression Adjustment treatment effect estimation.
\n%(params_returns)s
See Also
--------
TreatmentEffectsResults
"""
# need indicator for reordered observations
tind = np.zeros(len(self.treatment))
tind[-self.treatment.sum():] = 1
if effect_group == "all":
probt = None
elif effect_group in [1, "treated"]:
probt = tind
effect_group = 1 # standardize effect_group name
elif effect_group in [0, "untreated", "control"]:
probt = 1 - tind
effect_group = 0 # standardize effect_group name
elif isinstance(effect_group, np.ndarray):
# TODO: do we keep this?
probt = effect_group
effect_group = "user" # standardize effect_group name
else:
raise ValueError("incorrect option for effect_group")
exog = self.exog_grouped
# weight or indicator for effect_group
if probt is not None:
cw = (probt / probt.mean())
else:
cw = 1
pom0 = (self.results0.predict(exog) * cw).mean()
pom1 = (self.results1.predict(exog) * cw).mean()
if not return_results:
return pom1 - pom0, pom0, pom1
endog = self.model_pool.endog
mod_gmm = _RAGMM(endog, self.results_select, None, teff=self,
probt=probt)
start_params = np.concatenate((
# ate, tt0.effect,
[pom1 - pom0, pom0],
self.results0.params,
self.results1.params))
res_gmm = mod_gmm.fit(start_params=start_params,
inv_weights=np.eye(len(start_params)),
optim_method='nm',
optim_args={"maxiter": 5000, "disp": disp},
maxiter=1,
)
res = TreatmentEffectResults(self, res_gmm, "IPW",
start_params=start_params,
effect_group=effect_group,
)
return res | Regression Adjustment treatment effect estimation.
\n%(params_returns)s
See Also
--------
TreatmentEffectsResults | ra | python | statsmodels/statsmodels | statsmodels/treatment/treatment_effects.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/treatment/treatment_effects.py | BSD-3-Clause |
def aipw(self, return_results=True, disp=False):
"""
ATE and POM from double robust augmented inverse probability weighting
\n%(params_returns)s
See Also
--------
TreatmentEffectsResults
"""
nobs = self.nobs
prob = self.prob_select
tind = self.treatment
exog = self.model_pool.exog # in original order
correct0 = (self.results0.resid / (1 - prob[tind == 0])).sum() / nobs
correct1 = (self.results1.resid / (prob[tind == 1])).sum() / nobs
tmean0 = self.results0.predict(exog).mean() + correct0
tmean1 = self.results1.predict(exog).mean() + correct1
ate = tmean1 - tmean0
if not return_results:
return ate, tmean0, tmean1
endog = self.model_pool.endog
p2_aipw = np.asarray([ate, tmean0])
mag_aipw1 = _AIPWGMM(endog, self.results_select, None, teff=self)
start_params = np.concatenate((
p2_aipw,
self.results0.params, self.results1.params,
self.results_select.params))
res_gmm = mag_aipw1.fit(
start_params=start_params,
inv_weights=np.eye(len(start_params)),
optim_method='nm',
optim_args={"maxiter": 5000, "disp": disp},
maxiter=1)
res = TreatmentEffectResults(self, res_gmm, "IPW",
start_params=start_params,
effect_group="all",
)
return res | ATE and POM from double robust augmented inverse probability weighting
\n%(params_returns)s
See Also
--------
TreatmentEffectsResults | aipw | python | statsmodels/statsmodels | statsmodels/treatment/treatment_effects.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/treatment/treatment_effects.py | BSD-3-Clause |
def aipw_wls(self, return_results=True, disp=False):
"""
ATE and POM from double robust augmented inverse probability weighting.
This uses weighted outcome regression, while `aipw` uses unweighted
outcome regression.
Option for effect on treated or on untreated is not available.
\n%(params_returns)s
See Also
--------
TreatmentEffectsResults
"""
nobs = self.nobs
prob = self.prob_select
endog = self.model_pool.endog
exog = self.model_pool.exog
tind = self.treatment
treat_mask = self.treat_mask
ww1 = tind / prob * (tind / prob - 1)
mod1 = WLS(endog[treat_mask], exog[treat_mask],
weights=ww1[treat_mask])
result1 = mod1.fit(cov_type='HC1')
mean1_ipw2 = result1.predict(exog).mean()
ww0 = (1 - tind) / (1 - prob) * ((1 - tind) / (1 - prob) - 1)
mod0 = WLS(endog[~treat_mask], exog[~treat_mask],
weights=ww0[~treat_mask])
result0 = mod0.fit(cov_type='HC1')
mean0_ipw2 = result0.predict(exog).mean()
self.results_ipwwls0 = result0
self.results_ipwwls1 = result1
correct0 = (result0.resid / (1 - prob[tind == 0])).sum() / nobs
correct1 = (result1.resid / (prob[tind == 1])).sum() / nobs
tmean0 = mean0_ipw2 + correct0
tmean1 = mean1_ipw2 + correct1
ate = tmean1 - tmean0
if not return_results:
return ate, tmean0, tmean1
p2_aipw_wls = np.asarray([ate, tmean0]).squeeze()
# GMM
mod_gmm = _AIPWWLSGMM(endog, self.results_select, None,
teff=self)
start_params = np.concatenate((
p2_aipw_wls,
result0.params,
result1.params,
self.results_select.params))
res_gmm = mod_gmm.fit(
start_params=start_params,
inv_weights=np.eye(len(start_params)),
optim_method='nm',
optim_args={"maxiter": 5000, "disp": disp},
maxiter=1)
res = TreatmentEffectResults(self, res_gmm, "IPW",
start_params=start_params,
effect_group="all",
)
return res | ATE and POM from double robust augmented inverse probability weighting.
This uses weighted outcome regression, while `aipw` uses unweighted
outcome regression.
Option for effect on treated or on untreated is not available.
\n%(params_returns)s
See Also
--------
TreatmentEffectsResults | aipw_wls | python | statsmodels/statsmodels | statsmodels/treatment/treatment_effects.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/treatment/treatment_effects.py | BSD-3-Clause |
def ipw_ra(self, return_results=True, effect_group="all", disp=False):
"""
ATE and POM from inverse probability weighted regression adjustment.
\n%(params_returns)s
See Also
--------
TreatmentEffectsResults
"""
treat_mask = self.treat_mask
endog = self.model_pool.endog
exog = self.model_pool.exog
prob = self.prob_select
prob0 = prob[~treat_mask]
prob1 = prob[treat_mask]
if effect_group == "all":
w0 = 1 / (1 - prob0)
w1 = 1 / prob1
exogt = exog
elif effect_group in [1, "treated"]:
w0 = prob0 / (1 - prob0)
w1 = prob1 / prob1
exogt = exog[treat_mask]
effect_group = 1 # standardize effect_group name
elif effect_group in [0, "untreated", "control"]:
w0 = (1 - prob0) / (1 - prob0)
w1 = (1 - prob1) / prob1
exogt = exog[~treat_mask]
effect_group = 0 # standardize effect_group name
else:
raise ValueError("incorrect option for effect_group")
mod0 = WLS(endog[~treat_mask], exog[~treat_mask],
weights=w0)
result0 = mod0.fit(cov_type='HC1')
# mean0_ipwra = (result0.predict(exog) * (prob / prob.mean())).mean()
mean0_ipwra = result0.predict(exogt).mean()
mod1 = WLS(endog[treat_mask], exog[treat_mask],
weights=w1)
result1 = mod1.fit(cov_type='HC1')
# mean1_ipwra = (result1.predict(exog) * (prob / prob.mean())).mean()
mean1_ipwra = result1.predict(exogt).mean()
if not return_results:
return mean1_ipwra - mean0_ipwra, mean0_ipwra, mean1_ipwra
# GMM
mod_gmm = _IPWRAGMM(endog, self.results_select, None, teff=self,
effect_group=effect_group)
start_params = np.concatenate((
[mean1_ipwra - mean0_ipwra, mean0_ipwra],
result0.params,
result1.params,
np.asarray(self.results_select.params)
))
res_gmm = mod_gmm.fit(
start_params=start_params,
inv_weights=np.eye(len(start_params)),
optim_method='nm',
optim_args={"maxiter": 2000, "disp": disp},
maxiter=1
)
res = TreatmentEffectResults(self, res_gmm, "IPW",
start_params=start_params,
effect_group=effect_group,
)
return res | ATE and POM from inverse probability weighted regression adjustment.
\n%(params_returns)s
See Also
--------
TreatmentEffectsResults | ipw_ra | python | statsmodels/statsmodels | statsmodels/treatment/treatment_effects.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/treatment/treatment_effects.py | BSD-3-Clause |
def _calc_survfunc_right(time, status, weights=None, entry=None, compress=True,
retall=True):
"""
Calculate the survival function and its standard error for a single
group.
"""
# Convert the unique times to ranks (0, 1, 2, ...)
if entry is None:
utime, rtime = np.unique(time, return_inverse=True)
else:
tx = np.concatenate((time, entry))
utime, rtime = np.unique(tx, return_inverse=True)
rtime = rtime[0:len(time)]
# Number of deaths at each unique time.
ml = len(utime)
if weights is None:
d = np.bincount(rtime, weights=status, minlength=ml)
else:
d = np.bincount(rtime, weights=status*weights, minlength=ml)
# Size of risk set just prior to each event time.
if weights is None:
n = np.bincount(rtime, minlength=ml)
else:
n = np.bincount(rtime, weights=weights, minlength=ml)
if entry is not None:
n = np.cumsum(n) - n
rentry = np.searchsorted(utime, entry, side='left')
if weights is None:
n0 = np.bincount(rentry, minlength=ml)
else:
n0 = np.bincount(rentry, weights=weights, minlength=ml)
n0 = np.cumsum(n0) - n0
n = n0 - n
else:
n = np.cumsum(n[::-1])[::-1]
# Only retain times where an event occurred.
if compress:
ii = np.flatnonzero(d > 0)
d = d[ii]
n = n[ii]
utime = utime[ii]
# The survival function probabilities.
sp = 1 - d / n.astype(np.float64)
ii = sp < 1e-16
sp[ii] = 1e-16
sp = np.log(sp)
sp = np.cumsum(sp)
sp = np.exp(sp)
sp[ii] = 0
if not retall:
return sp, utime, rtime, n, d
# Standard errors
if weights is None:
# Greenwood's formula
denom = n * (n - d)
denom = np.clip(denom, 1e-12, np.inf)
se = d / denom.astype(np.float64)
se[(n == d) | (n == 0)] = np.nan
se = np.cumsum(se)
se = np.sqrt(se)
locs = np.isfinite(se) | (sp != 0)
se[locs] *= sp[locs]
se[~locs] = np.nan
else:
# Tsiatis' (1981) formula
se = d / (n * n).astype(np.float64)
se = np.cumsum(se)
se = np.sqrt(se)
return sp, se, utime, rtime, n, d | Calculate the survival function and its standard error for a single
group. | _calc_survfunc_right | python | statsmodels/statsmodels | statsmodels/duration/survfunc.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/survfunc.py | BSD-3-Clause |
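A toy check of the Kaplan-Meier computation (assuming the private helper is importable from `statsmodels.duration.survfunc`):

```python
import numpy as np
from statsmodels.duration.survfunc import _calc_survfunc_right

time = np.array([1., 2., 3., 4., 5.])
status = np.array([1., 0., 1., 1., 0.])    # 0 indicates censoring
sp, se, utime, rtime, n, d = _calc_survfunc_right(time, status)
# events at t = 1, 3, 4 with risk sets 5, 3, 2, so the survival
# probabilities are 4/5, then 4/5 * 2/3, then 4/5 * 2/3 * 1/2
print(utime)   # [1. 3. 4.]
print(sp)      # [0.8        0.53333333 0.26666667]
```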
def _calc_incidence_right(time, status, weights=None):
"""
Calculate the cumulative incidence function and its standard error.
"""
# Calculate the all-cause survival function.
status0 = (status >= 1).astype(np.float64)
sp, utime, rtime, n, d = _calc_survfunc_right(time, status0, weights,
compress=False, retall=False)
ngrp = int(status.max())
# Number of cause-specific deaths at each unique time.
d = []
for k in range(ngrp):
status0 = (status == k + 1).astype(np.float64)
if weights is None:
d0 = np.bincount(rtime, weights=status0, minlength=len(utime))
else:
d0 = np.bincount(rtime, weights=status0*weights,
minlength=len(utime))
d.append(d0)
# The cumulative incidence function probabilities.
ip = []
sp0 = np.r_[1, sp[:-1]] / n
for k in range(ngrp):
ip0 = np.cumsum(sp0 * d[k])
ip.append(ip0)
# The standard error of the cumulative incidence function.
if weights is not None:
return ip, None, utime
se = []
da = sum(d)
for k in range(ngrp):
ra = da / (n * (n - da))
v = ip[k]**2 * np.cumsum(ra)
v -= 2 * ip[k] * np.cumsum(ip[k] * ra)
v += np.cumsum(ip[k]**2 * ra)
ra = (n - d[k]) * d[k] / n
v += np.cumsum(sp0**2 * ra)
ra = sp0 * d[k] / n
v -= 2 * ip[k] * np.cumsum(ra)
v += 2 * np.cumsum(ip[k] * ra)
se.append(np.sqrt(v))
return ip, se, utime | Calculate the cumulative incidence function and its standard error. | _calc_incidence_right | python | statsmodels/statsmodels | statsmodels/duration/survfunc.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/survfunc.py | BSD-3-Clause |
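A toy competing-risks sketch (status 0 = censored; 1 and 2 are two distinct event causes; the private import path is an assumption):

```python
import numpy as np
from statsmodels.duration.survfunc import _calc_incidence_right

time = np.array([1., 2., 3., 4., 5., 6.])
status = np.array([1, 2, 0, 1, 2, 0])
ip, se, utime = _calc_incidence_right(time, status)
print(ip[0])   # cumulative incidence of cause 1 at each time in utime
print(ip[1])   # cumulative incidence of cause 2
```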
def plot(self, ax=None):
"""
Plot the survival function.
Examples
--------
Change the line color:
>>> import statsmodels.api as sm
>>> data = sm.datasets.get_rdataset("flchain", "survival").data
>>> df = data.loc[data.sex == "F", :]
>>> sf = sm.SurvfuncRight(df["futime"], df["death"])
>>> fig = sf.plot()
>>> ax = fig.get_axes()[0]
>>> li = ax.get_lines()
>>> li[0].set_color('purple')
>>> li[1].set_color('purple')
Do not show the censoring points:
>>> fig = sf.plot()
>>> ax = fig.get_axes()[0]
>>> li = ax.get_lines()
>>> li[1].set_visible(False)
"""
return plot_survfunc(self, ax) | Plot the survival function.
Examples
--------
Change the line color:
>>> import statsmodels.api as sm
>>> data = sm.datasets.get_rdataset("flchain", "survival").data
>>> df = data.loc[data.sex == "F", :]
>>> sf = sm.SurvfuncRight(df["futime"], df["death"])
>>> fig = sf.plot()
>>> ax = fig.get_axes()[0]
>>> li = ax.get_lines()
>>> li[0].set_color('purple')
>>> li[1].set_color('purple')
Do not show the censoring points:
>>> fig = sf.plot()
>>> ax = fig.get_axes()[0]
>>> li = ax.get_lines()
>>> li[1].set_visible(False) | plot | python | statsmodels/statsmodels | statsmodels/duration/survfunc.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/survfunc.py | BSD-3-Clause |
def quantile(self, p):
"""
Estimated quantile of a survival distribution.
Parameters
----------
p : float
The probability point at which the quantile
is determined.
Returns the estimated quantile.
"""
# SAS uses a strict inequality here.
ii = np.flatnonzero(self.surv_prob < 1 - p)
if len(ii) == 0:
return np.nan
return self.surv_times[ii[0]] | Estimated quantile of a survival distribution.
Parameters
----------
p : float
The probability point at which the quantile
is determined.
Returns the estimated quantile. | quantile | python | statsmodels/statsmodels | statsmodels/duration/survfunc.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/survfunc.py | BSD-3-Clause |
def quantile_ci(self, p, alpha=0.05, method='cloglog'):
"""
Returns a confidence interval for a survival quantile.
Parameters
----------
p : float
The probability point for which a confidence interval is
determined.
alpha : float
The confidence interval has nominal coverage probability
1 - `alpha`.
method : str
            Function to use for g-transformation; must be one of
            'cloglog' (default), 'linear', 'log', 'logit', or 'asinsqrt'.
Returns
-------
lb : float
The lower confidence limit.
ub : float
The upper confidence limit.
Notes
-----
The confidence interval is obtained by inverting Z-tests. The
limits of the confidence interval will always be observed
event times.
References
----------
The method is based on the approach used in SAS, documented here:
http://support.sas.com/documentation/cdl/en/statug/68162/HTML/default/viewer.htm#statug_lifetest_details03.htm
"""
tr = norm.ppf(1 - alpha / 2)
method = method.lower()
if method == "cloglog":
def g(x):
return np.log(-np.log(x))
def gprime(x):
return -1 / (x * np.log(x))
elif method == "linear":
def g(x):
return x
def gprime(x):
return 1
elif method == "log":
g = np.log
def gprime(x):
return 1 / x
elif method == "logit":
def g(x):
return np.log(x / (1 - x))
def gprime(x):
return 1 / (x * (1 - x))
elif method == "asinsqrt":
def g(x):
return np.arcsin(np.sqrt(x))
def gprime(x):
return 1 / (2 * np.sqrt(x) * np.sqrt(1 - x))
else:
raise ValueError("unknown method")
r = g(self.surv_prob) - g(1 - p)
r /= (gprime(self.surv_prob) * self.surv_prob_se)
ii = np.flatnonzero(np.abs(r) <= tr)
if len(ii) == 0:
return np.nan, np.nan
lb = self.surv_times[ii[0]]
if ii[-1] == len(self.surv_times) - 1:
ub = np.inf
else:
ub = self.surv_times[ii[-1] + 1]
return lb, ub | Returns a confidence interval for a survival quantile.
Parameters
----------
p : float
The probability point for which a confidence interval is
determined.
alpha : float
The confidence interval has nominal coverage probability
1 - `alpha`.
method : str
Function to use for g-transformation; must be one of
'cloglog' (default), 'linear', 'log', 'logit', or 'asinsqrt'.
Returns
-------
lb : float
The lower confidence limit.
ub : float
The upper confidence limit.
Notes
-----
The confidence interval is obtained by inverting Z-tests. The
limits of the confidence interval will always be observed
event times.
References
----------
The method is based on the approach used in SAS, documented here:
http://support.sas.com/documentation/cdl/en/statug/68162/HTML/default/viewer.htm#statug_lifetest_details03.htm | quantile_ci | python | statsmodels/statsmodels | statsmodels/duration/survfunc.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/survfunc.py | BSD-3-Clause |
def summary(self):
"""
Return a summary of the estimated survival function.
The summary is a dataframe containing the unique event times,
estimated survival function values, and related quantities.
"""
df = pd.DataFrame(index=self.surv_times)
df.index.name = "Time"
df["Surv prob"] = self.surv_prob
df["Surv prob SE"] = self.surv_prob_se
df["num at risk"] = self.n_risk
df["num events"] = self.n_events
return df | Return a summary of the estimated survival function.
The summary is a dataframe containing the unique event times,
estimated survival function values, and related quantities. | summary | python | statsmodels/statsmodels | statsmodels/duration/survfunc.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/survfunc.py | BSD-3-Clause |
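A hedged usage sketch of the public `SurvfuncRight` class that wraps the estimators above, showing the median survival time, its confidence interval, and the summary table on synthetic data:

```python
import numpy as np
from statsmodels.duration.survfunc import SurvfuncRight

rng = np.random.default_rng(0)
time = rng.exponential(10.0, size=200)
status = (rng.random(200) < 0.8).astype(int)   # roughly 20% censored
sf = SurvfuncRight(time, status)
print(sf.quantile(0.5))                        # estimated median time
print(sf.quantile_ci(0.5, method="cloglog"))   # (lower, upper) limits
print(sf.summary().head())
```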
def simultaneous_cb(self, alpha=0.05, method="hw", transform="log"):
"""
Returns a simultaneous confidence band for the survival function.
Parameters
----------
alpha : float
`1 - alpha` is the desired simultaneous coverage
probability for the confidence region. Currently alpha
must be set to 0.05, giving 95% simultaneous intervals.
method : str
The method used to produce the simultaneous confidence
band. Only the Hall-Wellner (hw) method is currently
implemented.
transform : str
            The transform used to produce the interval (note that the returned
interval is on the survival probability scale regardless
of which transform is used). Only `log` and `arcsin` are
implemented.
Returns
-------
lcb : array_like
The lower confidence limits corresponding to the points
in `surv_times`.
ucb : array_like
The upper confidence limits corresponding to the points
in `surv_times`.
"""
method = method.lower()
if method != "hw":
msg = "only the Hall-Wellner (hw) method is implemented"
raise ValueError(msg)
if alpha != 0.05:
raise ValueError("alpha must be set to 0.05")
transform = transform.lower()
s2 = self.surv_prob_se**2 / self.surv_prob**2
nn = self.n_risk
if transform == "log":
denom = np.sqrt(nn) * np.log(self.surv_prob)
theta = 1.3581 * (1 + nn * s2) / denom
theta = np.exp(theta)
lcb = self.surv_prob**(1/theta)
ucb = self.surv_prob**theta
elif transform == "arcsin":
k = 1.3581
k *= (1 + nn * s2) / (2 * np.sqrt(nn))
k *= np.sqrt(self.surv_prob / (1 - self.surv_prob))
f = np.arcsin(np.sqrt(self.surv_prob))
v = np.clip(f - k, 0, np.inf)
lcb = np.sin(v)**2
v = np.clip(f + k, -np.inf, np.pi/2)
ucb = np.sin(v)**2
else:
raise ValueError("Unknown transform")
return lcb, ucb | Returns a simultaneous confidence band for the survival function.
Parameters
----------
alpha : float
`1 - alpha` is the desired simultaneous coverage
probability for the confidence region. Currently alpha
must be set to 0.05, giving 95% simultaneous intervals.
method : str
The method used to produce the simultaneous confidence
band. Only the Hall-Wellner (hw) method is currently
implemented.
transform : str
The transform used to produce the interval (note that the returned
interval is on the survival probability scale regardless
of which transform is used). Only `log` and `arcsin` are
implemented.
Returns
-------
lcb : array_like
The lower confidence limits corresponding to the points
in `surv_times`.
ucb : array_like
The upper confidence limits corresponding to the points
in `surv_times`. | simultaneous_cb | python | statsmodels/statsmodels | statsmodels/duration/survfunc.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/survfunc.py | BSD-3-Clause |
def survdiff(time, status, group, weight_type=None, strata=None,
entry=None, **kwargs):
"""
    Test for the equality of two or more survival distributions.
Parameters
----------
time : array_like
The event or censoring times.
status : array_like
The censoring status variable, status=1 indicates that the
event occurred, status=0 indicates that the observation was
censored.
group : array_like
        Indicators of the two or more groups to compare
weight_type : str
The following weight types are implemented:
None (default) : logrank test
fh : Fleming-Harrington, weights by S^(fh_p),
requires exponent fh_p to be provided as keyword
argument; the weights are derived from S defined at
the previous event time, and the first weight is
always 1.
gb : Gehan-Breslow, weights by the number at risk
tw : Tarone-Ware, weights by the square root of the number
at risk
strata : array_like
Optional stratum indicators for a stratified test
entry : array_like
Entry times to handle left truncation. The subject is not in
the risk set on or before the entry time.
Returns
-------
    chisq : The chi-square distributed test statistic value; the
        degrees of freedom equal the number of groups minus 1
pvalue : The p-value for the chi^2 test
"""
time = np.asarray(time)
status = np.asarray(status)
group = np.asarray(group)
gr = np.unique(group)
if strata is None:
obs, var = _survdiff(time, status, group, weight_type, gr,
entry, **kwargs)
else:
strata = np.asarray(strata)
stu = np.unique(strata)
obs, var = 0., 0.
for st in stu:
# could be more efficient?
ii = (strata == st)
obs1, var1 = _survdiff(time[ii], status[ii], group[ii],
weight_type, gr, entry, **kwargs)
obs += obs1
var += var1
chisq = obs.dot(np.linalg.solve(var, obs)) # (O - E).T * V^(-1) * (O - E)
pvalue = 1 - chi2.cdf(chisq, len(gr)-1)
    return chisq, pvalue | Test for the equality of two or more survival distributions.
Parameters
----------
time : array_like
The event or censoring times.
status : array_like
The censoring status variable, status=1 indicates that the
event occurred, status=0 indicates that the observation was
censored.
group : array_like
Indicators of the two or more groups to compare
weight_type : str
The following weight types are implemented:
None (default) : logrank test
fh : Fleming-Harrington, weights by S^(fh_p),
requires exponent fh_p to be provided as keyword
argument; the weights are derived from S defined at
the previous event time, and the first weight is
always 1.
gb : Gehan-Breslow, weights by the number at risk
tw : Tarone-Ware, weights by the square root of the number
at risk
strata : array_like
Optional stratum indicators for a stratified test
entry : array_like
Entry times to handle left truncation. The subject is not in
the risk set on or before the entry time.
Returns
-------
chisq : The chi-square distributed test statistic value; the
degrees of freedom equal the number of groups minus 1
pvalue : The p-value for the chi^2 test | survdiff | python | statsmodels/statsmodels | statsmodels/duration/survfunc.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/survfunc.py | BSD-3-Clause |
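A short logrank-test sketch on two synthetic groups with genuinely different hazards, so the p-value should be small:

```python
import numpy as np
from statsmodels.duration.survfunc import survdiff

rng = np.random.default_rng(0)
time = np.concatenate([rng.exponential(5.0, size=100),
                       rng.exponential(10.0, size=100)])
status = np.ones(200)               # no censoring in this toy example
group = np.repeat([0, 1], 100)
chisq, pvalue = survdiff(time, status, group)
print(chisq, pvalue)                # 1 dof for two groups
```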
def plot_survfunc(survfuncs, ax=None):
"""
Plot one or more survivor functions.
Parameters
----------
survfuncs : object or array_like
A single SurvfuncRight object, or a list or SurvfuncRight
objects that are plotted together.
Returns
-------
A figure instance on which the plot was drawn.
Examples
--------
Add a legend:
>>> import statsmodels.api as sm
>>> from statsmodels.duration.survfunc import plot_survfunc
>>> data = sm.datasets.get_rdataset("flchain", "survival").data
>>> df = data.loc[data.sex == "F", :]
>>> sf0 = sm.SurvfuncRight(df["futime"], df["death"])
>>> sf1 = sm.SurvfuncRight(3.0 * df["futime"], df["death"])
>>> fig = plot_survfunc([sf0, sf1])
>>> ax = fig.get_axes()[0]
>>> ax.set_position([0.1, 0.1, 0.64, 0.8])
>>> ha, lb = ax.get_legend_handles_labels()
>>> leg = fig.legend((ha[0], ha[1]), (lb[0], lb[1]), loc='center right')
Change the line colors:
>>> fig = plot_survfunc([sf0, sf1])
>>> ax = fig.get_axes()[0]
>>> ax.set_position([0.1, 0.1, 0.64, 0.8])
>>> ha, lb = ax.get_legend_handles_labels()
>>> ha[0].set_color('purple')
>>> ha[1].set_color('orange')
"""
fig, ax = utils.create_mpl_ax(ax)
# If we have only a single survival function to plot, put it into
# a list.
    if isinstance(survfuncs, SurvfuncRight):
        survfuncs = [survfuncs]
for gx, sf in enumerate(survfuncs):
# The estimated survival function does not include a point at
# time 0, include it here for plotting.
surv_times = np.concatenate(([0], sf.surv_times))
surv_prob = np.concatenate(([1], sf.surv_prob))
# If the final times are censoring times they are not included
# in the survival function so we add them here
mxt = max(sf.time)
if mxt > surv_times[-1]:
surv_times = np.concatenate((surv_times, [mxt]))
surv_prob = np.concatenate((surv_prob, [surv_prob[-1]]))
label = getattr(sf, "title", "Group %d" % (gx + 1))
li, = ax.step(surv_times, surv_prob, '-', label=label, lw=2,
where='post')
# Plot the censored points.
ii = np.flatnonzero(np.logical_not(sf.status))
ti = np.unique(sf.time[ii])
jj = np.searchsorted(surv_times, ti) - 1
sp = surv_prob[jj]
ax.plot(ti, sp, '+', ms=12, color=li.get_color(),
label=label + " points")
ax.set_ylim(0, 1.01)
return fig | Plot one or more survivor functions.
Parameters
----------
survfuncs : object or array_like
A single SurvfuncRight object, or a list or SurvfuncRight
objects that are plotted together.
Returns
-------
A figure instance on which the plot was drawn.
Examples
--------
Add a legend:
>>> import statsmodels.api as sm
>>> from statsmodels.duration.survfunc import plot_survfunc
>>> data = sm.datasets.get_rdataset("flchain", "survival").data
>>> df = data.loc[data.sex == "F", :]
>>> sf0 = sm.SurvfuncRight(df["futime"], df["death"])
>>> sf1 = sm.SurvfuncRight(3.0 * df["futime"], df["death"])
>>> fig = plot_survfunc([sf0, sf1])
>>> ax = fig.get_axes()[0]
>>> ax.set_position([0.1, 0.1, 0.64, 0.8])
>>> ha, lb = ax.get_legend_handles_labels()
>>> leg = fig.legend((ha[0], ha[1]), (lb[0], lb[1]), loc='center right')
Change the line colors:
>>> fig = plot_survfunc([sf0, sf1])
>>> ax = fig.get_axes()[0]
>>> ax.set_position([0.1, 0.1, 0.64, 0.8])
>>> ha, lb = ax.get_legend_handles_labels()
>>> ha[0].set_color('purple')
>>> ha[1].set_color('orange') | plot_survfunc | python | statsmodels/statsmodels | statsmodels/duration/survfunc.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/survfunc.py | BSD-3-Clause |
def _kernel_cumincidence(time, status, exog, kfunc, freq_weights,
dimred=True):
"""
Calculates cumulative incidence functions using kernels.
Parameters
----------
time : array_like
The observed time values
status : array_like
The status values. status == 0 indicates censoring,
status == 1, 2, ... are the events.
exog : array_like
Covariates such that censoring becomes independent of
outcome times conditioned on the covariate values.
kfunc : function
A kernel function
freq_weights : array_like
Optional frequency weights
dimred : bool
If True, proportional hazards regression models are used to
reduce exog to two columns by predicting overall events and
censoring in two separate models. If False, exog is used
directly for calculating kernel weights without dimension
reduction.
"""
# Reorder so time is ascending
ii = np.argsort(time)
time = time[ii]
status = status[ii]
exog = exog[ii, :]
nobs = len(time)
# Convert the unique times to ranks (0, 1, 2, ...)
utime, rtime = np.unique(time, return_inverse=True)
# Last index where each unique time occurs.
ie = np.searchsorted(time, utime, side='right') - 1
ngrp = int(status.max())
# All-cause status
statusa = (status >= 1).astype(np.float64)
if freq_weights is not None:
freq_weights = freq_weights / freq_weights.sum()
ip = []
sp = [None] * nobs
n_risk = [None] * nobs
kd = [None] * nobs
for k in range(ngrp):
status0 = (status == k + 1).astype(np.float64)
# Dimension reduction step
if dimred:
sfe = PHReg(time, exog, status0).fit()
fitval_e = sfe.predict().predicted_values
sfc = PHReg(time, exog, 1 - status0).fit()
fitval_c = sfc.predict().predicted_values
exog2d = np.hstack((fitval_e[:, None], fitval_c[:, None]))
exog2d -= exog2d.mean(0)
exog2d /= exog2d.std(0)
else:
exog2d = exog
ip0 = 0
for i in range(nobs):
if k == 0:
kd1 = exog2d - exog2d[i, :]
kd1 = kfunc(kd1)
kd[i] = kd1
# Get the local all-causes survival function
if k == 0:
denom = np.cumsum(kd[i][::-1])[::-1]
num = kd[i] * statusa
rat = num / denom
tr = 1e-15
ii = np.flatnonzero((denom < tr) & (num < tr))
rat[ii] = 0
ratc = 1 - rat
ratc = np.clip(ratc, 1e-10, np.inf)
lrat = np.log(ratc)
prat = np.cumsum(lrat)[ie]
sf = np.exp(prat)
sp[i] = np.r_[1, sf[:-1]]
n_risk[i] = denom[ie]
# Number of cause-specific deaths at each unique time.
d0 = np.bincount(rtime, weights=status0*kd[i],
minlength=len(utime))
# The cumulative incidence function probabilities. Carry
# forward once the effective sample size drops below 1.
ip1 = np.cumsum(sp[i] * d0 / n_risk[i])
jj = len(ip1) - np.searchsorted(n_risk[i][::-1], 1)
if jj < len(ip1):
ip1[jj:] = ip1[jj - 1]
if freq_weights is None:
ip0 += ip1
else:
ip0 += freq_weights[i] * ip1
if freq_weights is None:
ip0 /= nobs
ip.append(ip0)
return utime, ip | Calculates cumulative incidence functions using kernels.
Parameters
----------
time : array_like
The observed time values
status : array_like
The status values. status == 0 indicates censoring,
status == 1, 2, ... are the events.
exog : array_like
Covariates such that censoring becomes independent of
outcome times conditioned on the covariate values.
kfunc : function
A kernel function
freq_weights : array_like
Optional frequency weights
dimred : bool
If True, proportional hazards regression models are used to
reduce exog to two columns by predicting overall events and
censoring in two separate models. If False, exog is used
directly for calculating kernel weights without dimension
reduction. | _kernel_cumincidence | python | statsmodels/statsmodels | statsmodels/duration/_kernel_estimates.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/_kernel_estimates.py | BSD-3-Clause |
def _kernel_survfunc(time, status, exog, kfunc, freq_weights):
"""
Estimate the marginal survival function under dependent censoring.
Parameters
----------
time : array_like
The observed times for each subject
status : array_like
The status for each subject (1 indicates event, 0 indicates
censoring)
exog : array_like
Covariates such that censoring is independent conditional on
exog
kfunc : function
Kernel function
freq_weights : array_like
Optional frequency weights
Returns
-------
probs : array_like
The estimated survival probabilities
times : array_like
The times at which the survival probabilities are estimated
References
----------
Zeng, Donglin 2004. Estimating Marginal Survival Function by
Adjusting for Dependent Censoring Using Many Covariates. The
    Annals of Statistics 32 (4): 1533-1555.
doi:10.1214/009053604000000508.
https://arxiv.org/pdf/math/0409180.pdf
"""
# Dimension reduction step
sfe = PHReg(time, exog, status).fit()
fitval_e = sfe.predict().predicted_values
sfc = PHReg(time, exog, 1 - status).fit()
fitval_c = sfc.predict().predicted_values
exog2d = np.hstack((fitval_e[:, None], fitval_c[:, None]))
n = len(time)
ixd = np.flatnonzero(status == 1)
# For consistency with standard KM, only compute the survival
# function at the times of observed events.
utime = np.unique(time[ixd])
# Reorder everything so time is ascending
ii = np.argsort(time)
time = time[ii]
status = status[ii]
exog2d = exog2d[ii, :]
# Last index where each evaluation time occurs.
ie = np.searchsorted(time, utime, side='right') - 1
if freq_weights is not None:
freq_weights = freq_weights / freq_weights.sum()
sprob = 0.
for i in range(n):
kd = exog2d - exog2d[i, :]
kd = kfunc(kd)
denom = np.cumsum(kd[::-1])[::-1]
num = kd * status
rat = num / denom
tr = 1e-15
ii = np.flatnonzero((denom < tr) & (num < tr))
rat[ii] = 0
ratc = 1 - rat
ratc = np.clip(ratc, 1e-12, np.inf)
lrat = np.log(ratc)
prat = np.cumsum(lrat)[ie]
prat = np.exp(prat)
if freq_weights is None:
sprob += prat
else:
sprob += prat * freq_weights[i]
if freq_weights is None:
sprob /= n
return sprob, utime | Estimate the marginal survival function under dependent censoring.
Parameters
----------
time : array_like
The observed times for each subject
status : array_like
The status for each subject (1 indicates event, 0 indicates
censoring)
exog : array_like
Covariates such that censoring is independent conditional on
exog
kfunc : function
Kernel function
freq_weights : array_like
Optional frequency weights
Returns
-------
probs : array_like
The estimated survival probabilities
times : array_like
The times at which the survival probabilities are estimated
References
----------
Zeng, Donglin 2004. Estimating Marginal Survival Function by
Adjusting for Dependent Censoring Using Many Covariates. The
Annals of Statistics 32 (4): 1533-1555.
doi:10.1214/009053604000000508.
https://arxiv.org/pdf/math/0409180.pdf | _kernel_survfunc | python | statsmodels/statsmodels | statsmodels/duration/_kernel_estimates.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/_kernel_estimates.py | BSD-3-Clause |
def __init__(self, time, status, exog, strata=None, entry=None,
offset=None):
"""
Represent a collection of survival times with possible
stratification and left truncation.
Parameters
----------
time : array_like
The times at which either the event (failure) occurs or
the observation is censored.
status : array_like
Indicates whether the event (failure) occurs at `time`
(`status` is 1), or if `time` is a censoring time (`status`
is 0).
exog : array_like
The exogeneous (covariate) data matrix, cases are rows and
variables are columns.
strata : array_like
Grouping variable defining the strata. If None, all
observations are in a single stratum.
entry : array_like
Entry (left truncation) times. The observation is not
part of the risk set for times before the entry time. If
None, the entry time is treated as being zero, which
gives no left truncation. The entry time must be less
than or equal to `time`.
offset : array_like
An optional array of offsets
"""
# Default strata
if strata is None:
strata = np.zeros(len(time), dtype=np.int32)
# Default entry times
if entry is None:
entry = np.zeros(len(time))
# Parameter validity checks.
self._check(time, status, strata, entry)
# Get the row indices for the cases in each stratum
stu = np.unique(strata)
sth = {x: [] for x in stu}
for i,k in enumerate(strata):
sth[k].append(i)
stratum_rows = [np.asarray(sth[k], dtype=np.int32) for k in stu]
stratum_names = stu
# Remove strata with no events
ix = [i for i,ix in enumerate(stratum_rows) if status[ix].sum() > 0]
self.nstrat_orig = len(stratum_rows)
stratum_rows = [stratum_rows[i] for i in ix]
stratum_names = [stratum_names[i] for i in ix]
# The number of strata
nstrat = len(stratum_rows)
self.nstrat = nstrat
# Remove subjects whose entry time occurs after the last event
# in their stratum.
for stx,ix in enumerate(stratum_rows):
last_failure = max(time[ix][status[ix] == 1])
# Stata uses < here, R uses <=
ii = [i for i,t in enumerate(entry[ix]) if
t <= last_failure]
stratum_rows[stx] = stratum_rows[stx][ii]
# Remove subjects who are censored before the first event in
# their stratum.
for stx,ix in enumerate(stratum_rows):
first_failure = min(time[ix][status[ix] == 1])
ii = [i for i,t in enumerate(time[ix]) if
t >= first_failure]
stratum_rows[stx] = stratum_rows[stx][ii]
# Order by time within each stratum
for stx,ix in enumerate(stratum_rows):
ii = np.argsort(time[ix])
stratum_rows[stx] = stratum_rows[stx][ii]
if offset is not None:
self.offset_s = []
for stx in range(nstrat):
self.offset_s.append(offset[stratum_rows[stx]])
else:
self.offset_s = None
# Number of informative subjects
self.n_obs = sum([len(ix) for ix in stratum_rows])
self.stratum_rows = stratum_rows
self.stratum_names = stratum_names
# Split everything by stratum
self.time_s = self._split(time)
self.exog_s = self._split(exog)
self.status_s = self._split(status)
self.entry_s = self._split(entry)
# Precalculate some indices needed to fit Cox models.
# Distinct failure times within a stratum are always taken to
# be sorted in ascending order.
#
# ufailt_ix[stx][k] is a list of indices for subjects who fail
# at the k^th sorted unique failure time in stratum stx
#
# risk_enter[stx][k] is a list of indices for subjects who
# enter the risk set at the k^th sorted unique failure time in
# stratum stx
#
# risk_exit[stx][k] is a list of indices for subjects who exit
# the risk set at the k^th sorted unique failure time in
# stratum stx
self.ufailt_ix, self.risk_enter, self.risk_exit, self.ufailt =\
[], [], [], []
for stx in range(self.nstrat):
# All failure times
ift = np.flatnonzero(self.status_s[stx] == 1)
ft = self.time_s[stx][ift]
# Unique failure times
uft = np.unique(ft)
nuft = len(uft)
# Indices of cases that fail at each unique failure time
            # Map each unique failure time to its index.
            uft_map = {x: i for i, x in enumerate(uft)}
uft_ix = [[] for k in range(nuft)]
for ix,ti in zip(ift,ft):
uft_ix[uft_map[ti]].append(ix)
# Indices of cases (failed or censored) that enter the
# risk set at each unique failure time.
risk_enter1 = [[] for k in range(nuft)]
for i,t in enumerate(self.time_s[stx]):
ix = np.searchsorted(uft, t, "right") - 1
if ix >= 0:
risk_enter1[ix].append(i)
# Indices of cases (failed or censored) that exit the
# risk set at each unique failure time.
risk_exit1 = [[] for k in range(nuft)]
for i,t in enumerate(self.entry_s[stx]):
ix = np.searchsorted(uft, t)
risk_exit1[ix].append(i)
self.ufailt.append(uft)
self.ufailt_ix.append([np.asarray(x, dtype=np.int32)
for x in uft_ix])
self.risk_enter.append([np.asarray(x, dtype=np.int32)
for x in risk_enter1])
self.risk_exit.append([np.asarray(x, dtype=np.int32)
for x in risk_exit1]) | Represent a collection of survival times with possible
stratification and left truncation.
Parameters
----------
time : array_like
The times at which either the event (failure) occurs or
the observation is censored.
status : array_like
Indicates whether the event (failure) occurs at `time`
(`status` is 1), or if `time` is a censoring time (`status`
is 0).
exog : array_like
The exogenous (covariate) data matrix, cases are rows and
variables are columns.
strata : array_like
Grouping variable defining the strata. If None, all
observations are in a single stratum.
entry : array_like
Entry (left truncation) times. The observation is not
part of the risk set for times before the entry time. If
None, the entry time is treated as being zero, which
gives no left truncation. The entry time must be less
than or equal to `time`.
offset : array_like
An optional array of offsets | __init__ | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def from_formula(cls, formula, data, status=None, entry=None,
strata=None, offset=None, subset=None,
ties='breslow', missing='drop', *args, **kwargs):
"""
Create a proportional hazards regression model from a formula
and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array_like
The data for the model. See Notes.
status : array_like
The censoring status values; status=1 indicates that an
event occurred (e.g. failure or death), status=0 indicates
that the observation was right censored. If None, defaults
to status=1 for all cases.
entry : array_like
The entry times, if left truncation occurs
strata : array_like
Stratum labels. If None, all observations are taken to be
in a single stratum.
offset : array_like
Array of offset values
subset : array_like
An array-like object of booleans, integers, or index
values that indicate the subset of df to use in the
model. Assumes df is a `pandas.DataFrame`
ties : str
The method used to handle tied times, must be either 'breslow'
or 'efron'.
missing : str
The method used to handle missing data
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : PHReg model instance
"""
# Allow array arguments to be passed by column name.
if isinstance(status, str):
status = data[status]
if isinstance(entry, str):
entry = data[entry]
if isinstance(strata, str):
strata = data[strata]
if isinstance(offset, str):
offset = data[offset]
import re
terms = re.split(r"[+\-~]", formula)
for term in terms:
term = term.strip()
if term in ("0", "1"):
import warnings
warnings.warn("PHReg formulas should not include any '0' or '1' terms")
advance_eval_env(kwargs)
mod = super().from_formula(formula, data,
status=status, entry=entry, strata=strata,
offset=offset, subset=subset, ties=ties,
missing=missing, drop_cols=["Intercept"], *args,
**kwargs)
return mod | Create a proportional hazards regression model from a formula
and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array_like
The data for the model. See Notes.
status : array_like
The censoring status values; status=1 indicates that an
event occurred (e.g. failure or death), status=0 indicates
that the observation was right censored. If None, defaults
to status=1 for all cases.
entry : array_like
The entry times, if left truncation occurs
strata : array_like
Stratum labels. If None, all observations are taken to be
in a single stratum.
offset : array_like
Array of offset values
subset : array_like
An array-like object of booleans, integers, or index
values that indicate the subset of df to use in the
model. Assumes df is a `pandas.DataFrame`
ties : str
The method used to handle tied times, must be either 'breslow'
or 'efron'.
missing : str
The method used to handle missing data
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : PHReg model instance | from_formula | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
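A hedged usage sketch of the formula interface above; the data are simulated and every column name is hypothetical.

import numpy as np
import pandas as pd
from statsmodels.duration.hazard_regression import PHReg

rng = np.random.default_rng(0)
n = 300
df = pd.DataFrame({
    "age": rng.normal(50, 10, n),
    "sex": rng.integers(0, 2, n),
    "center": rng.integers(0, 3, n),
})
lin = 0.02 * df["age"] + 0.5 * df["sex"]
# Exponential event times with rate exp(lin); ~20% of cases flagged censored.
df["futime"] = -np.log(rng.uniform(size=n)) / np.exp(lin)
df["died"] = (rng.uniform(size=n) < 0.8).astype(int)

mod = PHReg.from_formula("futime ~ age + sex", df, status="died",
                         strata="center", ties="efron")
rslt = mod.fit()
print(rslt.summary())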
def fit(self, groups=None, **args):
"""
Fit a proportional hazards regression model.
Parameters
----------
groups : array_like
Labels indicating groups of observations that may be
dependent. If present, the standard errors account for
this dependence. Does not affect fitted values.
Returns
-------
PHRegResults
Returns a results instance.
"""
# TODO process for missing values
if groups is not None:
if len(groups) != len(self.endog):
msg = ("len(groups) = %d and len(endog) = %d differ" %
(len(groups), len(self.endog)))
raise ValueError(msg)
self.groups = np.asarray(groups)
else:
self.groups = None
if 'disp' not in args:
args['disp'] = False
fit_rslts = super().fit(**args)
if self.groups is None:
cov_params = fit_rslts.cov_params()
else:
cov_params = self.robust_covariance(fit_rslts.params)
results = PHRegResults(self, fit_rslts.params, cov_params)
return results | Fit a proportional hazards regression model.
Parameters
----------
groups : array_like
Labels indicating groups of observations that may be
dependent. If present, the standard errors account for
this dependence. Does not affect fitted values.
Returns
-------
PHRegResults
Returns a results instance. | fit | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
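A sketch of cluster-robust standard errors, reusing ``mod`` and ``n`` from the hypothetical formula sketch above: passing ``groups`` changes only the covariance of the estimates, not the fit.

# Hypothetical cluster labels: three rows per subject.
cluster = np.repeat(np.arange(n // 3), 3)
rslt_robust = mod.fit(groups=cluster)
print(rslt_robust.bse)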
def loglike(self, params):
"""
Returns the log partial likelihood function evaluated at
`params`.
"""
if self.ties == "breslow":
return self.breslow_loglike(params)
elif self.ties == "efron":
return self.efron_loglike(params) | Returns the log partial likelihood function evaluated at
`params`. | loglike | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def score(self, params):
"""
Returns the score function evaluated at `params`.
"""
if self.ties == "breslow":
return self.breslow_gradient(params)
elif self.ties == "efron":
return self.efron_gradient(params) | Returns the score function evaluated at `params`. | score | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def hessian(self, params):
"""
Returns the Hessian matrix of the log partial likelihood
function evaluated at `params`.
"""
if self.ties == "breslow":
return self.breslow_hessian(params)
else:
return self.efron_hessian(params) | Returns the Hessian matrix of the log partial likelihood
function evaluated at `params`. | hessian | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def breslow_loglike(self, params):
"""
Returns the value of the log partial likelihood function
evaluated at `params`, using the Breslow method to handle tied
times.
"""
surv = self.surv
like = 0.
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0 = 0.
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
# Account for all cases that fail at this point.
ix = uft_ix[i]
like += (linpred[ix] - np.log(xp0)).sum()
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
return like | Returns the value of the log partial likelihood function
evaluated at `params`, using the Breslow method to handle tied
times. | breslow_loglike | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
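For reference, the quantity accumulated by the loop above is the Breslow log partial likelihood within each stratum, where :math:`D_t` is the set of subjects failing at time :math:`t` and :math:`R_t` is the risk set:

.. math:: \ln L(\beta)=\sum_{t}\left[\sum_{i\in D_{t}}x_{i}^{\prime}\beta-|D_{t}|\,\ln\sum_{j\in R_{t}}e^{x_{j}^{\prime}\beta}\right]

Subtracting ``linpred.max()`` before exponentiating is a numerical-stability device and cancels out of this expression.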
def efron_loglike(self, params):
"""
Returns the value of the log partial likelihood function
evaluated at `params`, using the Efron method to handle tied
times.
"""
surv = self.surv
like = 0.
# Loop over strata
for stx in range(surv.nstrat):
# exog and linear predictor for this stratum
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0 = 0.
# Iterate backward through the unique failure times.
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
xp0f = e_linpred[uft_ix[i]].sum()
# Account for all cases that fail at this point.
ix = uft_ix[i]
like += linpred[ix].sum()
m = len(ix)
J = np.arange(m, dtype=np.float64) / m
like -= np.log(xp0 - J*xp0f).sum()
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
return like | Returns the value of the log partial likelihood function
evaluated at `params`, using the Efron method to handle tied
times. | efron_loglike | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
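The Efron correction averages the tied failures out of the denominator. With :math:`d_{t}=|D_{t}|` and :math:`s_{t}=\sum_{j\in D_{t}}e^{x_{j}^{\prime}\beta}`, the loop above computes, per stratum:

.. math:: \ln L(\beta)=\sum_{t}\left[\sum_{i\in D_{t}}x_{i}^{\prime}\beta-\sum_{k=0}^{d_{t}-1}\ln\left(\sum_{j\in R_{t}}e^{x_{j}^{\prime}\beta}-\frac{k}{d_{t}}\,s_{t}\right)\right]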
def breslow_gradient(self, params):
"""
Returns the gradient of the log partial likelihood, using the
Breslow method to handle tied times.
"""
surv = self.surv
grad = 0.
# Loop over strata
for stx in range(surv.nstrat):
# Indices of subjects in the stratum
# strat_ix = surv.stratum_rows[stx]
# Unique failure times in the stratum
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
# exog and linear predictor for the stratum
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1 = 0., 0.
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 += e_linpred[ix].sum()
xp1 += (e_linpred[ix][:,None] * v).sum(0)
# Account for all cases that fail at this point.
ix = uft_ix[i]
grad += (exog_s[ix,:] - xp1 / xp0).sum(0)
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 -= e_linpred[ix].sum()
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
return grad | Returns the gradient of the log partial likelihood, using the
Breslow method to handle tied times. | breslow_gradient | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def efron_gradient(self, params):
"""
Returns the gradient of the log partial likelihood evaluated
at `params`, using the Efron method to handle tied times.
"""
surv = self.surv
grad = 0.
# Loop over strata
for stx in range(surv.nstrat):
# Indices of cases in the stratum
# strat_ix = surv.stratum_rows[stx]
# exog and linear predictor of the stratum
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1 = 0., 0.
# Iterate backward through the unique failure times.
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 += e_linpred[ix].sum()
xp1 += (e_linpred[ix][:,None] * v).sum(0)
ixf = uft_ix[i]
if len(ixf) > 0:
v = exog_s[ixf,:]
xp0f = e_linpred[ixf].sum()
xp1f = (e_linpred[ixf][:,None] * v).sum(0)
# Consider all cases that fail at this point.
grad += v.sum(0)
m = len(ixf)
J = np.arange(m, dtype=np.float64) / m
numer = xp1 - np.outer(J, xp1f)
denom = xp0 - np.outer(J, xp0f)
ratio = numer / denom
rsum = ratio.sum(0)
grad -= rsum
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 -= e_linpred[ix].sum()
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
return grad | Returns the gradient of the log partial likelihood evaluated
at `params`, using the Efron method to handle tied times. | efron_gradient | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def breslow_hessian(self, params):
"""
Returns the Hessian of the log partial likelihood evaluated at
`params`, using the Breslow method to handle tied times.
"""
surv = self.surv
hess = 0.
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1, xp2 = 0., 0., 0.
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
xp0 += e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 += (e_linpred[ix][:,None] * v).sum(0)
elx = e_linpred[ix]
xp2 += np.einsum("ij,ik,i->jk", v, v, elx)
# Account for all cases that fail at this point.
m = len(uft_ix[i])
hess += m*(xp2 / xp0 - np.outer(xp1, xp1) / xp0**2)
# Update for new cases entering the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
xp0 -= e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
elx = e_linpred[ix]
xp2 -= np.einsum("ij,ik,i->jk", v, v, elx)
return -hess | Returns the Hessian of the log partial likelihood evaluated at
`params`, using the Breslow method to handle tied times. | breslow_hessian | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def efron_hessian(self, params):
"""
Returns the Hessian matrix of the partial log-likelihood
evaluated at `params`, using the Efron method to handle tied
times.
"""
surv = self.surv
hess = 0.
# Loop over strata
for stx in range(surv.nstrat):
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1, xp2 = 0., 0., 0.
# Iterate backward through the unique failure times.
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
xp0 += e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 += (e_linpred[ix][:,None] * v).sum(0)
elx = e_linpred[ix]
xp2 += np.einsum("ij,ik,i->jk", v, v, elx)
ixf = uft_ix[i]
if len(ixf) > 0:
v = exog_s[ixf,:]
xp0f = e_linpred[ixf].sum()
xp1f = (e_linpred[ixf][:,None] * v).sum(0)
elx = e_linpred[ixf]
xp2f = np.einsum("ij,ik,i->jk", v, v, elx)
# Account for all cases that fail at this point.
m = len(uft_ix[i])
J = np.arange(m, dtype=np.float64) / m
c0 = xp0 - J*xp0f
hess += xp2 * np.sum(1 / c0)
hess -= xp2f * np.sum(J / c0)
mat = (xp1[None, :] - np.outer(J, xp1f)) / c0[:, None]
hess -= np.einsum("ij,ik->jk", mat, mat)
# Update for new cases entering the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
xp0 -= e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
elx = e_linpred[ix]
xp2 -= np.einsum("ij,ik,i->jk", v, v, elx)
return -hess | Returns the Hessian matrix of the partial log-likelihood
evaluated at `params`, using the Efron method to handle tied
times. | efron_hessian | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def robust_covariance(self, params):
"""
Returns a covariance matrix for the proportional hazards model
        regression coefficient estimates that is robust to certain
forms of model misspecification.
Parameters
----------
params : ndarray
The parameter vector at which the covariance matrix is
calculated.
Returns
-------
The robust covariance matrix as a square ndarray.
Notes
-----
This function uses the `groups` argument to determine groups
within which observations may be dependent. The covariance
matrix is calculated using the Huber-White "sandwich" approach.
"""
if self.groups is None:
raise ValueError("`groups` must be specified to calculate the robust covariance matrix")
hess = self.hessian(params)
score_obs = self.score_residuals(params)
# Collapse
grads = {}
for i,g in enumerate(self.groups):
if g not in grads:
grads[g] = 0.
grads[g] += score_obs[i, :]
grads = np.asarray(list(grads.values()))
mat = grads[None, :, :]
mat = mat.T * mat
mat = mat.sum(1)
hess_inv = np.linalg.inv(hess)
cmat = np.dot(hess_inv, np.dot(mat, hess_inv))
return cmat | Returns a covariance matrix for the proportional hazards model
regression coefficient estimates that is robust to certain
forms of model misspecification.
Parameters
----------
params : ndarray
The parameter vector at which the covariance matrix is
calculated.
Returns
-------
The robust covariance matrix as a square ndarray.
Notes
-----
This function uses the `groups` argument to determine groups
within which observations may be dependent. The covariance
matrix is calculated using the Huber-White "sandwich" approach. | robust_covariance | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
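In matrix form the returned estimate is the familiar sandwich, with :math:`H` the Hessian of the log partial likelihood and :math:`s_{g}` the score residuals summed within group :math:`g` (the sign of :math:`H` squares out):

.. math:: \widehat{\mathrm{Var}}(\hat{\beta})=H^{-1}\left(\sum_{g}s_{g}s_{g}^{\prime}\right)H^{-1}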
def score_residuals(self, params):
"""
Returns the score residuals calculated at a given vector of
parameters.
Parameters
----------
params : ndarray
The parameter vector at which the score residuals are
calculated.
Returns
-------
The score residuals, returned as a ndarray having the same
shape as `exog`.
Notes
-----
Observations in a stratum with no observed events have undefined
score residuals, and contain NaN in the returned matrix.
"""
surv = self.surv
score_resid = np.zeros(self.exog.shape, dtype=np.float64)
# Use to set undefined values to NaN.
mask = np.zeros(self.exog.shape[0], dtype=np.int32)
w_avg = self.weighted_covariate_averages(params)
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
strat_ix = surv.stratum_rows[stx]
xp0 = 0.
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
at_risk_ix = set()
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
at_risk_ix |= set(ix)
xp0 += e_linpred[ix].sum()
atr_ix = list(at_risk_ix)
leverage = exog_s[atr_ix, :] - w_avg[stx][i, :]
# Event indicators
d = np.zeros(exog_s.shape[0])
d[uft_ix[i]] = 1
# The increment in the cumulative hazard
dchaz = len(uft_ix[i]) / xp0
# Piece of the martingale residual
mrp = d[atr_ix] - e_linpred[atr_ix] * dchaz
# Update the score residuals
ii = strat_ix[atr_ix]
score_resid[ii,:] += leverage * mrp[:, None]
mask[ii] = 1
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
at_risk_ix -= set(ix)
xp0 -= e_linpred[ix].sum()
jj = np.flatnonzero(mask == 0)
if len(jj) > 0:
score_resid[jj, :] = np.nan
return score_resid | Returns the score residuals calculated at a given vector of
parameters.
Parameters
----------
params : ndarray
The parameter vector at which the score residuals are
calculated.
Returns
-------
The score residuals, returned as a ndarray having the same
shape as `exog`.
Notes
-----
Observations in a stratum with no observed events have undefined
score residuals, and contain NaN in the returned matrix. | score_residuals | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def weighted_covariate_averages(self, params):
"""
Returns the hazard-weighted average of covariate values for
subjects who are at-risk at a particular time.
Parameters
----------
params : ndarray
Parameter vector
Returns
-------
averages : list of ndarrays
averages[stx][i,:] is a row vector containing the weighted
average values (for all the covariates) of at-risk
            subjects at the i^th largest observed failure time in
stratum `stx`, using the hazard multipliers as weights.
Notes
-----
Used to calculate leverages and score residuals.
"""
surv = self.surv
averages = []
xp0, xp1 = 0., 0.
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
average_s = np.zeros((len(uft_ix), exog_s.shape[1]),
dtype=np.float64)
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
xp1 += np.dot(e_linpred[ix], exog_s[ix, :])
average_s[i, :] = xp1 / xp0
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
xp1 -= np.dot(e_linpred[ix], exog_s[ix, :])
averages.append(average_s)
return averages | Returns the hazard-weighted average of covariate values for
subjects who are at-risk at a particular time.
Parameters
----------
params : ndarray
Parameter vector
Returns
-------
averages : list of ndarrays
averages[stx][i,:] is a row vector containing the weighted
average values (for all the covariates) of at-risk
subjects at the i^th largest observed failure time in
stratum `stx`, using the hazard multipliers as weights.
Notes
-----
Used to calculate leverages and score residuals. | weighted_covariate_averages | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
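The running ratio ``xp1 / xp0`` maintained above is the hazard-weighted mean of the covariates over the risk set at each failure time:

.. math:: \bar{x}(t)=\frac{\sum_{j\in R_{t}}x_{j}\,e^{x_{j}^{\prime}\beta}}{\sum_{j\in R_{t}}e^{x_{j}^{\prime}\beta}}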
def baseline_cumulative_hazard(self, params):
"""
Estimate the baseline cumulative hazard and survival
functions.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
A list of triples (time, hazard, survival) containing the time
values and corresponding cumulative hazard and survival
function values for each stratum.
Notes
-----
Uses the Nelson-Aalen estimator.
"""
# TODO: some disagreements with R, not the same algorithm but
# hard to deduce what R is doing. Our results are reasonable.
surv = self.surv
rslt = []
# Loop over strata
for stx in range(surv.nstrat):
uft = surv.ufailt[stx]
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
e_linpred = np.exp(linpred)
xp0 = 0.
h0 = np.zeros(nuft, dtype=np.float64)
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
# Account for all cases that fail at this point.
ix = uft_ix[i]
h0[i] = len(ix) / xp0
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
cumhaz = np.cumsum(h0) - h0
current_strata_surv = np.exp(-cumhaz)
rslt.append([uft, cumhaz, current_strata_surv])
return rslt | Estimate the baseline cumulative hazard and survival
functions.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
A list of triples (time, hazard, survival) containing the time
values and corresponding cumulative hazard and survival
function values for each stratum.
Notes
-----
Uses the Nelson-Aalen estimator. | baseline_cumulative_hazard | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
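The increment ``h0[i]`` computed above is the Breslow estimator of the baseline hazard jump, so the cumulative hazard just before time :math:`t` is

.. math:: \widehat{H}_{0}(t)=\sum_{t_{k}<t}\frac{d_{k}}{\sum_{j\in R_{t_{k}}}e^{x_{j}^{\prime}\hat{\beta}}}

where :math:`d_{k}` is the number of failures at the k-th unique failure time.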
def baseline_cumulative_hazard_function(self, params):
"""
Returns a function that calculates the baseline cumulative
hazard function for each stratum.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
A dict mapping stratum names to the estimated baseline
cumulative hazard function.
"""
from scipy.interpolate import interp1d
surv = self.surv
base = self.baseline_cumulative_hazard(params)
cumhaz_f = {}
for stx in range(surv.nstrat):
time_h = base[stx][0]
cumhaz = base[stx][1]
time_h = np.r_[-np.inf, time_h, np.inf]
cumhaz = np.r_[cumhaz[0], cumhaz, cumhaz[-1]]
func = interp1d(time_h, cumhaz, kind='zero')
cumhaz_f[self.surv.stratum_names[stx]] = func
return cumhaz_f | Returns a function that calculates the baseline cumulative
hazard function for each stratum.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
A dict mapping stratum names to the estimated baseline
cumulative hazard function. | baseline_cumulative_hazard_function | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
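A short sketch, again reusing the hypothetical ``rslt`` fitted in the formula sketch above, of evaluating the returned step functions on a grid; the dict is keyed by stratum label.

cumhaz_funcs = rslt.model.baseline_cumulative_hazard_function(rslt.params)
grid = np.linspace(0.1, 2.0, 5)
for name, func in cumhaz_funcs.items():
    print(name, func(grid))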
def get_distribution(self, params, scale=1.0, exog=None):
"""
Returns a scipy distribution object corresponding to the
distribution of uncensored endog (duration) values for each
case.
Parameters
----------
params : array_like
The proportional hazards model parameters.
scale : float
Present for compatibility, not used.
exog : array_like
A design matrix, defaults to model.exog.
Returns
-------
A list of objects of type scipy.stats.distributions.rv_discrete
Notes
-----
The distributions are obtained from a simple discrete estimate
of the survivor function that puts all mass on the observed
failure times within a stratum.
"""
surv = self.surv
bhaz = self.baseline_cumulative_hazard(params)
# The arguments to rv_discrete_float, first obtained by
# stratum
pk, xk = [], []
if exog is None:
exog_split = surv.exog_s
else:
exog_split = self.surv._split(exog)
for stx in range(self.surv.nstrat):
exog_s = exog_split[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
e_linpred = np.exp(linpred)
# The unique failure times for this stratum (the support
# of the distribution).
pts = bhaz[stx][0]
# The individual cumulative hazards for everyone in this
# stratum.
ichaz = np.outer(e_linpred, bhaz[stx][1])
# The individual survival functions.
usurv = np.exp(-ichaz)
z = np.zeros((usurv.shape[0], 1))
usurv = np.concatenate((usurv, z), axis=1)
# The individual survival probability masses.
probs = -np.diff(usurv, 1)
pk.append(probs)
xk.append(np.outer(np.ones(probs.shape[0]), pts))
# Pad to make all strata have the same shape
mxc = max([x.shape[1] for x in xk])
for k in range(self.surv.nstrat):
if xk[k].shape[1] < mxc:
xk1 = np.zeros((xk[k].shape[0], mxc))
pk1 = np.zeros((pk[k].shape[0], mxc))
xk1[:, 0:xk[k].shape[1]] = xk[k]
pk1[:, 0:pk[k].shape[1]] = pk[k]
xk[k], pk[k] = xk1, pk1
# Put the support points and probabilities into single matrices
xka = np.nan * np.ones((len(self.endog), mxc))
pka = np.ones((len(self.endog), mxc), dtype=np.float64) / mxc
for stx in range(self.surv.nstrat):
ix = self.surv.stratum_rows[stx]
xka[ix, :] = xk[stx]
pka[ix, :] = pk[stx]
dist = rv_discrete_float(xka, pka)
return dist | Returns a scipy distribution object corresponding to the
distribution of uncensored endog (duration) values for each
case.
Parameters
----------
params : array_like
The proportional hazards model parameters.
scale : float
Present for compatibility, not used.
exog : array_like
A design matrix, defaults to model.exog.
Returns
-------
A list of objects of type scipy.stats.distributions.rv_discrete
Notes
-----
The distributions are obtained from a simple discrete estimate
of the survivor function that puts all mass on the observed
failure times within a stratum. | get_distribution | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def standard_errors(self):
"""
Returns the standard errors of the parameter estimates.
"""
return np.sqrt(np.diag(self.cov_params())) | Returns the standard errors of the parameter estimates. | standard_errors | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def bse(self):
"""
Returns the standard errors of the parameter estimates.
"""
return self.standard_errors | Returns the standard errors of the parameter estimates. | bse | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def get_distribution(self):
"""
Returns a scipy distribution object corresponding to the
distribution of uncensored endog (duration) values for each
case.
Returns
-------
A list of objects of type scipy.stats.distributions.rv_discrete
Notes
-----
The distributions are obtained from a simple discrete estimate
of the survivor function that puts all mass on the observed
failure times within a stratum.
"""
return self.model.get_distribution(self.params) | Returns a scipy distribution object corresponding to the
distribution of uncensored endog (duration) values for each
case.
Returns
-------
A list of objects of type scipy.stats.distributions.rv_discrete
Notes
-----
The distributions are obtained from a simple discrete estimate
of the survivor function that puts all mass on the observed
failure times within a stratum. | get_distribution | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def _group_stats(self, groups):
"""
Descriptive statistics of the groups.
"""
gsizes = np.unique(groups, return_counts=True)
gsizes = gsizes[1]
return gsizes.min(), gsizes.max(), gsizes.mean(), len(gsizes) | Descriptive statistics of the groups. | _group_stats | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def weighted_covariate_averages(self):
"""
The average covariate values within the at-risk set at each
event time point, weighted by hazard.
"""
return self.model.weighted_covariate_averages(self.params) | The average covariate values within the at-risk set at each
event time point, weighted by hazard. | weighted_covariate_averages | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def score_residuals(self):
"""
A matrix containing the score residuals.
"""
return self.model.score_residuals(self.params) | A matrix containing the score residuals. | score_residuals | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def baseline_cumulative_hazard(self):
"""
A list (corresponding to the strata) containing the baseline
cumulative hazard function evaluated at the event points.
"""
return self.model.baseline_cumulative_hazard(self.params) | A list (corresponding to the strata) containing the baseline
cumulative hazard function evaluated at the event points. | baseline_cumulative_hazard | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def baseline_cumulative_hazard_function(self):
"""
A list (corresponding to the strata) containing function
objects that calculate the cumulative hazard function.
"""
return self.model.baseline_cumulative_hazard_function(self.params) | A list (corresponding to the strata) containing function
objects that calculate the cumulative hazard function. | baseline_cumulative_hazard_function | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def schoenfeld_residuals(self):
"""
A matrix containing the Schoenfeld residuals.
Notes
-----
        Schoenfeld residuals for censored observations are set to NaN.
"""
surv = self.model.surv
w_avg = self.weighted_covariate_averages
# Initialize at NaN since rows that belong to strata with no
# events have undefined residuals.
sch_resid = np.nan*np.ones(self.model.exog.shape, dtype=np.float64)
# Loop over strata
for stx in range(surv.nstrat):
uft = surv.ufailt[stx]
exog_s = surv.exog_s[stx]
time_s = surv.time_s[stx]
strat_ix = surv.stratum_rows[stx]
ii = np.searchsorted(uft, time_s)
# These subjects are censored after the last event in
# their stratum, so have empty risk sets and undefined
# residuals.
jj = np.flatnonzero(ii < len(uft))
sch_resid[strat_ix[jj], :] = exog_s[jj, :] - w_avg[stx][ii[jj], :]
jj = np.flatnonzero(self.model.status == 0)
sch_resid[jj, :] = np.nan
return sch_resid | A matrix containing the Schoenfeld residuals.
Notes
-----
Schoenfeld residuals for censored observations are set to NaN. | schoenfeld_residuals | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause
def martingale_residuals(self):
"""
The martingale residuals.
"""
surv = self.model.surv
# Initialize at NaN since rows that belong to strata with no
# events have undefined residuals.
mart_resid = np.nan*np.ones(len(self.model.endog), dtype=np.float64)
cumhaz_f_list = self.baseline_cumulative_hazard_function
# Loop over strata
for stx in range(surv.nstrat):
cumhaz_f = cumhaz_f_list[stx]
exog_s = surv.exog_s[stx]
time_s = surv.time_s[stx]
linpred = np.dot(exog_s, self.params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
e_linpred = np.exp(linpred)
ii = surv.stratum_rows[stx]
chaz = cumhaz_f(time_s)
mart_resid[ii] = self.model.status[ii] - e_linpred * chaz
return mart_resid | The martingale residuals. | martingale_residuals | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
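Each value computed above is the standard martingale residual: the event indicator minus the subject's estimated cumulative intensity at its observed time,

.. math:: \widehat{M}_{i}=\delta_{i}-e^{x_{i}^{\prime}\hat{\beta}}\,\widehat{H}_{0}(t_{i})

so a large positive residual flags a subject who failed earlier than the model predicts.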
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the proportional hazards regression results.
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
            Names for the exogenous variables; the default is `x#` for # in
            1, ..., p, where p is the number of regressors. Must match the
            number of parameters in the model
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
float_format = "%8.3f"
info = {}
info["Model:"] = "PH Reg"
if yname is None:
yname = self.model.endog_names
info["Dependent variable:"] = yname
info["Ties:"] = self.model.ties.capitalize()
info["Sample size:"] = str(self.model.surv.n_obs)
info["Num. events:"] = str(int(sum(self.model.status)))
if self.model.groups is not None:
mn, mx, avg, num = self._group_stats(self.model.groups)
info["Num groups:"] = "%.0f" % num
info["Min group size:"] = "%.0f" % mn
info["Max group size:"] = "%.0f" % mx
info["Avg group size:"] = "%.1f" % avg
if self.model.strata is not None:
mn, mx, avg, num = self._group_stats(self.model.strata)
info["Num strata:"] = "%.0f" % num
info["Min stratum size:"] = "%.0f" % mn
info["Max stratum size:"] = "%.0f" % mx
info["Avg stratum size:"] = "%.1f" % avg
smry.add_dict(info, align='l', float_format=float_format)
param = summary2.summary_params(self, alpha=alpha)
param = param.rename(columns={"Coef.": "log HR",
"Std.Err.": "log HR SE"})
param.insert(2, "HR", np.exp(param["log HR"]))
a = "[%.3f" % (alpha / 2)
param.loc[:, a] = np.exp(param.loc[:, a])
a = "%.3f]" % (1 - alpha / 2)
param.loc[:, a] = np.exp(param.loc[:, a])
if xname is not None:
param.index = xname
smry.add_df(param, float_format=float_format)
smry.add_title(title=title, results=self)
smry.add_text("Confidence intervals are for the hazard ratios")
dstrat = self.model.surv.nstrat_orig - self.model.surv.nstrat
if dstrat > 0:
if dstrat == 1:
smry.add_text("1 stratum dropped for having no events")
else:
smry.add_text("%d strata dropped for having no events" % dstrat)
if self.model.entry is not None:
n_entry = sum(self.model.entry != 0)
if n_entry == 1:
smry.add_text("1 observation has a positive entry time")
else:
smry.add_text("%d observations have positive entry times" % n_entry)
if self.model.groups is not None:
smry.add_text("Standard errors account for dependence within groups")
if hasattr(self, "regularized"):
smry.add_text("Standard errors do not account for the regularization")
return smry | Summarize the proportional hazards regression results.
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
Names for the exogenous variables; the default is `x#` for # in
1, ..., p, where p is the number of regressors. Must match the
number of parameters in the model
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results | summary | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def rvs(self, n=None):
"""
Returns a random sample from the discrete distribution.
A vector is returned containing a single draw from each row of
`xk`, using the probabilities of the corresponding row of `pk`
Parameters
----------
n : not used
Present for signature compatibility
"""
n = self.xk.shape[0]
u = np.random.uniform(size=n)
ix = (self.cpk < u[:, None]).sum(1)
ii = np.arange(n, dtype=np.int32)
return self.xk[(ii,ix)] | Returns a random sample from the discrete distribution.
A vector is returned containing a single draw from each row of
`xk`, using the probabilities of the corresponding row of `pk`
Parameters
----------
n : not used
Present for signature compatibility | rvs | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def mean(self):
"""
Returns a vector containing the mean values of the discrete
distributions.
A vector is returned containing the mean value of each row of
`xk`, using the probabilities in the corresponding row of
`pk`.
"""
return (self.xk * self.pk).sum(1) | Returns a vector containing the mean values of the discrete
distributions.
A vector is returned containing the mean value of each row of
`xk`, using the probabilities in the corresponding row of
`pk`. | mean | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def var(self):
"""
Returns a vector containing the variances of the discrete
distributions.
A vector is returned containing the variance for each row of
`xk`, using the probabilities in the corresponding row of
`pk`.
"""
mn = self.mean()
xkc = self.xk - mn[:, None]
        # Variance of each row: sum over k of pk * (xk - mean)**2.
        return (self.pk * xkc**2).sum(1)
distributions.
A vector is returned containing the variance for each row of
`xk`, using the probabilities in the corresponding row of
`pk`. | var | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
def std(self):
"""
Returns a vector containing the standard deviations of the
discrete distributions.
A vector is returned containing the standard deviation for
each row of `xk`, using the probabilities in the corresponding
row of `pk`.
"""
return np.sqrt(self.var()) | Returns a vector containing the standard deviations of the
discrete distributions.
A vector is returned containing the standard deviation for
each row of `xk`, using the probabilities in the corresponding
row of `pk`. | std | python | statsmodels/statsmodels | statsmodels/duration/hazard_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/hazard_regression.py | BSD-3-Clause |
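A minimal sketch (not from the source) exercising ``rv_discrete_float`` directly; each row of ``xk`` carries its own support and each row of ``pk`` its own probabilities.

import numpy as np

xk = np.array([[0., 1., 2.],
               [10., 20., 30.]])
pk = np.array([[0.2, 0.5, 0.3],
               [0.1, 0.1, 0.8]])
dist = rv_discrete_float(xk, pk)

print(dist.mean())  # per-row means: [ 1.1, 27. ]
print(dist.std())   # per-row standard deviations
print(dist.rvs())   # one draw per row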
def test_formula_environment(self):
"""Test that PHReg uses the right environment for formulas."""
def times_two(x):
return 2 * x
rng = np.random.default_rng(0)
exog = rng.uniform(size=100)
endog = np.exp(exog) * -np.log(rng.uniform(size=len(exog)))
data = pd.DataFrame({"endog": endog, "exog": exog})
result_direct = PHReg(endog, times_two(exog)).fit()
result_formula = PHReg.from_formula("endog ~ times_two(exog)", data=data).fit()
assert_allclose(result_direct.params, result_formula.params)
assert_allclose(result_direct.bse, result_formula.bse) | Test that PHReg uses the right environment for formulas. | test_formula_environment | python | statsmodels/statsmodels | statsmodels/duration/tests/test_phreg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/duration/tests/test_phreg.py | BSD-3-Clause |
def nloglikeobs(self, params):
"""
Loglikelihood of Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
The log likelihood of the model evaluated at `params`
Notes
-----
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
"""
XB = np.dot(self.exog, params)
endog = self.endog
return np.exp(XB) - endog*XB + np.log(factorial(endog)) | Loglikelihood of Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
The log likelihood of the model evaluated at `params`
Notes
-----
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right] | nloglikeobs | python | statsmodels/statsmodels | statsmodels/miscmodels/count.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/count.py | BSD-3-Clause |
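A quick consistency check, written as a sketch; the class name ``PoissonGMLE`` from ``statsmodels.miscmodels.count`` is assumed. The per-observation values should equal the negative Poisson log-pmf at :math:`\mu_{i}=e^{x_{i}^{\prime}\beta}`.

import numpy as np
from scipy import stats
from statsmodels.miscmodels.count import PoissonGMLE

rng = np.random.default_rng(0)
X = np.column_stack([np.ones(50), rng.normal(size=50)])
beta = np.array([0.2, 0.4])
y = rng.poisson(np.exp(X @ beta))

mod = PoissonGMLE(y, X)
nll = mod.nloglikeobs(beta)
assert np.allclose(nll, -stats.poisson.logpmf(y, np.exp(X @ beta)))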
def predict_distribution(self, exog):
'''return frozen scipy.stats distribution with mu at estimated prediction
'''
        if not hasattr(self, "result"):
            # TODO: this belongs on the Results class, not the Model
            raise ValueError("fit must be called before predict_distribution")
else:
result = self.result
params = result.params
mu = np.exp(np.dot(exog, params))
return stats.poisson(mu, loc=0) | return frozen scipy.stats distribution with mu at estimated prediction | predict_distribution | python | statsmodels/statsmodels | statsmodels/miscmodels/count.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/count.py | BSD-3-Clause |
def nloglikeobs(self, params):
"""
Loglikelihood of Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
The log likelihood of the model evaluated at `params`
Notes
-----
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
"""
XB = self.offset + np.dot(self.exog, params)
endog = self.endog
nloglik = np.exp(XB) - endog*XB + np.log(factorial(endog))
return nloglik | Loglikelihood of Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
The log likelihood of the model evaluated at `params`
Notes
-----
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right] | nloglikeobs | python | statsmodels/statsmodels | statsmodels/miscmodels/count.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/count.py | BSD-3-Clause |
def nloglikeobs(self, params):
"""
        Loglikelihood of the zero-inflated Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
The log likelihood of the model evaluated at `params`
Notes
-----
        .. math:: \\ln L=\\sum_{y_{i}>0}\\left[\\ln(1-\\gamma)-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]+\\sum_{y_{i}=0}\\ln\\left[\\gamma+(1-\\gamma)e^{-\\lambda_{i}}\\right]
"""
beta = params[:-1]
gamm = 1 / (1 + np.exp(params[-1])) #check this
# replace with np.dot(self.exogZ, gamma)
XB = self.offset + np.dot(self.exog, beta)
endog = self.endog
nloglik = -np.log(1-gamm) + np.exp(XB) - endog*XB + np.log(factorial(endog))
nloglik[endog==0] = - np.log(gamm + np.exp(-nloglik[endog==0]))
        return nloglik | Loglikelihood of the zero-inflated Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
The log likelihood of the model evaluated at `params`
Notes
-----
.. math:: \\ln L=\\sum_{y_{i}>0}\\left[\\ln(1-\\gamma)-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]+\\sum_{y_{i}=0}\\ln\\left[\\gamma+(1-\\gamma)e^{-\\lambda_{i}}\\right] | nloglikeobs | python | statsmodels/statsmodels | statsmodels/miscmodels/count.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/count.py | BSD-3-Clause
def fit_minimal(self, start_value, **kwargs):
'''minimal fitting with no extra calculations'''
func = self.geterrors
res = optimize.leastsq(func, start_value, full_output=0, **kwargs)
return res | minimal fitting with no extra calculations | fit_minimal | python | statsmodels/statsmodels | statsmodels/miscmodels/nonlinls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/nonlinls.py | BSD-3-Clause |
def fit_random(self, ntries=10, rvs_generator=None, nparams=None):
'''fit with random starting values
this could be replaced with a global fitter
'''
if nparams is None:
nparams = self.nparams
if rvs_generator is None:
rvs = np.random.uniform(low=-10, high=10, size=(ntries, nparams))
else:
rvs = rvs_generator(size=(ntries, nparams))
results = np.array([np.r_[self.fit_minimal(rv), rv] for rv in rvs])
        #select best results and check how many solutions are within 1e-6 of best
#not sure what leastsq returns
return results | fit with random starting values
this could be replaced with a global fitter | fit_random | python | statsmodels/statsmodels | statsmodels/miscmodels/nonlinls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/nonlinls.py | BSD-3-Clause |
def jac_predict(self, params):
'''jacobian of prediction function using complex step derivative
        This assumes that the predict function does not currently use complex
        variables internally, but is written so that it can be evaluated with
        complex-valued parameters.
'''
from statsmodels.tools.numdiff import approx_fprime_cs
jaccs_err = approx_fprime_cs(params, self._predict)
return jaccs_err | jacobian of prediction function using complex step derivative
This assumes that the predict function does not currently use complex
variables internally, but is written so that it can be evaluated with
complex-valued parameters. | jac_predict | python | statsmodels/statsmodels | statsmodels/miscmodels/nonlinls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/nonlinls.py | BSD-3-Clause
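The complex-step rule used above is :math:`f^{\prime}(x)\approx\mathrm{Im}\,f(x+ih)/h`, which avoids subtractive cancellation. A tiny standalone demo with ``approx_fprime_cs`` (assuming only that the function is analytic and accepts complex input):

import numpy as np
from statsmodels.tools.numdiff import approx_fprime_cs

def f(x):
    # Must tolerate complex arguments for the complex-step method.
    return np.array([np.exp(x[0]) * np.sin(x[1])])

x0 = np.array([0.5, 1.0])
jac = approx_fprime_cs(x0, f)
expected = np.array([[np.exp(0.5) * np.sin(1.0), np.exp(0.5) * np.cos(1.0)]])
assert np.allclose(jac, expected)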
def mvn_loglike_sum(x, sigma):
'''loglike multivariate normal
copied from GLS and adjusted names
    not sure why this differs from mvn_loglike
'''
nobs = len(x)
nobs2 = nobs / 2.0
SSR = (x**2).sum()
llf = -np.log(SSR) * nobs2 # concentrated likelihood
llf -= (1+np.log(np.pi/nobs2))*nobs2 # with likelihood constant
if np.any(sigma) and sigma.ndim == 2:
#FIXME: robust-enough check? unneeded if _det_sigma gets defined
llf -= .5*np.log(np.linalg.det(sigma))
return llf | loglike multivariate normal
copied from GLS and adjusted names
not sure why this differs from mvn_loglike | mvn_loglike_sum | python | statsmodels/statsmodels | statsmodels/miscmodels/try_mlecov.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/try_mlecov.py | BSD-3-Clause
def mvn_loglike(x, sigma):
'''loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
'''
#see numpy thread
#Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
sigmainv = linalg.inv(sigma)
logdetsigma = np.log(np.linalg.det(sigma))
nobs = len(x)
llf = - np.dot(x, np.dot(sigmainv, x))
llf -= nobs * np.log(2 * np.pi)
llf -= logdetsigma
llf *= 0.5
return llf | loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
use of inv and log-det should be replaced with something more efficient | mvn_loglike | python | statsmodels/statsmodels | statsmodels/miscmodels/try_mlecov.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/try_mlecov.py | BSD-3-Clause
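A quick check, written as a sketch, that ``mvn_loglike`` matches ``scipy.stats.multivariate_normal`` for a mean-zero vector.

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
A = rng.normal(size=(4, 4))
sigma = A @ A.T + 4 * np.eye(4)  # a well-conditioned covariance
x = rng.normal(size=4)

assert np.allclose(mvn_loglike(x, sigma),
                   stats.multivariate_normal.logpdf(x, cov=sigma))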
def mvn_loglike_chol(x, sigma):
'''loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
'''
#see numpy thread
#Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
sigmainv = np.linalg.inv(sigma)
cholsigmainv = np.linalg.cholesky(sigmainv).T
x_whitened = np.dot(cholsigmainv, x)
logdetsigma = np.log(np.linalg.det(sigma))
nobs = len(x)
from scipy import stats
print('scipy.stats')
print(np.log(stats.norm.pdf(x_whitened)).sum())
llf = - np.dot(x_whitened.T, x_whitened)
llf -= nobs * np.log(2 * np.pi)
llf -= logdetsigma
llf *= 0.5
return llf, logdetsigma, 2 * np.sum(np.log(np.diagonal(cholsigmainv))) | loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
use of inv and log-det should be replaced with something more efficient | mvn_loglike_chol | python | statsmodels/statsmodels | statsmodels/miscmodels/try_mlecov.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/try_mlecov.py | BSD-3-Clause
def mvn_nloglike_obs(x, sigma):
'''loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
'''
#see numpy thread
#Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
    #Still wasteful to calculate the full inverse first
sigmainv = np.linalg.inv(sigma)
cholsigmainv = np.linalg.cholesky(sigmainv).T
#2 * np.sum(np.log(np.diagonal(np.linalg.cholesky(A)))) #Dag mailinglist
# logdet not needed ???
#logdetsigma = 2 * np.sum(np.log(np.diagonal(cholsigmainv)))
x_whitened = np.dot(cholsigmainv, x)
# Unused, commented out
# sigmainv = linalg.cholesky(sigma)
# logdetsigma = np.log(np.linalg.det(sigma))
sigma2 = 1. # error variance is included in sigma
llike = 0.5 * (np.log(sigma2) - 2.* np.log(np.diagonal(cholsigmainv))
+ (x_whitened**2)/sigma2
+ np.log(2*np.pi))
return llike | loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
use of inv and log-det should be replaced with something more efficient | mvn_nloglike_obs | python | statsmodels/statsmodels | statsmodels/miscmodels/try_mlecov.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/try_mlecov.py | BSD-3-Clause
def _params2cov(self, params, nobs):
'''get autocovariance matrix from ARMA regression parameters
ar parameters are assumed to have rhs parameterization
'''
ar = np.r_[[1], -params[:self.nar]]
ma = np.r_[[1], params[-self.nma:]]
#print('ar', ar)
#print('ma', ma)
#print('nobs', nobs)
autocov = arma_acovf(ar, ma, nobs=nobs)
#print('arma_acovf(%r, %r, nobs=%d)' % (ar, ma, nobs))
#print(autocov.shape)
#something was strange; fixed in arma_acovf
autocov = autocov[:nobs]
sigma = toeplitz(autocov)
return sigma | get autocovariance matrix from ARMA regression parameters
ar parameters are assumed to have rhs parameterization | _params2cov | python | statsmodels/statsmodels | statsmodels/miscmodels/try_mlecov.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/try_mlecov.py | BSD-3-Clause |
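A standalone sketch of the same construction for an ARMA(1, 0) process with AR coefficient 0.5 (an assumed example value), using the same arma_acovf and toeplitz helpers:

import numpy as np
from scipy.linalg import toeplitz
from statsmodels.tsa.arima_process import arma_acovf

nobs = 5
ar = np.r_[1, -0.5]   # lhs form of y_t = 0.5*y_{t-1} + u_t
ma = np.r_[1]         # no MA part
autocov = arma_acovf(ar, ma, nobs=nobs)
sigma = toeplitz(autocov)   # (5, 5) autocovariance matrix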
def nloglikeobs(self, params):
"""
Loglikelihood of linear model with t distributed errors.
Parameters
----------
params : ndarray
The parameters of the model. The last 2 parameters are degrees of
freedom and scale.
Returns
-------
loglike : ndarray
The log likelihood of the model evaluated at `params` for each
observation defined by self.endog and self.exog.
Notes
-----
.. math:: \\ln L_{i}=\\ln\\Gamma\\left(\\frac{\\nu+1}{2}\\right)-\\ln\\Gamma\\left(\\frac{\\nu}{2}\\right)-\\frac{1}{2}\\ln(\\nu\\pi)-\\frac{\\nu+1}{2}\\ln\\left(1+\\frac{(y_{i}-x_{i}^{\\prime}\\beta)^{2}}{\\nu\\sigma^{2}}\\right)-\\ln\\sigma
The t distribution is the standard t distribution and not a standardized
t distribution, which means that the scale parameter is not equal to the
standard deviation.
self.fixed_params and self.expandparams can be used to fix some
parameters. (I doubt this has been tested in this model.)
"""
#print(len(params))
#store_params.append(params)
if self.fixed_params is not None:
#print('using fixed')
params = self.expandparams(params)
beta = params[:-2]
df = params[-2]
scale = np.abs(params[-1]) #TODO check behavior around zero
loc = np.dot(self.exog, beta)
endog = self.endog
x = (endog - loc)/scale
#next part is stats.t._logpdf
lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)
lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)
lPx -= np_log(scale) # correction for scale
return -lPx | Loglikelihood of linear model with t distributed errors.
Parameters
----------
params : ndarray
The parameters of the model. The last 2 parameters are degrees of
freedom and scale.
Returns
-------
loglike : ndarray
The log likelihood of the model evaluated at `params` for each
observation defined by self.endog and self.exog.
Notes
-----
.. math:: \\ln L_{i}=\\ln\\Gamma\\left(\\frac{\\nu+1}{2}\\right)-\\ln\\Gamma\\left(\\frac{\\nu}{2}\\right)-\\frac{1}{2}\\ln(\\nu\\pi)-\\frac{\\nu+1}{2}\\ln\\left(1+\\frac{(y_{i}-x_{i}^{\\prime}\\beta)^{2}}{\\nu\\sigma^{2}}\\right)-\\ln\\sigma
The t distribution is the standard t distribution and not a standardized
t distribution, which means that the scale parameter is not equal to the
standard deviation.
self.fixed_params and self.expandparams can be used to fix some
parameters. (I doubt this has been tested in this model.) | nloglikeobs | python | statsmodels/statsmodels | statsmodels/miscmodels/tmodel.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/tmodel.py | BSD-3-Clause |
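The hand-coded lPx terms should reproduce scipy's t logpdf including the loc/scale correction, which gives a quick way to validate the density used here. A minimal sketch with assumed parameter values:

import numpy as np
from scipy import stats
from scipy.special import gammaln

y, loc, df, scale = 1.5, 0.2, 5.0, 2.0   # assumed example values
z = (y - loc) / scale
lPx = gammaln((df + 1) / 2) - gammaln(df / 2.)
lPx -= 0.5 * np.log(df * np.pi) + (df + 1) / 2. * np.log(1 + z**2 / df)
lPx -= np.log(scale)   # correction for scale

assert np.allclose(lPx, stats.t.logpdf(y, df, loc=loc, scale=scale))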
def nloglikeobs(self, params):
"""
Loglikelihood for arma model for each observation, t-distributed
Notes
-----
The ancillary parameter is assumed to be the last element of
the params vector
"""
errorsest = self.geterrors(params[:-2])
#sigma2 = np.maximum(params[-1]**2, 1e-6) #do I need this
#axis = 0
#nobs = len(errorsest)
df = params[-2]
scale = np.abs(params[-1])
llike = - stats.t._logpdf(errorsest/scale, df) + np_log(scale)
return llike | Loglikelihood for arma model for each observation, t-distributed
Notes
-----
The ancillary parameter is assumed to be the last element of
the params vector | nloglikeobs | python | statsmodels/statsmodels | statsmodels/miscmodels/tmodel.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/tmodel.py | BSD-3-Clause |
def _check_inputs(self, endog, exog):
"""Handle endog that is pandas Categorical.
Checks if self.distr is legal and provides pandas ordered Categorical
support for endog.
Parameters
----------
endog : array_like
Endogenous, dependent variable, 1-D.
exog : array_like
Exogenous, explanatory variables.
Currently not used.
Returns
-------
endog : array_like or pandas Series
If the original endog is a pandas ordered Categorical Series,
then the returned endog are the ``codes``, i.e. integer
representation of ordered categorical variable
labels : None or list
If original endog is pandas ordered Categorical Series, then the
categories are returned. Otherwise ``labels`` is None.
is_pandas : bool
This is True if original endog is a pandas ordered Categorical
Series and False otherwise.
"""
if not isinstance(self.distr, stats.rv_continuous):
msg = (
f"{self.distr.name} is not a scipy.stats distribution."
)
warnings.warn(msg)
labels = None
is_pandas = False
if isinstance(endog, pd.Series):
if isinstance(endog.dtypes, CategoricalDtype):
if not endog.dtype.ordered:
warnings.warn("the endog has ordered == False, "
"risk of capturing a wrong order for the "
"categories. ordered == True preferred.",
Warning)
endog_name = endog.name
labels = endog.values.categories
endog = endog.cat.codes
if endog.min() == -1: # means there is a missing value
raise ValueError("missing values in categorical endog are "
"not supported")
endog.name = endog_name
is_pandas = True
return endog, labels, is_pandas | Handle endog that is pandas Categorical.
Checks if self.distr is legal and provides pandas ordered Categorical
support for endog.
Parameters
----------
endog : array_like
Endogenous, dependent variable, 1-D.
exog : array_like
Exogenous, explanatory variables.
Currently not used.
Returns
-------
endog : array_like or pandas Series
If the original endog is a pandas ordered Categorical Series,
then the returned endog are the ``codes``, i.e. integer
representation of ordered categorical variable
labels : None or list
If original endog is pandas ordered Categorical Series, then the
categories are returned. Otherwise ``labels`` is None.
is_pandas : bool
This is True if original endog is a pandas ordered Categorical
Series and False otherwise. | _check_inputs | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
def cdf(self, x):
"""Cdf evaluated at x.
Parameters
----------
x : array_like
Points at which cdf is evaluated. In the model `x` is the latent
variable plus threshold constants.
Returns
-------
Value of the cumulative distribution function of the underlying latent
variable evaluated at x.
"""
return self.distr.cdf(x) | Cdf evaluated at x.
Parameters
----------
x : array_like
Points at which cdf is evaluated. In the model `x` is the latent
variable plus threshold constants.
Returns
-------
Value of the cumulative distribution function of the underlying latent
variable evaluated at x. | cdf | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
def pdf(self, x):
"""Pdf evaluated at x
Parameters
----------
x : array_like
Points at which pdf is evaluated. In the model `x` is the latent
variable plus threshold constants.
Returns
-------
Value of the probability density function of the underlying latent
variable evaluated at x.
"""
return self.distr.pdf(x) | Pdf evaluated at x.
Parameters
----------
x : array_like
Points at which pdf is evaluated. In the model `x` is the latent
variable plus threshold constants.
Returns
-------
Value of the probability density function of the underlying latent
variable evaluated at x. | pdf | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
def prob(self, low, upp):
"""Interval probability.
Probability that value is in interval (low, upp], computed as
prob = cdf(upp) - cdf(low)
Parameters
----------
low : array_like
lower bound for interval
upp : array_like
upper bound for interval
Returns
-------
float or ndarray
Probability that value falls in interval (low, upp]
"""
return np.maximum(self.cdf(upp) - self.cdf(low), 0) | Interval probability.
Probability that value is in interval (low, upp], computed as
prob = cdf(upp) - cdf(low)
Parameters
----------
low : array_like
lower bound for interval
upp : array_like
upper bound for interval
Returns
-------
float or ndarray
Probability that value falls in interval (low, upp] | prob | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
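For a probit latent distribution, prob() is simply the normal probability mass between adjacent cutoffs; because the cutoffs partition the real line (with the -inf/+inf padding), the interval probabilities for one observation sum to one. A small sketch with assumed cutoffs:

import numpy as np
from scipy import stats

low = np.array([-np.inf, -0.5, 1.0])   # assumed cutoffs for 3 levels
upp = np.array([-0.5, 1.0, np.inf])
p = np.maximum(stats.norm.cdf(upp) - stats.norm.cdf(low), 0)
assert np.isclose(p.sum(), 1.0)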
def transform_threshold_params(self, params):
"""transformation of the parameters in the optimization
Parameters
----------
params : ndarray
Contains (exog_coef, transformed_thresholds) where exog_coef are
the coefficients for the explanatory variables in the linear term
and transformed_thresholds are the transformed threshold or cutoff
points. The first, lowest threshold is unchanged; all other
thresholds are in terms of exponentiated increments.
Returns
-------
thresh : ndarray
Thresh are the thresholds or cutoff constants for the intervals.
"""
th_params = params[-(self.k_levels - 1):]
thresh = np.concatenate((th_params[:1],
np.exp(th_params[1:]))).cumsum()
thresh = np.concatenate(([-np.inf], thresh, [np.inf]))
return thresh | transformation of the parameters in the optimization
Parameters
----------
params : ndarray
Contains (exog_coef, transformed_thresholds) where exog_coef are
the coefficients for the explanatory variables in the linear term
and transformed_thresholds are the transformed threshold or cutoff
points. The first, lowest threshold is unchanged; all other
thresholds are in terms of exponentiated increments.
Returns
-------
thresh : ndarray
Thresh are the thresholds or cutoff constants for the intervals. | transform_threshold_params | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
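The exp/cumsum reparameterization yields strictly increasing thresholds for any real-valued input, and transform_reverse_threshold_params undoes it exactly. A standalone round-trip sketch (without the -inf/+inf padding the model adds):

import numpy as np

th_params = np.array([-1.0, 0.3, -0.7])   # unconstrained, assumed values
thresh = np.concatenate((th_params[:1], np.exp(th_params[1:]))).cumsum()
assert np.all(np.diff(thresh) > 0)        # monotone by construction

back = np.concatenate((thresh[:1], np.log(np.diff(thresh))))
assert np.allclose(back, th_params)       # round trip recovers the input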
def transform_reverse_threshold_params(self, params):
"""obtain transformed thresholds from original thresholds or cutoffs
Parameters
----------
params : ndarray
Threshold values, cutoff constants for choice intervals, which
need to be monotonically increasing.
Returns
-------
thresh_params : ndarray
Transformed threshold parameter.
The first, lowest threshold is unchanged, all other thresholds are
in terms of exponentiated increments.
Transformed parameters can be any real number without restrictions.
"""
thresh_params = np.concatenate((params[:1],
np.log(np.diff(params[:-1]))))
return thresh_params | obtain transformed thresholds from original thresholds or cutoffs
Parameters
----------
params : ndarray
Threshold values, cutoff constants for choice intervals, which
need to be monotonically increasing.
Returns
-------
thresh_params : ndarray
Transformed threshold parameter.
The first, lowest threshold is unchanged, all other thresholds are
in terms of exponentiated increments.
Transformed parameters can be any real number without restrictions. | transform_reverse_threshold_params | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
def predict(self, params, exog=None, offset=None, which="prob"):
"""
Predicted probabilities for each level of the ordinal endog.
Parameters
----------
params : ndarray
Parameters for the Model, (exog_coef, transformed_thresholds).
exog : array_like, optional
Design / exogenous data. If exog is None, model exog is used.
offset : array_like, optional
Offset is added to the linear prediction with coefficient
equal to 1. If offset is not provided and exog
is None, uses the model's offset if present. If not, uses
0 as the default value.
which : {"prob", "linpred", "cumprob"}
Determines which statistic is predicted.
- prob : predicted probabilities to be in each choice. 2-dim.
- linpred : 1-dim linear prediction of the latent variable
``x b + offset``
- cumprob : predicted cumulative probability to be in choice k or
lower
Returns
-------
predicted values : ndarray
If which is "prob", then 2-dim predicted probabilities with
observations in rows and one column for each category or level of
the categorical dependent variable.
If which is "cumprob", then "prob" ar cumulatively added to get the
cdf at k, i.e. probability of observing choice k or lower.
If which is "linpred", then the conditional prediction of the
latent variable is returned. In this case, the return is
one-dimensional.
"""
# note, exog and offset handling is in linpred
thresh = self.transform_threshold_params(params)
xb = self._linpred(params, exog=exog, offset=offset)
if which == "linpred":
return xb
xb = xb[:, None]
low = thresh[:-1] - xb
upp = thresh[1:] - xb
if which == "prob":
prob = self.prob(low, upp)
return prob
elif which in ["cum", "cumprob"]:
cumprob = self.cdf(upp)
return cumprob
else:
raise ValueError("`which` is not available") | Predicted probabilities for each level of the ordinal endog.
Parameters
----------
params : ndarray
Parameters for the Model, (exog_coef, transformed_thresholds).
exog : array_like, optional
Design / exogenous data. If exog is None, model exog is used.
offset : array_like, optional
Offset is added to the linear prediction with coefficient
equal to 1. If offset is not provided and exog
is None, uses the model's offset if present. If not, uses
0 as the default value.
which : {"prob", "linpred", "cumprob"}
Determines which statistic is predicted.
- prob : predicted probabilities to be in each choice. 2-dim.
- linpred : 1-dim linear prediction of the latent variable
``x b + offset``
- cumprob : predicted cumulative probability to be in choice k or
lower
Returns
-------
predicted values : ndarray
If which is "prob", then 2-dim predicted probabilities with
observations in rows and one column for each category or level of
the categorical dependent variable.
If which is "cumprob", then "prob" ar cumulatively added to get the
cdf at k, i.e. probability of observing choice k or lower.
If which is "linpred", then the conditional prediction of the
latent variable is returned. In this case, the return is
one-dimensional. | predict | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
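A hedged usage sketch of predict on a fitted OrderedModel; the data are simulated here rather than taken from a statsmodels example dataset, and the coefficients are arbitrary:

import numpy as np
from statsmodels.miscmodels.ordinal_model import OrderedModel

rng = np.random.default_rng(42)
x = rng.standard_normal((200, 2))
latent = x @ np.array([1.0, -0.5]) + rng.logistic(size=200)
y = np.digitize(latent, [-1.0, 1.0])   # 3 ordered levels: 0, 1, 2

res = OrderedModel(y, x, distr="logit").fit(method="bfgs", disp=False)
probs = res.predict(x)                 # (200, 3), one column per level
assert np.allclose(probs.sum(axis=1), 1.0)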
def _linpred(self, params, exog=None, offset=None):
"""Linear prediction of latent variable `x b + offset`.
Parameters
----------
params : ndarray
Parameters for the model, (exog_coef, transformed_thresholds)
exog : array_like, optional
Design / exogenous data. If exog is None, model exog is used.
offset : array_like, optional
Offset is added to the linear prediction with coefficient
equal to 1. If offset is not provided and exog
is None, uses the model's offset if present. If not, uses
0 as the default value.
Returns
-------
linear : ndarray
1-dim linear prediction given by exog times linear params plus
offset. This is the prediction for the underlying latent variable.
If exog and offset are None, then the predicted values are zero.
"""
if exog is None:
exog = self.exog
if offset is None:
offset = self.offset
else:
if offset is None:
offset = 0
if offset is not None:
offset = np.asarray(offset)
if exog is not None:
_exog = np.asarray(exog)
_params = np.asarray(params)
linpred = _exog.dot(_params[:-(self.k_levels - 1)])
else: # means self.exog is also None
linpred = np.zeros(self.nobs)
if offset is not None:
linpred += offset
return linpred | Linear prediction of latent variable `x b + offset`.
Parameters
----------
params : ndarray
Parameters for the model, (exog_coef, transformed_thresholds)
exog : array_like, optional
Design / exogenous data. If exog is None, model exog is used.
offset : array_like, optional
Offset is added to the linear prediction with coefficient
equal to 1. If offset is not provided and exog
is None, uses the model's offset if present. If not, uses
0 as the default value.
Returns
-------
linear : ndarray
1-dim linear prediction given by exog times linear params plus
offset. This is the prediction for the underlying latent variable.
If exog and offset are None, then the predicted values are zero. | _linpred | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
def _bounds(self, params):
"""Integration bounds for the observation specific interval.
This defines the lower and upper bounds for the intervals of the
choices of all observations.
The bounds for observation i are given by
a_{k_i-1} - linpred_i, a_k_i - linpred_i
where
- k_i is the choice in observation i.
- a_{k_i-1} and a_k_i are thresholds (cutoffs) for choice k_i
- linpred_i is the linear prediction for observation i
Parameters
----------
params : ndarray
Parameters for the model, (exog_coef, transformed_thresholds)
Returns
-------
low : ndarray
Lower bounds for choice intervals of each observation,
1-dim with length nobs
upp : ndarray
Upper bounds for choice intervals of each observation,
1-dim with length nobs.
"""
thresh = self.transform_threshold_params(params)
thresh_i_low = thresh[self.endog]
thresh_i_upp = thresh[self.endog + 1]
xb = self._linpred(params)
low = thresh_i_low - xb
upp = thresh_i_upp - xb
return low, upp | Integration bounds for the observation specific interval.
This defines the lower and upper bounds for the intervals of the
choices of all observations.
The bounds for observation i are given by
a_{k_i-1} - linpred_i, a_k_i - linpred_i
where
- k_i is the choice in observation i.
- a_{k_i-1} and a_k_i are thresholds (cutoffs) for choice k_i
- linpred_i is the linear prediction for observation i
Parameters
----------
params : ndarray
Parameters for the model, (exog_coef, transformed_thresholds)
Returns
-------
low : ndarray
Lower bounds for choice intervals of each observation,
1-dim with length nobs
upp : ndarray
Upper bounds for choice intervals of each observation,
1-dim with length nobs. | _bounds | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
def loglikeobs(self, params):
"""
Log-likelihood of OrderedModel for all observations.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike_obs : array_like
The log likelihood for each observation of the model evaluated
at ``params``.
"""
low, upp = self._bounds(params)
prob = self.prob(low, upp)
return np.log(prob + 1e-20) | Log-likelihood of OrderedModel for all observations.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike_obs : array_like
The log likelihood for each observation of the model evaluated
at ``params``. | loglikeobs | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
def score_obs_(self, params):
"""score, first derivative of loglike for each observations
This currently only implements the derivative with respect to the
exog parameters, but not with respect to threshold parameters.
"""
low, upp = self._bounds(params)
prob = self.prob(low, upp)
pdf_upp = self.pdf(upp)
pdf_low = self.pdf(low)
# TODO the following doesn't work yet because of the incremental exp
# parameterization. The following was written based on Greene for the
# simple non-incremental parameterization.
# k = self.k_levels - 1
# idx = self.endog
# score_factor = np.zeros((self.nobs, k + 1 + 2)) #+2 avoids idx bounds
#
# rows = np.arange(self.nobs)
# shift = 1
# score_factor[rows, shift + idx-1] = -pdf_low
# score_factor[rows, shift + idx] = pdf_upp
# score_factor[:, 0] = pdf_upp - pdf_low
score_factor = (pdf_upp - pdf_low)[:, None]
score_factor /= prob[:, None]
so = np.column_stack((-score_factor[:, :1] * self.exog,
score_factor[:, 1:]))
return so | score, first derivative of loglike for each observations
This currently only implements the derivative with respect to the
exog parameters, but not with respect to threshold parameters. | score_obs_ | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
def start_params(self):
"""Start parameters for the optimization corresponding to null model.
The thresholds are computed from the observed frequencies and
transformed to the exponential increments parameterization.
The parameters for explanatory variables are set to zero.
"""
# start params based on model without exog
freq = np.bincount(self.endog) / len(self.endog)
start_ppf = self.distr.ppf(np.clip(freq.cumsum(), 0, 1))
start_threshold = self.transform_reverse_threshold_params(start_ppf)
start_params = np.concatenate((np.zeros(self.k_vars), start_threshold))
return start_params | Start parameters for the optimization corresponding to null model.
The thresholds are computed from the observed frequencies and
transformed to the exponential increments parameterization.
The parameters for explanatory variables are set to zero. | start_params | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
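A standalone sketch of the same computation: cumulative observed frequencies pushed through the latent ppf give the null-model cutoffs, which are then reverse-transformed. (The final cumsum entry of 1.0 is dropped here instead of carrying the resulting +inf; an assumed simplification.)

import numpy as np
from scipy import stats

endog = np.array([0, 0, 1, 1, 1, 2, 2, 2, 2, 2])   # assumed 3-level endog
freq = np.bincount(endog) / len(endog)             # [0.2, 0.3, 0.5]
cuts = stats.norm.ppf(freq.cumsum()[:-1])          # finite probit cutoffs
start_threshold = np.concatenate((cuts[:1], np.log(np.diff(cuts))))
start_params = np.concatenate((np.zeros(2), start_threshold))  # 2 exog vars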
def pred_table(self):
"""prediction table
Returns a pandas DataFrame.
"""
# todo: add category labels
categories = np.arange(self.model.k_levels)
observed = pd.Categorical(self.model.endog,
categories=categories, ordered=True)
predicted = pd.Categorical(self.predict().argmax(1),
categories=categories, ordered=True)
table = pd.crosstab(predicted,
observed.astype(int),
margins=True,
dropna=False).T.fillna(0)
return table | prediction table
Returns a pandas DataFrame. | pred_table | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause
def llnull(self):
"""
Value of the loglikelihood of model without explanatory variables
"""
params_null = self.model.start_params
return self.model.loglike(params_null) | Value of the loglikelihood of model without explanatory variables | llnull | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
def prsquared(self):
"""
McFadden's pseudo-R-squared. `1 - (llf / llnull)`
"""
return 1 - self.llf/self.llnull | McFadden's pseudo-R-squared. `1 - (llf / llnull)` | prsquared | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
def llr(self):
"""
Likelihood ratio chi-squared statistic; `-2*(llnull - llf)`
"""
return -2*(self.llnull - self.llf) | Likelihood ratio chi-squared statistic; `-2*(llnull - llf)` | llr | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
def llr_pvalue(self):
"""
The chi-squared probability of getting a log-likelihood ratio
statistic greater than llr. llr has a chi-squared distribution
with degrees of freedom `df_model`.
"""
# number of restrictions is number of exog
return stats.distributions.chi2.sf(self.llr, self.model.k_vars) | The chi-squared probability of getting a log-likelihood ratio
statistic greater than llr. llr has a chi-squared distribution
with degrees of freedom `df_model`. | llr_pvalue | python | statsmodels/statsmodels | statsmodels/miscmodels/ordinal_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/miscmodels/ordinal_model.py | BSD-3-Clause |
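Taken together, llr and llr_pvalue implement a likelihood-ratio test of the fitted model against the threshold-only null. A minimal sketch of the arithmetic with hypothetical loglikelihood values:

from scipy import stats

llf, llnull, k_vars = -210.3, -225.8, 2   # hypothetical values
llr = -2 * (llnull - llf)                 # 31.0
pval = stats.chi2.sf(llr, df=k_vars)      # far below 0.05 here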