| code (string, 26-870k chars) | docstring (string, 1-65.6k chars) | func_name (string, 1-194 chars) | language (string, 1 class) | repo (string, 8-68 chars) | path (string, 5-194 chars) | url (string, 46-254 chars) | license (string, 4 classes) |
---|---|---|---|---|---|---|---|
def _set_tuning_param(self, c, inplace=False):
"""Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param.
"""
a = c / 4
b = c / 2
if inplace:
self.c = c
self.a = a
self.b = b
return self
else:
return self.__class__(a=a, b=b, c=c) | Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param. | _set_tuning_param | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
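A minimal usage sketch of the `inplace` semantics above, assuming the method belongs to `statsmodels.robust.norms.Hampel` (whose defaults a=2, b=4, c=8 follow the a = c/4, b = c/2 rule used here):

```python
from statsmodels.robust import norms

norm = norms.Hampel()                               # assumed defaults: a=2, b=4, c=8
wider = norm._set_tuning_param(12, inplace=False)   # new instance; `norm` is unchanged
norm._set_tuning_param(12, inplace=True)            # mutates `norm` itself
print(wider.a, wider.b, wider.c)                    # 3.0 6.0 12 under the a=c/4, b=c/2 rule
```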
def _subset(self, z):
"""
Hampel's function is defined piecewise over the range of z
"""
z = np.abs(np.asarray(z))
t1 = np.less_equal(z, self.a)
t2 = np.less_equal(z, self.b) * np.greater(z, self.a)
t3 = np.less_equal(z, self.c) * np.greater(z, self.b)
return t1, t2, t3 | Hampel's function is defined piecewise over the range of z | _subset | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi_deriv(self, z):
"""Derivative of psi function, second derivative of rho function.
"""
a, b, c = self.a, self.b, self.c
z_isscalar = np.isscalar(z)
z = np.atleast_1d(z)
t1, _, t3 = self._subset(z)
dt = np.promote_types(z.dtype, "float")
d = np.zeros(z.shape, dtype=dt)
d[t1] = 1.0
zt3 = z[t3]
d[t3] = -(a * np.sign(zt3) * zt3) / (np.abs(zt3) * (c - b))
if z_isscalar:
d = d[0]
return d | Derivative of psi function, second derivative of rho function. | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def get_tuning(cls, bp=None, eff=None):
"""Tuning parameter for given breakdown point or efficiency.
This currently only returns values from a table.
Parameters
----------
bp : float in [0.05, 0.5] or None
Required breakdown point
Either bp or eff has to be specified, but not both.
eff : float or None
Required asymptotic efficiency.
Either bp or eff has to be specified, but not both.
Returns
-------
float : tuning parameter.
"""
if ((bp is None and eff is None) or
(bp is not None and eff is not None)):
raise ValueError("exactly one of bp and eff needs to be provided")
if bp is not None:
return _tables.tukeybiweight_bp[bp]
elif eff is not None:
return _tables.tukeybiweight_eff[eff] | Tuning parameter for given breakdown point or efficiency.
This currently only returns values from a table.
Parameters
----------
bp : float in [0.05, 0.5] or None
Required breakdown point
Either bp or eff has to be specified, but not both.
eff : float or None
Required asymptotic efficiency.
Either bp or eff has to be specified, but not both.
Returns
-------
float : tuning parameter. | get_tuning | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
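A short usage sketch of the table lookup above, assuming `get_tuning` is exposed as a classmethod of `statsmodels.robust.norms.TukeyBiweight` and that the requested breakdown point and efficiency values are keys of the internal tables:

```python
from statsmodels.robust.norms import TukeyBiweight

c_bp = TukeyBiweight.get_tuning(bp=0.5)     # tuning constant for a 50% breakdown point
c_eff = TukeyBiweight.get_tuning(eff=0.95)  # tuning constant for 95% asymptotic efficiency

# Exactly one of bp / eff must be given; passing both (or neither) raises ValueError.
```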
def _set_tuning_param(self, c, inplace=False):
"""Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param.
"""
# todo : change default to inplace=False, when tools are fixed
if inplace:
self.c = c
return self
else:
return self.__class__(c=c) | Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param. | _set_tuning_param | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def _subset(self, z):
"""
Tukey's biweight is defined piecewise over the range of z
"""
z = np.abs(np.asarray(z))
return np.less_equal(z, self.c) | Tukey's biweight is defined piecewise over the range of z | _subset | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi_deriv(self, z):
"""
The derivative of Tukey's biweight psi function
Notes
-----
Used to estimate the robust covariance matrix.
"""
subset = self._subset(z)
return subset * ((1 - (z/self.c)**2)**2
- (4*z**2/self.c**2) * (1-(z/self.c)**2)) | The derivative of Tukey's biweight psi function
Notes
-----
Used to estimate the robust covariance matrix. | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def _set_tuning_param(self, c, inplace=False):
"""Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param.
"""
if inplace:
self.c = c
return self
else:
return self.__class__(c=c, k=self.k) | Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param. | _set_tuning_param | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def _subset(self, z):
"""
TukeyQuartic is defined piecewise over the range of z
"""
z = np.abs(np.asarray(z))
return np.less_equal(z, self.c) | TukeyQuartic is defined piecewise over the range of z | _subset | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi_deriv(self, z):
"""
The derivative of the TukeyQuartic psi function.
Notes
-----
Used to estimate the robust covariance matrix.
"""
c = self.c
k = self.k
subset = self._subset(z)
x = z / c
# d/dx(x (1 - (x/c)^k)^2) = -(1 - (x/c)^k) (2 k (x/c)^k + (x/c)^k - 1)
return subset * (1 - x**k) * (1 - (2 * k + 1) * x**k) | The derivative of the TukeyQuartic psi function.
Notes
-----
Used to estimate the robust covariance matrix. | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def _set_tuning_param(self, c, inplace=False):
"""Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param.
"""
if inplace:
self.c = c
return self
else:
return self.__class__(c=c, df=self.df) | Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param. | _set_tuning_param | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def rho(self, z):
"""
The rho function of the StudentT norm.
Parameters
----------
z : ndarray
1d array
Returns
-------
rho : ndarray
rho(z) = (c**2 * df / 2.) * log(df + (z / c)**2) - const
The ``const`` shifts the rho function so that rho(0) = 0.
"""
c = self.c
df = self.df
z = np.asarray(z)
const = (c**2 * df / 2.) * np.log(df) if df != 0 else 0
return (c**2 * df / 2.) * np.log(df + (z / c)**2) - const | The rho function of the StudentT norm.
Parameters
----------
z : ndarray
1d array
Returns
-------
rho : ndarray
rho(z) = (c**2 * df / 2.) * log(df + (z / c)**2) - const
The ``const`` shifts the rho function so that rho(0) = 0. | rho | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi(self, z):
"""
The psi function of the StudentT norm.
The analytic derivative of rho.
Parameters
----------
z : array_like
1d array
Returns
-------
psi : ndarray
psi(z) = z * df / (df + (z / c)**2)
"""
c = self.c
df = self.df
z = np.asarray(z)
return z * df / (df + (z / c)**2) | The psi function of the StudentT norm.
The analytic derivative of rho.
Parameters
----------
z : array_like
1d array
Returns
-------
psi : ndarray
psi(z) = z * df / (df + (z / c)**2) | psi | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def weights(self, z):
"""
The weighting function for the IRLS algorithm of the StudentT norm.
The psi function scaled by the input z
Parameters
----------
z : array_like
1d array
Returns
-------
weights : ndarray
weights(z) = df / (df + (z / c)**2)
"""
c = self.c
df = self.df
z = np.asarray(z)
return df / (df + (z / c)**2) | The weighting function for the IRLS algorithm of the StudentT norm.
The psi function scaled by the input z
Parameters
----------
z : array_like
1d array
Returns
-------
weights : ndarray
weights(z) = df / (df + (z / c)**2) | weights | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi_deriv(self, z):
"""
The derivative of the psi function of the StudentT norm.
Returns
-------
psi_deriv : ndarray
The derivative of the StudentT psi function evaluated at z.
Notes
-----
Used to estimate the robust covariance matrix.
"""
c = self.c
df = self.df
x = np.asarray(z) / c
return - 2 * df * x**2 / (df + x**2)**2 + df / (df + x**2) | The derivative of the psi function of the StudentT norm.
Returns
-------
psi_deriv : ndarray
The derivative of the StudentT psi function evaluated at z.
Notes
-----
Used to estimate the robust covariance matrix. | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
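The corrected Returns entries above can be checked numerically: psi should be the derivative of rho, and psi_deriv the derivative of psi. A standalone finite-difference sanity check that re-implements the same formulas locally (the tuning values are arbitrary):

```python
import numpy as np

c, df = 2.5, 3.0  # arbitrary tuning constant and degrees of freedom for the check

def rho(z):
    return (c**2 * df / 2.) * np.log(df + (z / c)**2) - (c**2 * df / 2.) * np.log(df)

def psi(z):
    return z * df / (df + (z / c)**2)

def psi_deriv(z):
    x = z / c
    return -2 * df * x**2 / (df + x**2)**2 + df / (df + x**2)

z = np.linspace(-5., 5., 11)
h = 1e-6
# central differences agree with the analytic derivatives
assert np.allclose((rho(z + h) - rho(z - h)) / (2 * h), psi(z), atol=1e-5)
assert np.allclose((psi(z + h) - psi(z - h)) / (2 * h), psi_deriv(z), atol=1e-5)
```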
def rho(self, z):
"""
The robust criterion function for MQuantileNorm.
Parameters
----------
z : array_like
1d array
Returns
-------
rho : ndarray
"""
qq = self._get_q(z)
return qq * self.base_norm.rho(z) | The robust criterion function for MQuantileNorm.
Parameters
----------
z : array_like
1d array
Returns
-------
rho : ndarray | rho | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi(self, z):
"""
The psi function for MQuantileNorm estimator.
The analytic derivative of rho
Parameters
----------
z : array_like
1d array
Returns
-------
psi : ndarray
"""
qq = self._get_q(z)
return qq * self.base_norm.psi(z) | The psi function for MQuantileNorm estimator.
The analytic derivative of rho
Parameters
----------
z : array_like
1d array
Returns
-------
psi : ndarray | psi | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def weights(self, z):
"""
MQuantileNorm weighting function for the IRLS algorithm
The psi function scaled by z, psi(z) / z
Parameters
----------
z : array_like
1d array
Returns
-------
weights : ndarray
"""
qq = self._get_q(z)
return qq * self.base_norm.weights(z) | MQuantileNorm weighting function for the IRLS algorithm
The psi function scaled by z, psi(z) / z
Parameters
----------
z : array_like
1d array
Returns
-------
weights : ndarray | weights | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi_deriv(self, z):
'''
The derivative of MQuantileNorm function
Parameters
----------
z : array_like
1d array
Returns
-------
psi_deriv : ndarray
Notes
-----
Used to estimate the robust covariance matrix.
'''
qq = self._get_q(z)
return qq * self.base_norm.psi_deriv(z) | The derivative of MQuantileNorm function
Parameters
----------
z : array_like
1d array
Returns
-------
psi_deriv : ndarray
Notes
-----
Used to estimate the robust covariance matrix. | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def __call__(self, z):
"""
Returns the value of estimator rho applied to an input
"""
return self.rho(z) | Returns the value of estimator rho applied to an input | __call__ | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def estimate_location(a, scale, norm=None, axis=0, initial=None,
maxiter=30, tol=1.0e-06):
"""
M-estimator of location using self.norm and a current
estimator of scale.
This iteratively finds a solution to
norm.psi((a-mu)/scale).sum() == 0
Parameters
----------
a : ndarray
Array over which the location parameter is to be estimated
scale : ndarray
Scale parameter to be used in M-estimator
norm : RobustNorm, optional
Robust norm used in the M-estimator. The default is HuberT().
axis : int, optional
Axis along which to estimate the location parameter. The default is 0.
initial : ndarray, optional
Initial condition for the location parameter. Default is None, which
uses the median of a.
maxiter : int, optional
Maximum number of iterations. The default is 30.
tol : float, optional
Tolerance for convergence. The default is 1e-06.
Returns
-------
mu : ndarray
Estimate of location
"""
if norm is None:
norm = HuberT()
if initial is None:
mu = np.median(a, axis)
else:
mu = initial
for _ in range(maxiter):
W = norm.weights((a-mu)/scale)
nmu = np.sum(W*a, axis) / np.sum(W, axis)
if np.all(np.less(np.abs(mu - nmu), scale * tol)):
return nmu
else:
mu = nmu
raise ValueError("location estimator failed to converge in %d iterations"
% maxiter) | M-estimator of location using self.norm and a current
estimator of scale.
This iteratively finds a solution to
norm.psi((a-mu)/scale).sum() == 0
Parameters
----------
a : ndarray
Array over which the location parameter is to be estimated
scale : ndarray
Scale parameter to be used in M-estimator
norm : RobustNorm, optional
Robust norm used in the M-estimator. The default is HuberT().
axis : int, optional
Axis along which to estimate the location parameter. The default is 0.
initial : ndarray, optional
Initial condition for the location parameter. Default is None, which
uses the median of a.
maxiter : int, optional
Maximum number of iterations. The default is 30.
tol : float, optional
Tolerance for convergence. The default is 1e-06.
Returns
-------
mu : ndarray
Estimate of location | estimate_location | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
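A minimal usage sketch of `estimate_location`, assuming the imports below; `mad` from `statsmodels.robust.scale` supplies the scale estimate:

```python
import numpy as np
from statsmodels.robust.norms import HuberT, estimate_location
from statsmodels.robust.scale import mad

x = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 50.0])   # one gross outlier
s = mad(x)                                       # robust scale estimate
loc = estimate_location(x, s, norm=HuberT())     # robust location, far less affected than x.mean()
```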
def mad(a, c=Gaussian.ppf(3 / 4.0), axis=0, center=np.median):
# c \approx .6745
"""
The Median Absolute Deviation along given axis of an array
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant. Defined as scipy.stats.norm.ppf(3/4.),
which is approximately 0.6745.
axis : int, optional
The default is 0. Can also be None.
center : callable or float
If a callable is provided, such as the default `np.median` then it
is expected to be called center(a). The axis argument will be applied
via np.apply_over_axes. Otherwise, provide a float.
Returns
-------
mad : float
`mad` = median(abs(`a` - center))/`c`
"""
a = array_like(a, "a", ndim=None)
c = float_like(c, "c")
if not a.size:
center_val = 0.0
elif callable(center):
if axis is not None:
center_val = np.apply_over_axes(center, a, axis)
else:
center_val = center(a.ravel())
else:
center_val = float_like(center, "center")
err = (np.abs(a - center_val)) / c
if not err.size:
if axis is None or err.ndim == 1:
return np.nan
else:
shape = list(err.shape)
shape.pop(axis)
return np.empty(shape)
return np.median(err, axis=axis) | The Median Absolute Deviation along given axis of an array
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant. Defined as scipy.stats.norm.ppf(3/4.),
which is approximately 0.6745.
axis : int, optional
The default is 0. Can also be None.
center : callable or float
If a callable is provided, such as the default `np.median` then it
is expected to be called center(a). The axis argument will be applied
via np.apply_over_axes. Otherwise, provide a float.
Returns
-------
mad : float
`mad` = median(abs(`a` - center))/`c` | mad | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
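A quick illustration of `mad`: for the array below the median is 5, the absolute deviations from it have median 2, and dividing by c ≈ 0.6745 rescales the result so it estimates the standard deviation under normality.

```python
import numpy as np
from statsmodels.robust.scale import mad

x = np.arange(1.0, 10.0)      # 1, 2, ..., 9 -> median 5, unscaled MAD = 2
print(mad(x))                 # 2 / 0.6745, roughly 2.97
print(mad(x, center=0))       # deviations taken from 0 instead of the median
```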
def iqr(a, c=Gaussian.ppf(3 / 4) - Gaussian.ppf(1 / 4), axis=0):
"""
The normalized interquartile range along given axis of an array
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant, used to get consistent estimates of the
standard deviation at the normal distribution. Defined as
scipy.stats.norm.ppf(3/4.) - scipy.stats.norm.ppf(1/4.), which is
approximately 1.349.
axis : int, optional
The default is 0. Can also be None.
Returns
-------
The normalized interquartile range
"""
a = array_like(a, "a", ndim=None)
c = float_like(c, "c")
if a.ndim == 0:
raise ValueError("a should have at least one dimension")
elif a.size == 0:
return np.nan
else:
quantiles = np.quantile(a, [0.25, 0.75], axis=axis)
return np.squeeze(np.diff(quantiles, axis=0) / c) | The normalized interquartile range along given axis of an array
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant, used to get consistent estimates of the
standard deviation at the normal distribution. Defined as
scipy.stats.norm.ppf(3/4.) - scipy.stats.norm.ppf(1/4.), which is
approximately 1.349.
axis : int, optional
The default is 0. Can also be None.
Returns
-------
The normalized interquartile range | iqr | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
def qn_scale(a, c=1 / (np.sqrt(2) * Gaussian.ppf(5 / 8)), axis=0):
"""
Computes the Qn robust estimator of scale
The Qn scale estimator is a more efficient alternative to the MAD.
The Qn scale estimator of an array a of length n is defined as
c * {abs(a[i] - a[j]): i<j}_(k), for k equal to [n/2] + 1 choose 2. Thus,
the Qn estimator is the k-th order statistic of the absolute differences
of the array. The optional constant is used to normalize the estimate
as explained below. The implementation follows the algorithm described
in Croux and Rousseeuw (1992).
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant. The default value is used to get consistent
estimates of the standard deviation at the normal distribution.
axis : int, optional
The default is 0.
Returns
-------
{float, ndarray}
The Qn robust estimator of scale
"""
a = array_like(
a, "a", ndim=None, dtype=np.float64, contiguous=True, order="C"
)
c = float_like(c, "c")
if a.ndim == 0:
raise ValueError("a should have at least one dimension")
elif a.size == 0:
return np.nan
else:
out = np.apply_along_axis(_qn, axis=axis, arr=a, c=c)
if out.ndim == 0:
return float(out)
return out | Computes the Qn robust estimator of scale
The Qn scale estimator is a more efficient alternative to the MAD.
The Qn scale estimator of an array a of length n is defined as
c * {abs(a[i] - a[j]): i<j}_(k), for k equal to [n/2] + 1 choose 2. Thus,
the Qn estimator is the k-th order statistic of the absolute differences
of the array. The optional constant is used to normalize the estimate
as explained below. The implementation follows the algorithm described
in Croux and Rousseeuw (1992).
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant. The default value is used to get consistent
estimates of the standard deviation at the normal distribution.
axis : int, optional
The default is 0.
Returns
-------
{float, ndarray}
The Qn robust estimator of scale | qn_scale | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
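A usage sketch of `qn_scale`, assuming it is importable from `statsmodels.robust.scale`; on normal data it should be close to the true standard deviation and is far less affected by a few outliers than `np.std`:

```python
import numpy as np
from statsmodels.robust.scale import qn_scale

rng = np.random.default_rng(0)
x = rng.normal(scale=2.0, size=1000)
x[:10] += 100.0                   # contaminate with a few outliers
print(np.std(x), qn_scale(x))     # np.std is inflated, qn_scale stays near 2
```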
def _qn_naive(a, c=1 / (np.sqrt(2) * Gaussian.ppf(5 / 8))):
"""
A naive implementation of the Qn robust estimator of scale, used solely
to test the faster, more involved one
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant, used to get consistent estimates of the
standard deviation at the normal distribution. Defined as
1/(np.sqrt(2) * scipy.stats.norm.ppf(5/8)), which is 2.219144.
Returns
-------
The Qn robust estimator of scale
"""
a = np.squeeze(a)
n = a.shape[0]
if a.size == 0:
return np.nan
else:
h = int(n // 2 + 1)
k = int(h * (h - 1) / 2)
idx = np.triu_indices(n, k=1)
diffs = np.abs(a[idx[0]] - a[idx[1]])
output = np.partition(diffs, kth=k - 1)[k - 1]
output = c * output
return output | A naive implementation of the Qn robust estimator of scale, used solely
to test the faster, more involved one
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant, used to get consistent estimates of the
standard deviation at the normal distribution. Defined as
1/(np.sqrt(2) * scipy.stats.norm.ppf(5/8)), which is 2.219144.
Returns
-------
The Qn robust estimator of scale | _qn_naive | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
def __call__(self, a, mu=None, initscale=None, axis=0):
"""
Compute Huber's proposal 2 estimate of scale, using an optional
initial value of scale and an optional estimate of mu. If mu
is supplied, it is not reestimated.
Parameters
----------
a : ndarray
1d array
mu : float or None, optional
If the location mu is supplied then it is not reestimated.
Default is None, which means that it is estimated.
initscale : float or None, optional
A first guess on scale. If initscale is None then the standardized
median absolute deviation of a is used.
Notes
-----
`Huber` minimizes the function
sum(psi((a[i]-mu)/scale)**2)
as a function of (mu, scale), where
psi(x) = np.clip(x, -self.c, self.c)
"""
a = np.asarray(a)
if mu is None:
n = a.shape[axis] - 1
mu = np.median(a, axis=axis)
est_mu = True
else:
n = a.shape[axis]
mu = mu
est_mu = False
if initscale is None:
scale = mad(a, axis=axis)
else:
scale = initscale
scale = tools.unsqueeze(scale, axis, a.shape)
mu = tools.unsqueeze(mu, axis, a.shape)
return self._estimate_both(a, scale, mu, axis, est_mu, n) | Compute Huber's proposal 2 estimate of scale, using an optional
initial value of scale and an optional estimate of mu. If mu
is supplied, it is not reestimated.
Parameters
----------
a : ndarray
1d array
mu : float or None, optional
If the location mu is supplied then it is not reestimated.
Default is None, which means that it is estimated.
initscale : float or None, optional
A first guess on scale. If initscale is None then the standardized
median absolute deviation of a is used.
Notes
-----
`Huber` minimizes the function
sum(psi((a[i]-mu)/scale)**2)
as a function of (mu, scale), where
psi(x) = np.clip(x, -self.c, self.c) | __call__ | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
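A usage sketch of Huber's proposal 2, assuming the enclosing class is `statsmodels.robust.scale.Huber`; calling the instance returns the joint location and scale estimates:

```python
import numpy as np
from statsmodels.robust.scale import Huber

rng = np.random.default_rng(1)
x = rng.normal(loc=3.0, scale=2.0, size=500)
x[:5] = 100.0                          # a few outliers

loc, scale_ = Huber()(x)               # joint estimation of mu and scale
loc0, scale0 = Huber()(x, mu=3.0)      # mu supplied: only the scale is re-estimated
```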
def fit(self, data, start_scale='mad', maxiter=100, rtol=1e-6, atol=1e-8):
"""
Estimate M-scale using iteration.
Parameters
----------
data : array-like
Data, currently assumed to be 1-dimensional.
start_scale : string or float.
Starting value of scale or method to compute the starting value.
Default is 'mad'; no other string options are available.
maxiter : int
Maximum number of iterations.
rtol : float
Relative convergence tolerance.
atol : float
Absolute convergence tolerance.
Returns
-------
float : Scale estimate. The estimated variance is scale squared.
Todo: switch to Holder instance with more information.
"""
scale = _scale_iter(
data,
scale0=start_scale,
maxiter=maxiter, rtol=rtol, atol=atol,
meef_scale=self.chi_func,
scale_bias=self.scale_bias,
)
return scale | Estimate M-scale using iteration.
Parameters
----------
data : array-like
Data, currently assumed to be 1-dimensional.
start_scale : string or float.
Starting value of scale or method to compute the starting value.
Default is 'mad'; no other string options are available.
maxiter : int
Maximum number of iterations.
rtol : float
Relative convergence tolerance.
atol : float
Absolute convergence tolerance.
Returns
-------
float : Scale estimate. The estimated variance is scale squared.
Todo: switch to Holder instance with more information. | fit | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
def scale_trimmed(data, alpha, center='median', axis=0, distr=None,
distargs=None):
"""scale estimate based on symmetrically trimmed sample
The scale estimate is robust to a fraction alpha of outliers on each
tail.
The scale is normalized to correspond to a reference distribution, which
is the normal distribution by default.
Parameters
----------
data : array_like
dataset, by default (axis=0) observations are assumed to be in rows
and variables in columns.
alpha : float in interval (0, 1)
Trimming fraction in each tail. The floor(nobs * alpha) smallest
observations are trimmed, and the same number of the largest
observations are trimmed. The scale estimate is based on a fraction
(1 - 2 * alpha) of the observations.
center : 'median', 'mean', 'tmean' or number
`center` defines how the trimmed sample is centered. 'median' and
'mean' are calculated on the full sample. `tmean` is the trimmed
mean, calculated with the trimmed sample. If `center` is array_like
then it needs to be scalar or correspond to the shape of the data
reduced by axis.
axis : int, default is 0
axis along which scale is estimated.
distr : None, 'raw' or a distribution instance
Default if distr is None is the normal distribution `scipy.stats.norm`.
This is the reference distribution to normalize the scale.
Note: This cannot be a frozen instance, since it does not have an
`expect` method.
If distr is 'raw', then the scale is not normalized.
distargs :
Arguments for the distribution.
Returns
-------
scale : float or array
the estimated scale normalized for the reference distribution.
Examples
--------
for normal distribution
>>> np.random.seed(1)
>>> x = 2 * np.random.randn(100)
>>> scale_trimmed(x, 0.1)
1.7479516739879672
for t distribution
>>> xt = stats.t.rvs(3, size=1000, scale=2)
>>> print(scale_trimmed(xt, alpha, distr=stats.t, distargs=(3,)))
2.06574778599
compare to standard deviation of sample
>>> xt.std()
3.1457788359130481
"""
if distr is None:
distr = stats.norm
if distargs is None:
distargs = ()
x = np.array(data) # make copy for inplace sort
if axis is None:
x = x.ravel()
axis = 0
# TODO: latest numpy has partial sort
x.sort(axis)
nobs = x.shape[axis]
if distr == 'raw':
c_inv = 1
else:
bound = distr.ppf(1 - alpha, *distargs)
c_inv = distr.expect(lambda x: x*x, lb=-bound, ub=bound, args=distargs)
cut_idx = np.floor(nobs * alpha).astype(int)
sl = [slice(None, None, None)] * x.ndim
sl[axis] = slice(cut_idx, -cut_idx)
# x_trimmed = x[cut_idx:-cut_idx]
# cut in axis
x_trimmed = x[tuple(sl)]
center_type = center
if center in ['med', 'median']:
center = np.median(x, axis=axis)
elif center == 'mean':
center = np.mean(x, axis=axis)
elif center == 'tmean':
center = np.mean(x_trimmed, axis=axis)
else:
# assume number
center_type = 'user'
center_ndim = np.ndim(center)
if (center_ndim > 0) and (center_ndim < x.ndim):
center = np.expand_dims(center, axis)
s_raw = ((x_trimmed - center)**2).sum(axis)
scale = np.sqrt(s_raw / nobs / c_inv)
res = Holder(scale=scale,
center=center,
center_type=center_type,
trim_idx=cut_idx,
nobs=nobs,
distr=distr,
scale_correction=1. / c_inv)
return res | scale estimate based on symmetrically trimmed sample
The scale estimate is robust to a fraction alpha of outliers on each
tail.
The scale is normalized to correspond to a reference distribution, which
is the normal distribution by default.
Parameters
----------
data : array_like
dataset, by default (axis=0) observations are assumed to be in rows
and variables in columns.
alpha : float in interval (0, 1)
Trimming fraction in each tail. The floor(nobs * alpha) smallest
observations are trimmed, and the same number of the largest
observations are trimmed. The scale estimate is based on a fraction
(1 - 2 * alpha) of the observations.
center : 'median', 'mean', 'tmean' or number
`center` defines how the trimmed sample is centered. 'median' and
'mean' are calculated on the full sample. `tmean` is the trimmed
mean, calculated with the trimmed sample. If `center` is array_like
then it needs to be scalar or correspond to the shape of the data
reduced by axis.
axis : int, default is 0
axis along which scale is estimated.
distr : None, 'raw' or a distribution instance
Default if distr is None is the normal distribution `scipy.stats.norm`.
This is the reference distribution to normalize the scale.
Note: This cannot be a frozen instance, since it does not have an
`expect` method.
If distr is 'raw', then the scale is not normalized.
distargs :
Arguments for the distribution.
Returns
-------
scale : float or array
the estimated scale normalized for the reference distribution.
Examples
--------
for normal distribution
>>> np.random.seed(1)
>>> x = 2 * np.random.randn(100)
>>> scale_trimmed(x, 0.1)
1.7479516739879672
for t distribution
>>> xt = stats.t.rvs(3, size=1000, scale=2)
>>> print(scale_trimmed(xt, alpha, distr=stats.t, distargs=(3,)))
2.06574778599
compare to standard deviation of sample
>>> xt.std()
3.1457788359130481 | scale_trimmed | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
def _weight_mean(x, c):
"""Tukey-biweight, bisquare weights used in tau scale.
Parameters
----------
x : ndarray
Data
c : float
Parameter for bisquare weights
Returns
-------
ndarray : weights
"""
x = np.asarray(x)
w = (1 - (x / c)**2)**2 * (np.abs(x) <= c)
return w | Tukey-biweight, bisquare weights used in tau scale.
Parameters
----------
x : ndarray
Data
c : float
Parameter for bisquare weights
Returns
-------
ndarray : weights | _weight_mean | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
def _winsor(x, c):
"""Winsorized squared data used in tau scale.
Parameters
----------
x : ndarray
Data
c : float
threshold
Returns
-------
winsorized squared data, ``np.minimum(x**2, c**2)``
"""
return np.minimum(x**2, c**2) | Winsorized squared data used in tau scale.
Parameters
----------
x : ndarray
Data
c : float
threshold
Returns
-------
winsorized squared data, ``np.minimum(x**2, c**2)`` | _winsor | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
def scale_tau(data, cm=4.5, cs=3, weight_mean=_weight_mean,
weight_scale=_winsor, normalize=True, ddof=0):
"""Tau estimator of univariate scale.
Experimental, API will change
Parameters
----------
data : array_like, 1-D or 2-D
If data is 2d, then the location and scale estimates
are calculated for each column
cm : float
constant used in call to weight_mean
cs : float
constant used in call to weight_scale
weight_mean : callable
function to calculate weights for weighted mean
weight_scale : callable
function to calculate scale, "rho" function
normalize : bool
rescale the scale estimate so it is consistent when the data is
normally distributed. The computation assumes winsorized (truncated)
variance.
Returns
-------
mean : nd_array
robust mean
std : nd_array
robust estimate of scale (standard deviation)
Notes
-----
Uses definition of Maronna and Zamar 2002, with weighted mean and
trimmed variance.
The normalization has been added to match R robustbase.
R robustbase uses by default ddof=0, with option to set it to 2.
References
----------
.. [1] Maronna, Ricardo A, and Ruben H Zamar. “Robust Estimates of Location
and Dispersion for High-Dimensional Datasets.” Technometrics 44, no. 4
(November 1, 2002): 307–17. https://doi.org/10.1198/004017002188618509.
"""
x = np.asarray(data)
nobs = x.shape[0]
med_x = np.median(x, 0)
xdm = x - med_x
mad_x = np.median(np.abs(xdm), 0)
wm = weight_mean(xdm / mad_x, cm)
mean = (wm * x).sum(0) / wm.sum(0)
var = (mad_x**2 * weight_scale((x - mean) / mad_x, cs).sum(0) /
(nobs - ddof))
cf = 1
if normalize:
c = cs * stats.norm.ppf(0.75)
cf = 2 * ((1 - c**2) * stats.norm.cdf(c) - c * stats.norm.pdf(c)
+ c**2) - 1
# return Holder(loc=mean, scale=np.sqrt(var / cf))
return mean, np.sqrt(var / cf) | Tau estimator of univariate scale.
Experimental, API will change
Parameters
----------
data : array_like, 1-D or 2-D
If data is 2d, then the location and scale estimates
are calculated for each column
cm : float
constant used in call to weight_mean
cs : float
constant used in call to weight_scale
weight_mean : callable
function to calculate weights for weighted mean
weight_scale : callable
function to calculate scale, "rho" function
normalize : bool
rescale the scale estimate so it is consistent when the data is
normally distributed. The computation assumes winsorized (truncated)
variance.
Returns
-------
mean : nd_array
robust mean
std : nd_array
robust estimate of scale (standard deviation)
Notes
-----
Uses definition of Maronna and Zamar 2002, with weighted mean and
trimmed variance.
The normalization has been added to match R robustbase.
R robustbase uses by default ddof=0, with option to set it to 2.
References
----------
.. [1] Maronna, Ricardo A, and Ruben H Zamar. “Robust Estimates of Location
and Dispersion for High-Dimensional Datasets.” Technometrics 44, no. 4
(November 1, 2002): 307–17. https://doi.org/10.1198/004017002188618509. | scale_tau | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
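A usage sketch of `scale_tau` (experimental, per the docstring), assuming it is importable from `statsmodels.robust.scale`; it returns the robust mean and robust standard deviation, column-wise for 2-D input:

```python
import numpy as np
from statsmodels.robust.scale import scale_tau

rng = np.random.default_rng(2)
x = rng.normal(loc=[0.0, 5.0], scale=[1.0, 3.0], size=(200, 2))
x[:5] += 50.0                      # contaminate a few rows

mean, std = scale_tau(x)           # robust location and scale for each column
```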
def _scale_iter(data, scale0='mad', maxiter=100, rtol=1e-6, atol=1e-8,
meef_scale=None, scale_bias=None, iter_method="rho", ddof=0):
"""iterative scale estimate base on "rho" function
"""
x = np.asarray(data)
nobs = x.shape[0]
if scale0 == 'mad':
scale0 = mad(x, center=0)
for i in range(maxiter):
x_scaled = x / scale0
if iter_method == "rho":
scale = scale0 * np.sqrt(
np.sum(meef_scale(x / scale0)) / scale_bias / (nobs - ddof))
else:
weights_scale = meef_scale(x_scaled) / (1e-50 + x_scaled**2)
scale2 = (weights_scale * x**2).sum() / (nobs - ddof)
scale2 /= scale_bias
scale = np.sqrt(scale2)
if debug:
print(scale)
if np.allclose(scale, scale0, atol=atol, rtol=rtol):
break
scale0 = scale
return scale | iterative scale estimate based on "rho" function | _scale_iter | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
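The "rho" branch above is the usual fixed-point iteration for an M-scale: the scale is updated by scale * sqrt(mean(rho(x / scale)) / b), where b is the consistency constant E[rho(Z)] for standard normal Z. A standalone sketch of that idea with a Tukey-biweight rho; it mirrors the iteration rather than calling the private helper, and the tuning constant and Monte Carlo consistency constant are illustrative assumptions:

```python
import numpy as np

def rho_biweight(x, c=1.547):
    # bounded Tukey-biweight rho; constant at c**2 / 6 for |x| >= c
    x = np.minimum(np.abs(x), c)
    return x**2 / 2 - x**4 / (2 * c**2) + x**6 / (6 * c**4)

rng = np.random.default_rng(3)
b = rho_biweight(rng.standard_normal(100_000)).mean()   # consistency constant E[rho(Z)]

def m_scale(x, maxiter=100, rtol=1e-6):
    scale = np.median(np.abs(x)) / 0.6745                # MAD starting value
    for _ in range(maxiter):
        new = scale * np.sqrt(rho_biweight(x / scale).mean() / b)
        if np.allclose(new, scale, rtol=rtol):
            return new
        scale = new
    return scale

x = np.concatenate([rng.normal(scale=2.0, size=500), [50.0, 60.0, 70.0]])
print(m_scale(x))   # close to 2 despite the outliers
```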
def fit(self, h=None, scale_binding=False, start=None):
"""Estimate the model
Parameters
----------
h : int
The size of the initial sets for the S-estimator.
Default is .... (todo)
scale_binding : bool
If true, then the scale is fixed in the second stage M-estimation,
i.e. this is the MM-estimator.
If false, then the high breakdown point M-scale is used also in the
second stage M-estimation if that estimated scale is smaller than
the scale of the preliminary, first stage S-estimator.
start : tuple or None
If None, then the starting parameters and scale for the second
stage M-estimation are taken from the first stage S-estimator.
Alternatively, the starting parameters and starting scale can be
provided by the user as tuple (start_params, start_scale). In this
case the first stage S-estimation is skipped.
maxiter, other optimization parameters are still missing (todo)
Returns
-------
results instance
Notes
-----
If scale_binding is true, then the estimator is a standard
MM-estimator with fixed scale in the second stage M-estimation.
If scale_binding is false, then the estimator will try to find an
estimate with lower M-scale using the same scale-norm rho as in the
first stage S-estimator. If the estimated scale is not smaller than
the scale estimated in the first stage S-estimator, then the
fixed scale MM-estimator is returned.
"""
norm_m = self.norm_mean
if start is None:
res_s = super().fit(h)
start_params = np.asarray(res_s.params)
start_scale = res_s.scale
else:
start_params, start_scale = start
res_s = None
mod_m = RLM(self.endog, self.exog, M=norm_m)
res_mm = mod_m.fit(
start_params=start_params,
start_scale=start_scale,
update_scale=False
)
if not scale_binding:
# we can compute this first and skip MM if scale decrease
mod_sm = RLM(self.endog, self.exog, M=norm_m)
res_sm = mod_sm.fit(
start_params=start_params,
scale_est=self.mscale
)
if not scale_binding and res_sm.scale < res_mm.scale:
res = res_sm
else:
res = res_mm
res._results.results_dets = res_s
return res | Estimate the model
Parameters
----------
h : int
The size of the initial sets for the S-estimator.
Default is .... (todo)
scale_binding : bool
If true, then the scale is fixed in the second stage M-estimation,
i.e. this is the MM-estimator.
If false, then the high breakdown point M-scale is used also in the
second stage M-estimation if that estimated scale is smaller than
the scale of the preliminary, first stage S-estimator.
start : tuple or None
If None, then the starting parameters and scale for the second
stage M-estimation are taken from the first stage S-estimator.
Alternatively, the starting parameters and starting scale can be
provided by the user as tuple (start_params, start_scale). In this
case the first stage S-estimation is skipped.
maxiter, other optimization parameters are still missing (todo)
Returns
-------
results instance
Notes
-----
If scale_binding is true, then the estimator is a standard
MM-estimator with fixed scale in the second stage M-estimation.
If scale_binding is false, then the estimator will try to find an
estimate with lower M-scale using the same scale-norm rho as in the
first stage S-estimator. If the estimated scale is not smaller than
the scale estimated in the first stage S-estimator, then the
fixed scale MM-estimator is returned. | fit | python | statsmodels/statsmodels | statsmodels/robust/resistant_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/resistant_linear_model.py | BSD-3-Clause |
def _initialize(self):
"""
Initializes the model for the IRLS fit.
Resets the history and number of iterations.
"""
self.pinv_wexog = np.linalg.pinv(self.exog)
self.normalized_cov_params = np.dot(self.pinv_wexog,
np.transpose(self.pinv_wexog))
self.df_resid = (float(self.exog.shape[0] -
np.linalg.matrix_rank(self.exog)))
self.df_model = float(np.linalg.matrix_rank(self.exog) - 1)
self.nobs = float(self.endog.shape[0]) | Initializes the model for the IRLS fit.
Resets the history and number of iterations. | _initialize | python | statsmodels/statsmodels | statsmodels/robust/robust_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/robust_linear_model.py | BSD-3-Clause |
def predict(self, params, exog=None):
"""
Return linear predicted values from a design matrix.
Parameters
----------
params : array_like
Parameters of a linear model
exog : array_like, optional.
Design / exogenous data. Model exog is used if None.
Returns
-------
An array of fitted values
"""
# copied from linear_model # TODO: then is it needed?
if exog is None:
exog = self.exog
return np.dot(exog, params) | Return linear predicted values from a design matrix.
Parameters
----------
params : array_like
Parameters of a linear model
exog : array_like, optional.
Design / exogenous data. Model exog is used if None.
Returns
-------
An array of fitted values | predict | python | statsmodels/statsmodels | statsmodels/robust/robust_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/robust_linear_model.py | BSD-3-Clause |
def deviance(self, tmp_results):
"""
Returns the (unnormalized) log-likelihood from the M estimator.
"""
tmp_resid = self.endog - tmp_results.fittedvalues
return self.M(tmp_resid / tmp_results.scale).sum() | Returns the (unnormalized) log-likelihood from the M estimator. | deviance | python | statsmodels/statsmodels | statsmodels/robust/robust_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/robust_linear_model.py | BSD-3-Clause |
def _estimate_scale(self, resid):
"""
Estimates the scale based on the option provided to the fit method.
"""
if isinstance(self.scale_est, str):
if self.scale_est.lower() == 'mad':
return scale.mad(resid, center=0)
else:
raise ValueError("Option %s for scale_est not understood" %
self.scale_est)
elif isinstance(self.scale_est, scale.HuberScale):
return self.scale_est(self.df_resid, self.nobs, resid)
else:
# use df correction to match HuberScale
return self.scale_est(resid) * np.sqrt(self.nobs / self.df_resid) | Estimates the scale based on the option provided to the fit method. | _estimate_scale | python | statsmodels/statsmodels | statsmodels/robust/robust_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/robust_linear_model.py | BSD-3-Clause |
def fit(self, maxiter=50, tol=1e-8, scale_est='mad', init=None, cov='H1',
update_scale=True, conv='dev', start_params=None, start_scale=None,
):
"""
Fits the model using iteratively reweighted least squares.
The IRLS routine runs until the specified objective converges to `tol`
or `maxiter` has been reached.
Parameters
----------
conv : str
Indicates the convergence criteria.
Available options are "coefs" (the coefficients), "weights" (the
weights in the iteration), "sresid" (the standardized residuals),
and "dev" (the un-normalized log-likelihood for the M
estimator). The default is "dev".
cov : str, optional
'H1', 'H2', or 'H3'
Indicates how the covariance matrix is estimated. Default is 'H1'.
See rlm.RLMResults for more information.
init : str
Specifies method for the initial estimates of the parameters.
Default is None, which means that the least squares estimate
is used. Currently it is the only available choice.
Deprecated and will be removed. There is no choice here.
maxiter : int
The maximum number of iterations to try. Default is 50.
scale_est : str or HuberScale()
'mad' or HuberScale()
Indicates the estimate to use for scaling the weights in the IRLS.
The default is 'mad' (median absolute deviation). Other options are
'HuberScale' for Huber's proposal 2. Huber's proposal 2 has
optional keyword arguments d, tol, and maxiter for specifying the
tuning constant, the convergence tolerance, and the maximum number
of iterations. See statsmodels.robust.scale for more information.
tol : float
The convergence tolerance of the estimate. Default is 1e-8.
update_scale : Bool
If `update_scale` is False then the scale estimate for the
weights is held constant over the iteration. Otherwise, it
is updated for each fit in the iteration. Default is True.
start_params : array_like, optional
Initial guess of the solution of the optimizer. If not provided,
the initial parameters are computed using OLS.
start_scale : float, optional
Initial scale. If update_scale is False, then the scale will be
fixed at this level for the estimation of the mean parameters
during iteration. If not provided, then the initial scale is
estimated from the OLS residuals.
Returns
-------
results : statsmodels.rlm.RLMresults
Results instance
"""
if cov.upper() not in ["H1", "H2", "H3"]:
raise ValueError("Covariance matrix %s not understood" % cov)
else:
self.cov = cov.upper()
conv = conv.lower()
if conv not in ["weights", "coefs", "dev", "sresid"]:
raise ValueError("Convergence argument %s not understood" % conv)
self.scale_est = scale_est
if start_params is None:
wls_results = lm.WLS(self.endog, self.exog).fit()
else:
start_params = np.asarray(start_params, dtype=np.double).squeeze()
start_params = np.atleast_1d(start_params)
if (start_params.shape[0] != self.exog.shape[1] or
start_params.ndim != 1):
raise ValueError('start_params must by a 1-d array with {} '
'values'.format(self.exog.shape[1]))
fake_wls = reg_tools._MinimalWLS(self.endog, self.exog,
weights=np.ones_like(self.endog),
check_weights=False)
wls_results = fake_wls.results(start_params)
if not init and not start_scale:
self.scale = self._estimate_scale(wls_results.resid)
elif start_scale:
self.scale = start_scale
if not update_scale:
self.scale_est = scale_est = "fixed"
history = dict(params=[np.inf], scale=[])
if conv == 'coefs':
criterion = history['params']
elif conv == 'dev':
history.update(dict(deviance=[np.inf]))
criterion = history['deviance']
elif conv == 'sresid':
history.update(dict(sresid=[np.inf]))
criterion = history['sresid']
elif conv == 'weights':
history.update(dict(weights=[np.inf]))
criterion = history['weights']
# done one iteration so update
history = self._update_history(wls_results, history, conv)
iteration = 1
converged = 0
while not converged:
if self.scale == 0.0:
import warnings
warnings.warn('Estimated scale is 0.0 indicating that the '
'last iteration produced a perfect fit of the '
'weighted data.', ConvergenceWarning)
break
self.weights = self.M.weights(wls_results.resid / self.scale)
wls_results = reg_tools._MinimalWLS(self.endog, self.exog,
weights=self.weights,
check_weights=True).fit()
if update_scale is True:
self.scale = self._estimate_scale(wls_results.resid)
history = self._update_history(wls_results, history, conv)
iteration += 1
converged = _check_convergence(criterion, iteration, tol, maxiter)
results = RLMResults(self, wls_results.params,
self.normalized_cov_params, self.scale)
history['iteration'] = iteration
results.fit_history = history
results.fit_options = dict(cov=cov.upper(), scale_est=scale_est,
norm=self.M.__class__.__name__, conv=conv)
# norm is not changed in fit, no old state
# doing the next causes exception
# self.cov = self.scale_est = None #reset for additional fits
# iteration and history could contain wrong state with repeated fit
return RLMResultsWrapper(results) | Fits the model using iteratively reweighted least squares.
The IRLS routine runs until the specified objective converges to `tol`
or `maxiter` has been reached.
Parameters
----------
conv : str
Indicates the convergence criteria.
Available options are "coefs" (the coefficients), "weights" (the
weights in the iteration), "sresid" (the standardized residuals),
and "dev" (the un-normalized log-likelihood for the M
estimator). The default is "dev".
cov : str, optional
'H1', 'H2', or 'H3'
Indicates how the covariance matrix is estimated. Default is 'H1'.
See rlm.RLMResults for more information.
init : str
Specifies method for the initial estimates of the parameters.
Default is None, which means that the least squares estimate
is used. Currently it is the only available choice.
Deprecated and will be removed. There is no choice here.
maxiter : int
The maximum number of iterations to try. Default is 50.
scale_est : str or HuberScale()
'mad' or HuberScale()
Indicates the estimate to use for scaling the weights in the IRLS.
The default is 'mad' (median absolute deviation). Other options are
'HuberScale' for Huber's proposal 2. Huber's proposal 2 has
optional keyword arguments d, tol, and maxiter for specifying the
tuning constant, the convergence tolerance, and the maximum number
of iterations. See statsmodels.robust.scale for more information.
tol : float
The convergence tolerance of the estimate. Default is 1e-8.
update_scale : Bool
If `update_scale` is False then the scale estimate for the
weights is held constant over the iteration. Otherwise, it
is updated for each fit in the iteration. Default is True.
start_params : array_like, optional
Initial guess of the solution of the optimizer. If not provided,
the initial parameters are computed using OLS.
start_scale : float, optional
Initial scale. If update_scale is False, then the scale will be
fixed at this level for the estimation of the mean parameters
during iteration. If not provided, then the initial scale is
estimated from the OLS residuals.
Returns
-------
results : statsmodels.rlm.RLMresults
Results instance | fit | python | statsmodels/statsmodels | statsmodels/robust/robust_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/robust_linear_model.py | BSD-3-Clause |
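A usage sketch of the IRLS fit described above, using the public statsmodels API on synthetic data:

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(4)
X = sm.add_constant(rng.normal(size=(200, 2)))
y = X @ np.array([1.0, 2.0, -1.0]) + rng.normal(size=200)
y[:10] += 15.0                                 # contaminate a few responses

mod = sm.RLM(y, X, M=sm.robust.norms.HuberT())
res = mod.fit(scale_est="mad", conv="dev", cov="H1")
print(res.params)   # close to [1, 2, -1] despite the outliers
print(res.scale)    # robust scale used in the final IRLS step
```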
def summary(self, yname=None, xname=None, title=0, alpha=.05,
return_fmt='text'):
"""
This is for testing the new summary setup
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['IRLS']),
('Norm:', [self.fit_options['norm']]),
('Scale Est.:', [self.fit_options['scale_est']]),
('Cov Type:', [self.fit_options['cov']]),
('Date:', None),
('Time:', None),
('No. Iterations:', ["%d" % self.fit_history['iteration']])
]
top_right = [('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None)
]
if title is not None:
title = "Robust linear Model Regression Results"
# boiler plate
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
# add warnings/notes, added to text format only
etext = []
wstr = ("If the model instance has been used for another fit with "
"different fit parameters, then the fit options might not be "
"the correct ones anymore .")
etext.append(wstr)
if etext:
smry.add_extra_txt(etext)
return smry | This is for testing the new summary setup | summary | python | statsmodels/statsmodels | statsmodels/robust/robust_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/robust_linear_model.py | BSD-3-Clause |
def summary2(self, xname=None, yname=None, title=None, alpha=.05,
float_format="%.4f"):
"""Experimental summary function for regression results
Parameters
----------
yname : str
Name of the dependent variable (optional)
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format : str
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
return smry | Experimental summary function for regression results
Parameters
----------
yname : str
Name of the dependent variable (optional)
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format : str
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results | summary2 | python | statsmodels/statsmodels | statsmodels/robust/robust_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/robust_linear_model.py | BSD-3-Clause |
def coef_normalize_cov_truncated(frac, k_vars):
"""factor for consistency of truncated cov at normal distribution
This is usually denoted by `b`. Here, it is calculated as `1 / b`.
Trimming threshold is based on chisquare distribution.
Parameters
----------
frac: float in (0, 1)
fraction (probability) of observations that are not trimmed
k_vars : integer
number of variables, i.e. dimension of multivariate random variable
Returns
-------
fac : float
factor to multiply the raw trimmed covariance
Notes
-----
TODO: it might be better to use alpha = 1 - frac as argument instead.
Uses explicit formula from Riani, Cerioli and Torti (2014) equation (3)
which is also in Rocke and Woodruff (1996) Outliers equation (5).
References
----------
.. [1] Riani, Marco, Andrea Cerioli, and Francesca Torti. “On Consistency
Factors and Efficiency of Robust S-Estimators.” TEST 23, no. 2 (February
4, 2014): 356–87. https://doi.org/10.1007/s11749-014-0357-7.
.. [2] Rocke, David M., and David L. Woodruff. “Identification of Outliers
in Multivariate Data.” Journal of the American Statistical
Association 91, no. 435 (1996): 1047–61.
https://doi.org/10.2307/2291724.
"""
# todo: use isf(alpha, k_vars) instead?
fac = 1 / (stats.chi2.cdf(stats.chi2.ppf(frac, k_vars), k_vars + 2) / frac)
return fac | factor for consistency of truncated cov at normal distribution
This is usually denoted by `b`. Here, it is calculated as `1 / b`.
Trimming threshold is based on chisquare distribution.
Parameters
----------
frac: float in (0, 1)
fraction (probability) of observations that are not trimmed
k_vars : integer
number of variables, i.e. dimension of multivariate random variable
Returns
-------
fac : float
factor to multiply the raw trimmed covariance
Notes
-----
TODO: it might be better to use alpha = 1 - frac as argument instead.
Uses explicit formula from Riani, Cerioli and Torti (2014) equation (3)
which is also in Rocke and Woodruff (1996) Outliers equation (5).
References
----------
.. [1] Riani, Marco, Andrea Cerioli, and Francesca Torti. “On Consistency
Factors and Efficiency of Robust S-Estimators.” TEST 23, no. 2 (February
4, 2014): 356–87. https://doi.org/10.1007/s11749-014-0357-7.
.. [2] Rocke, David M., and David L. Woodruff. “Identification of Outliers
in Multivariate Data.” Journal of the American Statistical
Association 91, no. 435 (1996): 1047–61.
https://doi.org/10.2307/2291724. | coef_normalize_cov_truncated | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
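A short numeric illustration of the consistency factor, using scipy directly; with frac = 0.975 and a few dimensions the factor is only modestly above 1, and it grows as more mass is trimmed:

```python
from scipy import stats

def consistency_factor(frac, k_vars):
    # same formula as coef_normalize_cov_truncated above
    return 1 / (stats.chi2.cdf(stats.chi2.ppf(frac, k_vars), k_vars + 2) / frac)

for frac in (0.975, 0.9, 0.5):
    print(frac, consistency_factor(frac, k_vars=3))
```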
def _reweight(x, loc, cov, trim_frac=0.975, ddof=1):
"""Reweighting step, trims data and computes Pearson covariance
Parameters
----------
x : ndarray
Multivariate data with observation in rows
loc : ndarray
Location, mean or center of the data.
cov : ndarray
Covariance for computing Mahalanobis distance
trim_frac : float in (0, 1)
# todo: change name
This is the coverage, (1 - trim_frac) is tail probability for chi2
distribution.
ddof : int or float
Delta degrees of freedom used for trimmed Pearson covariance
computed with `np.cov`.
Returns
-------
cov : ndarray
Covariance matrix of trimmed data, not rescaled to account for
trimming.
loc : ndarray
Mean of trimmed data.
See Also
--------
coef_normalize_cov_truncated
Notes
-----
This reweighting step is used in OGK and in literature also for MCD.
Trimming is metric with cutoff computed under the assumption that the
Mahalanobis distances are chi-square distributed.
References
----------
???
"""
beta = trim_frac
nobs, k_vars = x.shape # noqa: F841
# d = (((z - loc_z) / scale_z)**2).sum(1) # for orthogonal
d = mahalanobis(x - loc, cov)
# only hard thresholding right now
dmed = np.median(d)
cutoff = (dmed * stats.chi2.isf(1-beta, k_vars) /
stats.chi2.ppf(0.5, k_vars))
mask = d <= cutoff
sample = x[mask]
loc = sample.mean(0)
cov = np.cov(sample.T, ddof=ddof)
return cov, loc | Reweighting step, trims data and computes Pearson covariance
Parameters
----------
x : ndarray
Multivariate data with observation in rows
loc : ndarray
Location, mean or center of the data.
cov : ndarray
Covariance for computing Mahalanobis distance
trim_frac : float in (0, 1)
# todo: change name
This is the coverage, (1 - trim_frac) is tail probability for chi2
distribution.
ddof : int or float
Delta degrees of freedom used for trimmed Pearson covariance
computed with `np.cov`.
Returns
-------
cov : ndarray
Covariance matrix of trimmed data, not rescaled to account for
trimming.
loc : ndarray
Mean of trimmed data.
See Also
--------
coef_normalize_cov_truncated
Notes
-----
This reweighting step is used in OGK and in literature also for MCD.
Trimming is metric with cutoff computed under the assumption that the
Mahalanobis distances are chi-square distributed.
References
----------
??? | _reweight | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def _rescale(x, loc, cov, prob=0.5):
"""rescale covariance to be consistent with normal distribution
This matches median of mahalanobis distance with the chi-square
distribution. This assumes that the data is normally distributed.
Parameters
----------
x : array-like
sample data, 2-dim with observation in rows
loc : ndarray
mean or center of data
cov : ndarray
covariance estimate
Returns
-------
ndarray: rescaled covariance
Notes
-----
This rescaling is used in several functions to compute rescaled
Mahalanobis distances for trimming.
"""
if prob != 0.5:
raise ValueError("currently only median prob=0.5 supported")
x = np.asarray(x)
k_vars = x.shape[1]
d = mahalanobis(x - loc, cov)
dmed = np.median(d)
fac = dmed / stats.chi2.ppf(prob, k_vars)
return cov * fac | rescale covariance to be consistent with normal distribution
This matches median of mahalanobis distance with the chi-square
distribution. This assumes that the data is normally distributed.
Parameters
----------
x : array-like
sample data, 2-dim with observation in rows
loc : ndarray
mean or center of data
cov : ndarray
covariance estimate
Returns
-------
ndarray: rescaled covariance
Notes
-----
This rescaling is used in several functions to compute rescaled
Mahalanobis distances for trimming. | _rescale | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
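A minimal usage sketch of the rescaling step above, assuming `_rescale` and `mahalanobis` from this module are in scope; after rescaling, the median squared distance matches the chi-square median by construction.

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = rng.normal(size=(500, 3))
cov0 = 0.5 * np.cov(x.T)  # deliberately mis-scaled covariance

cov1 = _rescale(x, x.mean(0), cov0)
d = mahalanobis(x - x.mean(0), cov1)
print(np.isclose(np.median(d), stats.chi2.ppf(0.5, 3)))  # True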
def _outlier_gy(d, distr=None, k_endog=1, trim_prob=0.975):
"""determine outlier fraction given reference distribution
This implements the outlier cutoff of Gervini and Yohai 2002
for use in efficient reweighting.
Parameters
----------
d : array_like, 1-D
array of squared standardized residuals or Mahalanobis distance
distr : None or distribution instance
reference distribution of d, needs cdf and ppf methods.
If None, then chisquare with k_endog degrees of freedom is
used. Otherwise, it should be a callable that provides the
cdf function
k_endog : int or float
used only if distr is None. In that case, it provides the degrees
of freedom for the chisquare distribution.
trim_prob : float in (0.5, 1)
threshold for the tail probability at which the search for
trimming or outlier fraction starts.
Returns
-------
frac : float
fraction of outliers
cutoff : float
cutoff value, values with `d > cutoff` are considered outliers
ntail : int
number of outliers
ntail0 : int
initial number of outliers based on trim tail probability.
cutoff0 : float
initial cutoff value based on trim tail probability.
Notes
-----
This does not fully correct for multiple testing and does not
maintain a familywise error rate or false discovery rate.
The error rate goes to zero asymptotically under the null model,
i.e. if there are no outliers.
This might not handle threshold points correctly with discrete
distribution.
TODO: check weak versus strict inequalities (e.g. in isf)
This only checks the upper tail of the distribution and of `d`.
"""
d = np.asarray(d)
nobs = d.shape[0]
if distr is None:
distr = stats.chi2(k_endog)
threshold = distr.isf(1 - trim_prob)
# get sorted array, we only need upper tail
dtail = np.sort(d[d >= threshold])
ntail0 = len(dtail)
if ntail0 == 0:
# no values above threshold
return 0, threshold, 0, 0, threshold
# using (n-1) / n as in GY2002
ranks = np.arange(nobs - ntail0, nobs) / nobs
frac = np.maximum(0, distr.cdf(dtail) - ranks).max()
ntail = int(nobs * frac) # rounding down
if ntail > 0:
cutoff = dtail[-ntail - 1]
else:
cutoff = dtail[-1] + 1e-15 # not sure, check inequality
if (dtail > cutoff).sum() < ntail:
import warnings
warnings.warn('ties at cutoff, cutoff rule produces fewer '
'outliers than `ntail`')
return frac, cutoff, ntail, ntail0, threshold | determine outlier fraction given reference distribution
This implements the outlier cutoff of Gervini and Yohai 2002
for use in efficient reweighting.
Parameters
----------
d : array_like, 1-D
array of squared standardized residuals or Mahalanobis distance
distr : None or distribution instance
reference distribution of d, needs cdf and ppf methods.
If None, then chisquare with k_endog degrees of freedom is
used. Otherwise, it should be a callable that provides the
cdf function
k_endog : int or float
used only if distr is None. In that case, it provides the degrees
of freedom for the chisquare distribution.
trim_prob : float in (0.5, 1)
threshold for the tail probability at which the search for
trimming or outlier fraction starts.
Returns
-------
frac : float
fraction of outliers
cutoff : float
cutoff value, values with `d > cutoff` are considered outliers
ntail : int
number of outliers
ntail0 : int
initial number of outliers based on trim tail probability.
cutoff0 : float
initial cutoff value based on trim tail probability.
Notes
-----
This does not fully correct for multiple testing and does not
maintain a familywise error rate or false discovery rate.
The error rate goes to zero asymptotically under the null model,
i.e. if there are no outliers.
This might not handle threshold points correctly with discrete
distribution.
TODO: check weak versus strict inequalities (e.g. in isf)
This only checks the upper tail of the distribution and of `d`. | _outlier_gy | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
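A small usage sketch for the Gervini-Yohai cutoff above, assuming `_outlier_gy` is in scope; the distances are simulated so that a handful of observations are clearly outlying.

import numpy as np
from scipy import stats

rng = np.random.default_rng(1)
k = 3
d = stats.chi2.rvs(k, size=200, random_state=rng)  # squared distances
d[:5] += 50  # five clear outliers

frac, cutoff, ntail, ntail0, threshold = _outlier_gy(d, k_endog=k)
print(frac, ntail, ntail0)  # detected outlier fraction and counts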
def mahalanobis(data, cov=None, cov_inv=None, sqrt=False):
"""Mahalanobis distance squared
Note: by default the squared distance is returned, i.e. without taking
the square root. Assumes the data is already centered.
Parameters
----------
data : array-like
Multivariate data with observation in rows.
cov : None or ndarray
Covariance matrix used in computing distance.
This is only used if cov_inv is None.
cov_inv : None or ndarray
Inverse covariance matrix used in computing distance.
One of cov and cov_inv needs to be provided.
sqrt : bool
If False, then the squared distance is returned.
If True, then the square root is returned.
Returns
-------
ndarray : Mahalanobis distances or squared distance.
"""
# another option would be to allow also cov^{-0.5} as keyword
x = np.asarray(data)
if cov_inv is not None:
# einsum might be a bit faster
d = (x * cov_inv.dot(x.T).T).sum(1)
elif cov is not None:
d = (x * np.linalg.solve(cov, x.T).T).sum(1)
else:
raise ValueError('either cov or cov_inv needs to be given')
if sqrt:
d = np.sqrt(d)
return d | Mahalanobis distance squared
Note: by default the squared distance is returned, i.e. without taking
the square root. Assumes the data is already centered.
Parameters
----------
data : array-like
Multivariate data with observation in rows.
cov : None or ndarray
Covariance matrix used in computing distance.
This is only used if cov_inv is None.
cov_inv : None or ndarray
Inverse covariance matrix used in computing distance.
One of cov and cov_inv needs to be provided.
sqrt : bool
If False, then the squared distance is returned.
If True, then the square root is returned.
Returns
-------
ndarray : Mahalanobis distances or squared distance. | mahalanobis | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
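A minimal usage sketch for `mahalanobis`, assuming it is importable (e.g. from statsmodels.robust.covariance); note that the data has to be centered by the caller.

import numpy as np

rng = np.random.default_rng(2)
x = rng.normal(size=(100, 3))
center = x.mean(0)
cov = np.cov(x.T)

d2 = mahalanobis(x - center, cov=cov)            # squared distances
d = mahalanobis(x - center, cov=cov, sqrt=True)  # distances
print(d2.shape, np.allclose(d**2, d2))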
def cov_gk1(x, y, scale_func=mad):
"""Gnanadesikan and Kettenring covariance between two variables.
Parameters
----------
x : ndarray
Data array.
y : ndarray
Data array
scale_func : callable
Scale function used in computing covariance.
Default is median absolute deviation, MAD.
Returns
-------
ndarray : GK covariance between x and y.
"""
s1 = scale_func(x + y)
s2 = scale_func(x - y)
return (s1**2 - s2**2) / 4 | Gnanadesikan and Kettenring covariance between two variables.
Parameters
----------
x : ndarray
Data array.
y : ndarray
Data array
scale_func : callable
Scale function used in computing covariance.
Default is median absolute deviation, MAD.
Returns
-------
ndarray : GK covariance between x and y. | cov_gk1 | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
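A quick sanity check of the identity behind `cov_gk1` (assumed in scope): with the ordinary standard deviation as scale function it reproduces the usual covariance exactly, while a robust scale such as MAD makes it resistant to outliers.

import numpy as np

rng = np.random.default_rng(3)
x = rng.normal(size=500)
y = 0.5 * x + rng.normal(size=500)

c_gk = cov_gk1(x, y, scale_func=lambda z: z.std(ddof=0))
print(np.allclose(c_gk, np.cov(x, y, ddof=0)[0, 1]))  # True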
def cov_gk(data, scale_func=mad):
"""Gnanadesikan and Kettenring covariance matrix estimator
Parameters
----------
data : ndarray
Multivariate data array with observations in rows.
scale_func : callable
Scale function used in computing covariance.
Default is median absolute deviation, MAD.
Returns
-------
ndarray : GK covariance matrix of the data
Notes
-----
This uses a loop over pairs of variables with cov_gk1 to avoid large
intermediate arrays.
"""
x = np.asarray(data)
if x.ndim != 2:
raise ValueError('data needs to be two dimensional')
nobs, k_vars = x.shape # noqa: F841
cov = np.diag(scale_func(x)**2)
for i in range(k_vars):
for j in range(i):
cij = cov_gk1(x[:, i], x[:, j], scale_func=scale_func)
cov[i, j] = cov[j, i] = cij
return cov | Gnanadesikan and Kettenring covariance matrix estimator
Parameters
----------
data : ndarray
Multivariate data array with observations in rows.
scale_func : callable
Scale function used in computing covariance.
Default is median absolute deviation, MAD.
Returns
-------
ndarray : GK covariance matrix of the data
Notes
-----
This uses a loop over pairs of variables with cov_gk1 to avoid large
intermediate arrays. | cov_gk | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
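A usage sketch for `cov_gk` (assumed in scope), contrasting it with the sample covariance under contamination.

import numpy as np

rng = np.random.default_rng(4)
x = rng.multivariate_normal(np.zeros(3), np.eye(3), size=300)
x[:10] += 10  # contaminate a few rows

print(np.cov(x.T).round(1))  # inflated by the contamination
print(cov_gk(x).round(1))    # stays close to the identity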
def cov_tyler(data, start_cov=None, normalize=False, maxiter=100, eps=1e-13):
"""Tyler's M-estimator for normalized covariance (scatter)
The underlying (population) mean of the data is assumed to be zero.
Parameters
----------
data : array-like
data array with observations in rows and variables in columns
start_cov : None or ndarray
starting covariance for iterative solution
normalize : False or string
If normalize is False (default), then the unscaled tyler scatter matrix
is returned.
Three types of normalization, i.e. rescaling are available by defining
string option:
- "trace" :
The scatter matrix is normalized to have trace equal to the number
of columns in the data.
- "det" :
The scatter matrix is normalized to have determinant equal to 1.
- "normal" :
The scatter matrix is rescaled to be consistent when data is normally
distributed. Rescaling is based on median of the mahalanobis
distances and assuming chisquare distribution of the distances.
- "weights" :
The scatter matrix is rescaled by the sum of weights.
see Ollila et al 2023
maxiter : int
maximum number of iterations to find the solution.
eps : float
convergence criterion. The maximum absolute distance needs to be
smaller than eps for convergence.
Returns
-------
Holder instance with the following attributes
cov : ndarray
estimate of the scatter matrix
iter : int
number of iterations used in finding a solution. If iter is less than
maxiter, then the iteration converged.
References
----------
.. [1] Tyler, David E. “A Distribution-Free M-Estimator of Multivariate
Scatter.” The Annals of Statistics 15, no. 1 (March 1, 1987): 234–51.
.. [2] Soloveychik, I., and A. Wiesel. 2014. Tyler's Covariance Matrix
Estimator in Elliptical Models With Convex Structure.
IEEE Transactions on Signal Processing 62 (20): 5251-59.
doi:10.1109/TSP.2014.2348951.
.. [3] Ollila, Esa, Daniel P. Palomar, and Frederic Pascal.
“Affine Equivariant Tyler’s M-Estimator Applied to Tail Parameter
Learning of Elliptical Distributions.” arXiv, May 7, 2023.
https://doi.org/10.48550/arXiv.2305.04330.
"""
x = np.asarray(data)
nobs, k_vars = x.shape
# kn = k_vars * 1. / nobs
if start_cov is not None:
c = start_cov
else:
c = np.diag(mad(x, center=0)**2)
# Tyler's M-estimator of shape (scatter) matrix
for i in range(maxiter):
# this is old code, slower than new version, but more literal
# c_inv = np.linalg.pinv(c)
# c_old = c
# c = kn * sum(np.outer(xi, xi) / np.inner(xi, c_inv.dot(xi))
# for xi in x)
c_old = c
ichol, _ = dtrtri(linalg.cholesky(c, lower=False), lower=0)
v = x @ ichol
dist_mahal_2 = np.einsum('ij,ji->i', v, v.T)
weights = k_vars / dist_mahal_2[:, None]
xw = np.sqrt(weights) * x
c = xw.T @ xw / nobs
diff = np.max(np.abs(c - c_old))
if diff < eps:
break
if normalize is False or normalize is None:
pass
elif normalize == "trace":
c /= np.trace(c) / k_vars
elif normalize == "det":
c /= np.linalg.det(c)**(1. / k_vars)
elif normalize == "normal":
c = _rescale(x, np.zeros(k_vars), c, prob=0.5)
elif normalize == "weights":
c /= weights.mean() / (np.trace(c) / k_vars)
else:
msg = 'normalize needs to be False, "trace", "det", "normal" or "weights"'
raise ValueError(msg)
return Holder(cov=c, n_iter=i, method="tyler") | Tyler's M-estimator for normalized covariance (scatter)
The underlying (population) mean of the data is assumed to be zero.
Parameters
----------
data : array-like
data array with observations in rows and variables in columns
start_cov : None or ndarray
starting covariance for iterative solution
normalize : False or string
If normalize is False (default), then the unscaled tyler scatter matrix
is returned.
Three types of normalization, i.e. rescaling are available by defining
string option:
- "trace" :
The scatter matrix is normalized to have trace equal to the number
of columns in the data.
- "det" :
The scatter matrix is normalized to have determinant equal to 1.
- "normal" :
The scatter matrix is rescaled to be consistent when data is normally
distributed. Rescaling is based on median of the mahalanobis
distances and assuming chisquare distribution of the distances.
- "weights" :
The scatter matrix is rescaled by the sum of weights.
see Ollila et al 2023
maxiter : int
maximum number of iterations to find the solution.
eps : float
convergence criterion. The maximum absolute distance needs to be
smaller than eps for convergence.
Returns
-------
Holder instance with the following attributes
cov : ndarray
estimate of the scatter matrix
iter : int
number of iterations used in finding a solution. If iter is less than
maxiter, then the iteration converged.
References
----------
.. [1] Tyler, David E. “A Distribution-Free M-Estimator of Multivariate
Scatter.” The Annals of Statistics 15, no. 1 (March 1, 1987): 234–51.
.. [2] Soloveychik, I., and A. Wiesel. 2014. Tyler's Covariance Matrix
Estimator in Elliptical Models With Convex Structure.
IEEE Transactions on Signal Processing 62 (20): 5251-59.
doi:10.1109/TSP.2014.2348951.
.. [3] Ollila, Esa, Daniel P. Palomar, and Frederic Pascal.
“Affine Equivariant Tyler’s M-Estimator Applied to Tail Parameter
Learning of Elliptical Distributions.” arXiv, May 7, 2023.
https://doi.org/10.48550/arXiv.2305.04330. | cov_tyler | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
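A minimal usage sketch for `cov_tyler` (assumed in scope) on zero-mean data; with normalize="det" the returned scatter is a shape matrix with unit determinant.

import numpy as np

rng = np.random.default_rng(5)
shape_true = np.array([[2.0, 0.8], [0.8, 1.0]])
x = rng.multivariate_normal([0.0, 0.0], shape_true, size=1000)

res = cov_tyler(x, normalize="det")
print(res.n_iter, np.round(np.linalg.det(res.cov), 6))  # det close to 1
print(res.cov.round(2))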
def cov_tyler_regularized(data, start_cov=None, normalize=False,
shrinkage_factor=None,
maxiter=100, eps=1e-13):
"""Regularized Tyler's M-estimator for normalized covariance (shape).
The underlying (population) mean of the data is assumed to be zero.
Parameters
----------
data : ndarray
data array with observations in rows and variables in columns.
start_cov : None or ndarray
starting covariance for iterative solution
normalize : bool
If True, then the scatter matrix is normalized to have trace equal
to the number of columns in the data.
shrinkage_factor : None or float in [0, 1]
Shrinkage for the scatter estimate. If it is zero, then no shrinkage
is performed. If it is None, then the shrinkage factor will be
determined by a plugin estimator
maxiter : int
maximum number of iterations to find the solution
eps : float
convergence criterion. The maximum absolute distance needs to be
smaller than eps for convergence.
Returns
-------
result instance with the following attributes
cov : ndarray
estimate of the scatter matrix
iter : int
number of iterations used in finding a solution. If iter is less than
maxiter, then the iteration converged.
shrinkage_factor : float
shrinkage factor that was used in the estimation. This will be the
same as the function argument if it was not None.
Notes
-----
If the shrinkage factor is None, then a plugin is used as described in
Chen and Wiesel 2011. The required trace for a pilot scatter estimate is
obtained by the covariance rescaled by MAD estimate for the variance.
References
----------
.. [1] Chen, Yilun, A. Wiesel, and A.O. Hero. “Robust Shrinkage
Estimation of High-Dimensional Covariance Matrices.” IEEE Transactions
on Signal Processing 59, no. 9 (September 2011): 4097–4107.
https://doi.org/10.1109/TSP.2011.2138698.
"""
x = np.asarray(data)
nobs, k_vars = x.shape
kn = k_vars * 1. / nobs
# calculate MAD only once if needed
if start_cov is None or shrinkage_factor is None:
scale_mad = mad(x, center=0)
corr = None
if shrinkage_factor is None:
# maybe some things here are redundant
xd = x / x.std(0) # scale_mad
corr = xd.T.dot(xd)
corr *= np.outer(scale_mad, scale_mad)
corr *= k_vars / np.trace(corr)
tr = np.trace(corr.dot(corr))
n, k = nobs, k_vars
# Chen and Wiesel 2011 equation (13)
sf = k*k + (1 - 2./k) * tr
sf /= (k*k - n*k - 2*n) + (n + 1 + 2. * (n - 1.) / k) * tr
shrinkage_factor = sf
if start_cov is not None:
c = start_cov
else:
c = np.diag(scale_mad**2)
identity = np.eye(k_vars)
for i in range(maxiter):
c_inv = np.linalg.pinv(c)
c_old = c
# this could be vectorized but could use a lot of memory
# TODO: try to work in vectorized batches
c0 = kn * sum(np.outer(xi, xi) / np.inner(xi, c_inv.dot(xi))
for xi in x)
if shrinkage_factor != 0:
c = (1 - shrinkage_factor) * c0 + shrinkage_factor * identity
else:
c = c0
c *= k_vars / np.trace(c)
diff = np.max(np.abs(c - c_old))
if diff < eps:
break
res = Holder(cov=c, n_iter=i, shrinkage_factor=shrinkage_factor,
corr=corr)
return res | Regularized Tyler's M-estimator for normalized covariance (shape).
The underlying (population) mean of the data is assumed to be zero.
Parameters
----------
data : ndarray
data array with observations in rows and variables in columns.
start_cov : None or ndarray
starting covariance for iterative solution
normalize : bool
If True, then the scatter matrix is normalized to have trace equal
to the number of columns in the data.
shrinkage_factor : None or float in [0, 1]
Shrinkage for the scatter estimate. If it is zero, then no shrinkage
is performed. If it is None, then the shrinkage factor will be
determined by a plugin estimator
maxiter : int
maximum number of iterations to find the solution
eps : float
convergence criterion. The maximum absolute distance needs to be
smaller than eps for convergence.
Returns
-------
result instance with the following attributes
cov : ndarray
estimate of the scatter matrix
iter : int
number of iterations used in finding a solution. If iter is less than
maxiter, then the iteration converged.
shrinkage_factor : float
shrinkage factor that was used in the estimation. This will be the
same as the function argument if it was not None.
Notes
-----
If the shrinkage factor is None, then a plugin is used as described in
Chen and Wiesel 2011. The required trace for a pilot scatter estimate is
obtained by the covariance rescaled by MAD estimate for the variance.
References
----------
.. [1] Chen, Yilun, A. Wiesel, and A.O. Hero. “Robust Shrinkage
Estimation of High-Dimensional Covariance Matrices.” IEEE Transactions
on Signal Processing 59, no. 9 (September 2011): 4097–4107.
https://doi.org/10.1109/TSP.2011.2138698. | cov_tyler_regularized | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def cov_tyler_pairs_regularized(data_iterator, start_cov=None, normalize=False,
shrinkage_factor=None, nobs=None, k_vars=None,
maxiter=100, eps=1e-13):
"""Tyler's M-estimator for normalized covariance (scatter)
The underlying (population) mean of the data is assumed to be zero.
experimental, calculation of startcov and shrinkage factor doesn't work
This is intended for cluster robust and HAC covariance matrices that need
to iterate over pairs of observations that are correlated.
Parameters
----------
data_iterator : restartable iterator
needs to provide three elements xi, xj and w
start_cov : None or ndarray
starting covariance for iterative solution
normalize : bool
If True, then the scatter matrix is normalized to have trace equal
to the number of columns in the data.
shrinkage_factor : None or float in [0, 1]
Shrinkage for the scatter estimate. If it is zero, then no shrinkage
is performed. If it is None, then the shrinkage factor will be
determined by a plugin estimator
maxiter : int
maximum number of iterations to find the solution
eps : float
convergence criterion. The maximum absolute distance needs to be
smaller than eps for convergence.
Returns
-------
scatter : ndarray
estimate of the scatter matrix
iter : int
number of iterations used in finding a solution. If iter is less than
maxiter, then the iteration converged.
shrinkage_factor : float
shrinkage factor that was used in the estimation. This will be the
same as the function argument if it was not None.
Notes
-----
If the shrinkage factor is None, then a plugin is used as described in
Chen and Wiesel 2011. The required trace for a pilot scatter estimate is
obtained by the covariance rescaled by MAD estimate for the variance.
References
----------
.. [1] Chen, Yilun, A. Wiesel, and A.O. Hero. “Robust Shrinkage Estimation
of High-Dimensional Covariance Matrices.” IEEE Transactions on Signal
Processing 59, no. 9 (September 2011): 4097–4107.
https://doi.org/10.1109/TSP.2011.2138698.
"""
x = data_iterator
# x = np.asarray(data)
# nobs, k_vars = x.shape
# calculate MAD only once if needed
if start_cov is None or shrinkage_factor is None:
scale_mad = mad(x, center=0)
corr = None
if shrinkage_factor is None:
# maybe some things here are redundant
xd = x / x.std(0) # scale_mad
corr = xd.T.dot(xd)
corr *= np.outer(scale_mad, scale_mad)
corr *= k_vars / np.trace(corr)
tr = np.trace(corr.dot(corr))
n, k = nobs, k_vars
# Chen and Wiesel 2011 equation (13)
sf = k*k + (1 - 2./k) * tr
sf /= (k*k - n*k - 2*n) + (n + 1 + 2. * (n - 1.) / k) * tr
shrinkage_factor = sf
if start_cov is not None:
c = start_cov
else:
c = np.diag(scale_mad**2)
identity = np.eye(k_vars)
kn = k_vars * 1. / nobs
for i in range(maxiter):
c_inv = np.linalg.pinv(c)
c_old = c
# this could be vectorized but could use a lot of memory
# TODO: try to work in vectorized batches
# weights is a problem if iterator should be ndarray
# c0 = kn * sum(np.outer(xi, xj) / np.inner(xi, c_inv.dot(xj))
# for xi, xj in x)
c0 = kn * sum(np.outer(xij[0], xij[1]) /
np.inner(xij[0], c_inv.dot(xij[1])) for xij in x)
if shrinkage_factor != 0:
c = (1 - shrinkage_factor) * c0 + shrinkage_factor * identity
else:
c = c0
c *= k_vars / np.trace(c)
diff = np.max(np.abs(c - c_old))
if diff < eps:
break
res = Holder(cov=c, n_iter=i, shrinkage_factor=shrinkage_factor,
corr=corr)
return res | Tyler's M-estimator for normalized covariance (scatter)
The underlying (population) mean of the data is assumed to be zero.
experimental, calculation of startcov and shrinkage factor doesn't work
This is intended for cluster robust and HAC covariance matrices that need
to iterate over pairs of observations that are correlated.
Parameters
----------
data_iterator : restartable iterator
needs to provide three elements xi, xj and w
start_cov : None or ndarray
starting covariance for iterative solution
normalize : bool
If True, then the scatter matrix is normalized to have trace equal
to the number of columns in the data.
shrinkage_factor : None or float in [0, 1]
Shrinkage for the scatter estimate. If it is zero, then no shrinkage
is performed. If it is None, then the shrinkage factor will be
determined by a plugin estimator
maxiter : int
maximum number of iterations to find the solution
eps : float
convergence criterion. The maximum absolute distance needs to be
smaller than eps for convergence.
Returns
-------
scatter : ndarray
estimate of the scatter matrix
iter : int
number of iterations used in finding a solution. If iter is less than
maxiter, then the iteration converged.
shrinkage_factor : float
shrinkage factor that was used in the estimation. This will be the
same as the function argument if it was not None.
Notes
-----
If the shrinkage factor is None, then a plugin is used as described in
Chen and Wiesel 2011. The required trace for a pilot scatter estimate is
obtained by the covariance rescaled by MAD estimate for the variance.
References
----------
.. [1] Chen, Yilun, A. Wiesel, and A.O. Hero. “Robust Shrinkage Estimation
of High-Dimensional Covariance Matrices.” IEEE Transactions on Signal
Processing 59, no. 9 (September 2011): 4097–4107.
https://doi.org/10.1109/TSP.2011.2138698. | cov_tyler_pairs_regularized | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def cov_weighted(data, weights, center=None, weights_cov=None,
weights_cov_denom=None, ddof=1):
"""weighted mean and covariance (for M-estimators)
wmean = sum (weights * data) / sum(weights)
wcov = sum (weights_cov * data_i data_i') / weights_cov_denom
The options for weights_cov_denom are described in Parameters.
By default both mean and cov are averages based on the same
weights.
Parameters
----------
data : array_like, 2-D
observations in rows, variables in columns
no missing value handling
weights : ndarray, 1-D
weights array with length equal to the number of observations
center : None or ndarray (optional)
If None, then the weighted mean is subtracted from the data
If center is provided, then it is used instead of the
weighted mean.
weights_cov : None or ndarray (optional)
If None, then the same weights as for the mean are used.
weights_cov_denom : None, float or "det" (optional)
Specifies the denominator for the weighted covariance.
If None, then the sum of weights - ddof are used and the covariance is
an average cross product.
If "det", then the weighted covariance is normalized such that
det(wcov) is 1.
If weights_cov_denom is 1, then the weighted cross product is returned
without averaging or scaling (sum of squares).
Otherwise it is used directly as denominator after subtracting
ddof.
ddof : int or float
covariance degrees of freedom correction, only used if
weights_cov_denom is None or a float.
Notes
-----
The extra options are available to cover the general M-estimator
for location and scatter with estimating equations (using data x):
sum (weights * (x - m)) = 0
sum (weights_cov * (x_i - m) * (x_i - m)') - weights_cov_denom * cov = 0
where the weights are functions of the mahalanobis distance of the
residuals, and m is the mean.
In the default case
wmean = ave (w_i x_i)
wcov = ave (w_i (x_i - m) (x_i - m)')
References
----------
.. [1] Rocke, D. M., and D. L. Woodruff. 1993. Computation of Robust
Estimates of Multivariate Location and Shape.
Statistica Neerlandica 47 (1): 27-42.
doi:10.1111/j.1467-9574.1993.tb01404.x.
"""
wsum = weights.sum()
if weights_cov is None:
weights_cov = weights
wsum_cov = wsum
else:
wsum_cov = None # calculate below only if needed
if center is None:
wmean = weights.dot(data) / wsum
else:
wmean = center
xdm = data - wmean
wcov = (weights_cov * xdm.T).dot(xdm)
if weights_cov_denom is None:
if wsum_cov is None:
wsum_cov = weights_cov.sum()
wcov /= (wsum_cov - ddof) # * np.sum(weights_cov**2) / wsum_cov)
elif weights_cov_denom == "det":
wcov /= np.linalg.det(wcov)**(1 / wcov.shape[0])
elif weights_cov_denom == 1:
pass
else:
wcov /= (weights_cov_denom - ddof)
return wcov, wmean | weighted mean and covariance (for M-estimators)
wmean = sum (weights * data) / sum(weights)
wcov = sum (weights_cov * data_i data_i') / weights_cov_denom
The options for weights_cov_denom are described in Parameters.
By default both mean and cov are averages based on the same
weights.
Parameters
----------
data : array_like, 2-D
observations in rows, variables in columns
no missing value handling
weights : ndarray, 1-D
weights array with length equal to the number of observations
center : None or ndarray (optional)
If None, then the weighted mean is subtracted from the data
If center is provided, then it is used instead of the
weighted mean.
weights_cov : None or ndarray (optional)
If None, then the same weights as for the mean are used.
weights_cov_denom : None, float or "det" (optional)
Specifies the denominator for the weighted covariance.
If None, then the sum of weights - ddof are used and the covariance is
an average cross product.
If "det", then the weighted covariance is normalized such that
det(wcov) is 1.
If weights_cov_denom is 1, then the weighted cross product is returned
without averaging or scaling (sum of squares).
Otherwise it is used directly as denominator after subtracting
ddof.
ddof : int or float
covariance degrees of freedom correction, only used if
weights_cov_denom is None or a float.
Notes
-----
The extra options are available to cover the general M-estimator
for location and scatter with estimating equations (using data x):
sum (weights * (x - m)) = 0
sum (weights_cov * (x_i - m) * (x_i - m)') - weights_cov_denom * cov = 0
where the weights are functions of the mahalanobis distance of the
residuals, and m is the mean.
In the default case
wmean = ave (w_i x_i)
wcov = ave (w_i (x_i - m) (x_i - m)')
References
----------
.. [1] Rocke, D. M., and D. L. Woodruff. 1993. Computation of Robust
Estimates of Multivariate Location and Shape.
Statistica Neerlandica 47 (1): 27-42.
doi:10.1111/j.1467-9574.1993.tb01404.x. | cov_weighted | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
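A sanity-check sketch for `cov_weighted` (assumed in scope): with equal weights and the default options it reduces to the ordinary mean and covariance.

import numpy as np

rng = np.random.default_rng(6)
x = rng.normal(size=(50, 3))
w = np.ones(50)

wcov, wmean = cov_weighted(x, w, ddof=1)
print(np.allclose(wmean, x.mean(0)), np.allclose(wcov, np.cov(x.T, ddof=1)))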
def weights_mvt(distance, df, k_vars):
"""weight function based on multivariate t distribution
Parameters
----------
distance : ndarray
mahalanobis distance
df : int or float
degrees of freedom of the t distribution
k_vars : int
number of variables in the multivariate sample
Returns
-------
weights : ndarray
weights calculated for the given distances.
References
----------
.. [1] Finegold, Michael A., and Mathias Drton. 2014. Robust Graphical
Modeling with T-Distributions. arXiv:1408.2033 [Cs, Stat], August.
http://arxiv.org/abs/1408.2033.
.. [2] Finegold, Michael, and Mathias Drton. 2011. Robust graphical
modeling of gene networks using classical and alternative
t-distributions. The Annals of Applied Statistics 5 (2A): 1057-80.
"""
w = (df + k_vars) / (df + distance)
return w | weight function based on multivariate t distribution
Parameters
----------
distance : ndarray
mahalanobis distance
df : int or float
degrees of freedom of the t distribution
k_vars : int
number of variables in the multivariate sample
Returns
-------
weights : ndarray
weights calculated for the given distances.
References
----------
.. [1] Finegold, Michael A., and Mathias Drton. 2014. Robust Graphical
Modeling with T-Distributions. arXiv:1408.2033 [Cs, Stat], August.
http://arxiv.org/abs/1408.2033.
.. [2] Finegold, Michael, and Mathias Drton. 2011. Robust graphical
modeling of gene networks using classical and alternative
t-distributions. The Annals of Applied Statistics 5 (2A): 1057-80. | weights_mvt | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
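A tiny sketch of the t-based weights above (`weights_mvt` assumed in scope): the weights decrease smoothly as the squared distance grows, which is what downweights outlying observations in the M-estimation.

import numpy as np

d2 = np.array([0.5, 3.0, 10.0, 100.0])  # squared Mahalanobis distances
print(weights_mvt(d2, df=4, k_vars=3))  # monotonically decreasing weights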
def weights_quantile(distance, frac=0.5, rescale=True):
"""Weight function for cutoff weights.
The weight function is an indicator function for distances smaller than
the frac quantile.
rescale option is not supported.
"""
cutoff = np.percentile(distance, frac * 100)
w = (distance < cutoff).astype(int)
return w | Weight function for cutoff weights.
The weight function is an indicator function for distances smaller than
the frac quantile.
rescale option is not supported. | weights_quantile | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def _cov_iter(data, weights_func, weights_args=None, cov_init=None,
rescale='med', maxiter=3, atol=1e-14, rtol=1e-6):
"""Iterative robust covariance estimation using weights.
This is in the style of M-estimators for given weight function.
Note: ??? Whether this is normalized to be consistent with the
multivariate normal case depends on the weight function.
maybe it is consistent, it's just a weighted cov.
TODO: options for rescale instead of just median
Parameters
----------
data : array_like
weights_func : callable
function to calculate weights from the distances and weights_args
weights_args : tuple
extra arguments for the weights_func
cov_init : ndarray, square 2-D
initial covariance matrix
rescale : "med" or "none"
If "med" then the resulting covariance matrix is normalized so it is
approximately consistent with the normal distribution. Rescaling is
based on the median of the distances and of the chisquare distribution.
Other options are not yet available.
If rescale is the string "none", then no rescaling is performed.
Returns
-------
Holder instance with attributes: cov, mean, w, dist, it, converged
Notes
-----
This iterates over calculating the mahalanobis distance and weighted
covariance. See Finegold and Drton 2014 for the motivation using weights
based on the multivariate t distribution. Note that this does not calculate
their alternative t distribution which requires numerical or Monte Carlo
integration.
References
----------
.. [1] Finegold, Michael, and Mathias Drton. 2011. Robust graphical
modeling of gene networks using classical and alternative
t-distributions. Annals of Applied Statistics 5 (2A): 1057-80.
"""
data = np.asarray(data)
nobs, k_vars = data.shape # noqa: F841
if cov_init is None:
cov_init = np.cov(data.T)
converged = False
cov = cov_old = cov_init
for it in range(maxiter):
dist = mahalanobis(data, cov=cov)
w = weights_func(dist, *weights_args)
cov, mean = cov_weighted(data, w)
if np.allclose(cov, cov_old, atol=atol, rtol=rtol):
converged = True
break
# recompute maha distance at final estimate
dist = mahalanobis(data, cov=cov)
if rescale == 'none':
s = 1
elif rescale == 'med':
s = np.median(dist) / stats.chi2.ppf(0.5, k_vars)
cov *= s
else:
raise NotImplementedError('only rescale="med" is currently available')
res = Holder(cov=cov, mean=mean, weights=w, mahalanobis=dist,
scale_factor=s, n_iter=it, converged=converged,
method="m-estimator",
weights_func=weights_func,
)
return res | Iterative robust covariance estimation using weights.
This is in the style of M-estimators for given weight function.
Note: ??? Whether this is normalized to be consistent with the
multivariate normal case depends on the weight function.
maybe it is consistent, it's just a weighted cov.
TODO: options for rescale instead of just median
Parameters
----------
data : array_like
weights_func : callable
function to calculate weights from the distances and weights_args
weights_args : tuple
extra arguments for the weights_func
cov_init : ndarray, square 2-D
initial covariance matrix
rescale : "med" or "none"
If "med" then the resulting covariance matrix is normalized so it is
approximately consistent with the normal distribution. Rescaling is
based on the median of the distances and of the chisquare distribution.
Other options are not yet available.
If rescale is the string "none", then no rescaling is performed.
Returns
-------
Holder instance with attributes: cov, mean, w, dist, it, converged
Notes
-----
This iterates over calculating the mahalanobis distance and weighted
covariance. See Finegold and Drton 2014 for the motivation using weights
based on the multivariate t distribution. Note that this does not calculate
their alternative t distribution which requires numerical or Monte Carlo
integration.
References
----------
.. [1] Finegold, Michael, and Mathias Drton. 2011. Robust graphical
modeling of gene networks using classical and alternative
t-distributions. Annals of Applied Statistics 5 (2A): 1057-80. | _cov_iter | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
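A usage sketch combining `_cov_iter` with the t-based weights defined earlier (both assumed in scope); a small fraction of shifted rows barely moves the estimate.

import numpy as np

rng = np.random.default_rng(7)
x = rng.multivariate_normal(np.zeros(3), np.eye(3), size=200)
x[:10] += 8  # contaminate a few rows

res = _cov_iter(x, weights_mvt, weights_args=(4, 3), maxiter=50)
print(res.converged, res.n_iter)
print(res.cov.round(2))  # roughly the identity despite the outliers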
def _cov_starting(data, standardize=False, quantile=0.5, retransform=False):
"""compute some robust starting covariances
The returned covariance matrices are intended as starting values
for further processing. The main purpose is for algorithms with high
breakdown point.
The quality as standalone covariance matrices varies and might not
be very good.
Preliminary version. This will still be changed. Options and defaults can
change, additional covariance methods will be added, and the return will be extended.
Parameters
----------
data : array-like
Multivariate data with observations in rows (axis=0).
standardize : bool
If False, then the data is only centered (by median).
If True, then the data is standardized using median and mad-scale.
This scaling is only intermediate, the returned covariance compensates
for the initial scaling.
quantile : float in [0.5, 1]
Parameter used for `_cov_iter` estimation.
Returns
-------
list of Holder instances with `cov` attribute.
"""
x = np.asarray(data)
nobs, k_vars = x.shape
if standardize:
# there should be a helper function/class
center = np.median(data, axis=0)
xs = (x - center)
std = mad0(data)
xs /= std
else:
center = np.median(data, axis=0)
xs = x - center
std = 1
cov_all = []
d = mahalanobis(xs, cov=None, cov_inv=np.eye(k_vars))
percentiles = [(k_vars+2) / nobs * 100 * 2, 25, 50, 85]
cutoffs = np.percentile(d, percentiles)
for p, cutoff in zip(percentiles, cutoffs):
xsp = xs[d < cutoff]
c = np.cov(xsp.T)
corr_factor = coef_normalize_cov_truncated(p / 100, k_vars)
c0 = Holder(
cov=c * corr_factor,
mean=xsp.mean(0) * std + center,
method="pearson truncated",
)
c01 = _cov_iter(xs, weights_quantile, weights_args=(quantile,),
rescale="med", cov_init=c0.cov, maxiter=100)
c02 = Holder(
cov=_naive_ledoit_wolf_shrinkage(xsp, 0).cov * corr_factor,
mean=xsp.mean(0) * std + center,
method="ledoit_wolf",
)
c03 = _cov_iter(xs, weights_quantile, weights_args=(quantile,),
rescale="med", cov_init=c02.cov, maxiter=100)
if not standardize or not retransform:
cov_all.extend([c0, c01, c02, c03])
else:
# compensate for initial rescaling
# TODO: this does not return list of Holder anymore
s = np.outer(std, std)
cov_all.extend([r.cov * s for r in [c0, c01, c02, c03]])
c2 = cov_ogk(xs)
cov_all.append(c2)
c2raw = Holder(
cov=c2.cov_raw,
mean=c2.loc_raw * std + center,
method="ogk_raw",
)
cov_all.append(c2raw)
z_tanh = np.tanh(xs)
c_th = Holder(
cov=np.corrcoef(z_tanh.T), # not consistently scaled for cov
mean=center, # TODO: do we add inverted mean z_tanh ?
method="tanh",
)
cov_all.append(c_th)
x_spatial = xs / np.sqrt(np.sum(xs**2, axis=1))[:, None]
c_th = Holder(
cov=np.cov(x_spatial.T),
mean=center,
method="spatial",
)
cov_all.append(c_th)
c_th = Holder(
# not consistently scaled for cov
# cov=stats.spearmanr(xs)[0], # not correct shape if k=1 or 2
cov=corr_rank(xs), # always returns matrix, np.corrcoef result
mean=center,
method="spearman",
)
cov_all.append(c_th)
c_ns = Holder(
cov=corr_normal_scores(xs), # not consistently scaled for cov
mean=center, # TODO: do we add inverted mean z_tanh ?
method="normal-scores",
)
cov_all.append(c_ns)
# TODO: rescale back to original space using center and std
return cov_all | compute some robust starting covariances
The returned covariance matrices are intended as starting values
for further processing. The main purpose is for algorithms with high
breakdown point.
The quality as standalone covariance matrices varies and might not
be very good.
Preliminary version. This will still be changed. Options and defaults can
change, additional covariance methods will be added, and the return will be extended.
Parameters
----------
data : array-like
Multivariate data with observations in rows (axis=0).
standardize : bool
If False, then the data is only centered (by median).
If True, then the data is standardized using median and mad-scale.
This scaling is only intermediate, the returned covariance compensates
for the initial scaling.
quantile : float in [0.5, 1]
Parameter used for `_cov_iter` estimation.
Returns
-------
list of Holder instances with `cov` attribute. | _cov_starting | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def _orthogonalize_det(x, corr, loc_func, scale_func):
"""Orthogonalize
This is a simplified version of the OGK method.
version from DetMCD works on zscored data
(does not return mean and cov of original data)
so we drop the compensation for scaling in zscoring
z is the data here, zscored with robust estimators,
e.g. median and Qn in DetMCD
"""
evals, evecs = np.linalg.eigh(corr) # noqa: F841
z = x.dot(evecs)
transf0 = evecs
scale_z = scale_func(z) # scale of principal components
cov = (transf0 * scale_z**2).dot(transf0.T)
# extra step in DetMCD, sphering data with new cov to compute center
# I think this is equivalent to scaling z
# loc_z = loc_func(z / scale_z) * scale_z # center of principal components
# loc = (transf0 * scale_z).dot(loc_z)
transf1 = (transf0 * scale_z).dot(transf0.T)
# transf1inv = (transf0 * scale_z**(-1)).dot(transf0.T)
# loc = loc_func(x @ transf1inv) @ transf1
loc = loc_func((z / scale_z).dot(transf0.T)) @ transf1
return loc, cov | Orthogonalize
This is a simplified version of the OGK method.
version from DetMCD works on zscored data
(does not return mean and cov of original data)
so we drop the compensation for scaling in zscoring
z is the data here, zscored with robust estimators,
e.g. median and Qn in DetMCD | _orthogonalize_det | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def _get_detcov_startidx(z, h, options_start=None, methods_cov="all"):
"""Starting sets for deterministic robust covariance estimators.
These are intended as starting sets for DetMCD, DetS and DetMM.
"""
if options_start is None:
options_start = {}
loc_func = options_start.get("loc_func", median)
scale_func = options_start.get("scale_func", mad)
z = (z - loc_func(z)) / scale_func(z)
if np.squeeze(z).ndim == 1:
# only one random variable
z = np.squeeze(z)
nobs = z.shape[0]
idx_sel = np.argpartition(np.abs(z), h)[:h]
idx_all = [(idx_sel, "abs-resid")]
# next uses symmetric equal-tail trimming
idx_sorted = np.argsort(z)
h_tail = (nobs - h) // 2
idx_all.append((idx_sorted[h_tail : h_tail + h], "trimmed-tail"))
return idx_all
# continue if more than 1 random variable
cov_all = _cov_starting(z, standardize=False, quantile=0.5)
# orthogonalization step
idx_all = []
for c in cov_all:
if not hasattr(c, "method"):
continue
method = c.method
mean, cov = _orthogonalize_det(z, c.cov, loc_func, scale_func)
d = mahalanobis(z, mean, cov)
idx_sel = np.argpartition(d, h)[:h]
idx_all.append((idx_sel, method))
return idx_all | Starting sets for deterministic robust covariance estimators.
These are intended as starting sets for DetMCD, DetS and DetMM. | _get_detcov_startidx | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def _fit_mean_shape(self, mean, shape, scale):
"""Estimate mean and shape in iteration step.
This does only one step.
Parameters
----------
mean : ndarray
Starting value for mean
shape : ndarray
Starting value for shape matrix.
scale : float
Starting value for scale.
Returns
-------
Tuple (shape, mean) with the updated estimates, as returned by cov_weighted.
"""
d = mahalanobis(self.data - mean, shape, sqrt=True) / scale
weights_mean = self.weights_mean(d)
weights_cov = self.weights_scatter(d)
res = cov_weighted(
self.data,
weights=weights_mean,
center=None,
weights_cov=weights_cov,
weights_cov_denom="det",
ddof=1,
)
return res | Estimate mean and shape in iteration step.
This does only one step.
Parameters
----------
mean : ndarray
Starting value for mean
shape : ndarray
Starting value for shape matrix.
scale : float
Starting value for scale.
Returns
-------
Holder instance with updated estimates. | _fit_mean_shape | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def _fit_scale(self, maha, start_scale=None, maxiter=100, rtol=1e-5,
atol=1e-5):
"""Estimate iterated M-scale.
Parameters
----------
maha : ndarray
start_scale : None or float
Starting scale. If it is None, the mad of maha is used as starting scale.
maxiter : int
Maximum iterations to compute M-scale
rtol, atol : float
Relative and absolute convergence criteria for scale used with
allclose.
Returns
-------
float : scale estimate
"""
if start_scale is None:
# TODO: this does not really make sense
# better scale to median of maha and chi or chi2
start_scale = mad(maha)
scale = rscale._scale_iter(
maha,
scale0=start_scale,
maxiter=maxiter,
rtol=rtol,
atol=atol,
meef_scale=self.rho,
scale_bias=self.scale_bias,
)
return scale | Estimate iterated M-scale.
Parameters
----------
maha : ndarray
start_scale : None or float
Starting scale. If it is None, the mad of maha is used as starting scale.
maxiter : int
Maximum iterations to compute M-scale
rtol, atol : float
Relative and absolute convergence criteria for scale used with
allclose.
Returns
-------
float : scale estimate | _fit_scale | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def fit(self, start_mean=None, start_shape=None, start_scale=None,
maxiter=100, update_scale=True):
"""Estimate mean, shape and scale parameters with MM-estimator.
Parameters
----------
start_mean : None or ndarray
Starting value for mean, center.
If None, then median is used.
start_shape : None or 2-dim ndarray
Starting value of shape matrix, i.e. scatter matrix normalized
to det(scatter) = 1.
If None, then scaled covariance matrix of data is used.
start_scale : None or float.
Starting value of scale.
maxiter : int
Maximum number of iterations.
update_scale : bool
If update_scale is False, then the scale is held fixed at start_scale
and only mean and shape are updated.
Returns
-------
results instance with mean, shape, scale, cov and other attributes.
Notes
-----
If start_scale is provided and update_scale is False, then this is
an M-estimator with a predetermined scale as used in the second
stage of an MM-estimator.
"""
converged = False
if start_scale is not None:
scale_old = start_scale
else:
scale_old = 1
# will be reset if start_shape is also None.
if start_mean is not None:
mean_old = start_mean
else:
mean_old = np.median(self.data, axis=0)
if start_shape is not None:
shape_old = start_shape
else:
shape_old = np.cov(self.data.T)
scale = np.linalg.det(shape_old) ** (1 / self.k_vars)
shape_old /= scale
if start_scale is None:
scale_old = scale
if update_scale is False:
scale = start_scale
for i in range(maxiter):
shape, mean = self._fit_mean_shape(mean_old, shape_old, scale_old)
d = mahalanobis(self.data - mean, shape, sqrt=True)
if update_scale:
scale = self._fit_scale(d, start_scale=scale_old, maxiter=10)
if (np.allclose(scale, scale_old, rtol=1e-5) and
np.allclose(mean, mean_old, rtol=1e-5) and
np.allclose(shape, shape_old, rtol=1e-5)
): # noqa E124
converged = True
break
scale_old = scale
mean_old = mean
shape_old = shape
maha = mahalanobis(self.data - mean, shape / scale, sqrt=True)
res = Holder(
mean=mean,
shape=shape,
scale=scale,
cov=shape * scale**2,
converged=converged,
n_iter=i,
mahalanobis=maha,
)
return res | Estimate mean, shape and scale parameters with MM-estimator.
Parameters
----------
start_mean : None or ndarray
Starting value for mean, center.
If None, then median is used.
start_shape : None or 2-dim ndarray
Starting value of shape matrix, i.e. scatter matrix normalized
to det(scatter) = 1.
If None, then scaled covariance matrix of data is used.
start_scale : None or float.
Starting value of scale.
maxiter : int
Maximum number of iterations.
update_scale : bool
If update_scale is False, then the scale is held fixed at start_scale
and only mean and shape are updated.
Returns
-------
results instance with mean, shape, scale, cov and other attributes.
Notes
-----
If start_scale is provided and update_scale is False, then this is
an M-estimator with a predetermined scale as used in the second
stage of an MM-estimator. | fit | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
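A hedged usage sketch for the fit method above. The enclosing class is assumed to be the M-estimator class (CovM in statsmodels.robust.covariance) and to be constructible from the data alone; check the actual constructor signature before relying on this.

import numpy as np

rng = np.random.default_rng(8)
x = rng.multivariate_normal([0.0, 0.0], [[2.0, 0.5], [0.5, 1.0]], size=300)

mod = CovM(x)  # assumed constructor: signature not shown above
res = mod.fit()
print(res.converged, res.scale)
print(res.cov.round(2))  # cov = shape * scale**2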
def _cstep(self, x, mean, cov, h, maxiter=2, tol=1e-8):
"""C-step for mcd iteration
x is data, perc is percentile h / nobs, don't need perc when we
use np.argpartition
requires starting mean and cov
"""
converged = False
for _ in range(maxiter):
d = mahalanobis(x - mean, cov)
idx_sel = np.argpartition(d, h)[:h]
x_sel = x[idx_sel]
mean = x_sel.mean(0)
cov_new = np.cov(x_sel.T, ddof=1)
if ((cov - cov_new)**2).mean() < tol:
cov = cov_new
converged = True
break
cov = cov_new
return mean, cov, converged | C-step for mcd iteration
x is data, perc is percentile h / nobs, don't need perc when we
use np.argpartition
requires starting mean and cov | _cstep | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def _fit_one(self, x, idx, h, maxiter=2, mean=None, cov=None):
"""Compute mcd for one starting set of observations.
Parameters
----------
x : ndarray
Data.
idx : ndarray
Indices or mask of observation in starting set, used as ``x[idx]``
h : int
Number of observations in evaluation set for cov.
maxiter : int
Maximum number of c-steps.
Returns
-------
mean : ndarray
Estimated mean.
cov : ndarray
Estimated covariance.
det : float
Determinant of estimated covariance matrix.
Notes
-----
This does not do any preprocessing of the data and returns the
empirical mean and covariance of evaluation set of the data ``x``.
"""
if idx is not None:
x_sel = x[idx]
else:
x_sel = x
if mean is None:
mean = x_sel.mean(0)
if cov is None:
cov = np.cov(x_sel.T, ddof=1)
# updated with c-step
mean, cov, conv = self._cstep(x, mean, cov, h, maxiter=maxiter)
det = np.linalg.det(cov)
return mean, cov, det, conv | Compute mcd for one starting set of observations.
Parameters
----------
x : ndarray
Data.
idx : ndarray
Indices or mask of observation in starting set, used as ``x[idx]``
h : int
Number of observations in evaluation set for cov.
maxiter : int
Maximum number of c-steps.
Returns
-------
mean : ndarray
Estimated mean.
cov : ndarray
Estimated covariance.
det : float
Determinant of estimated covariance matrix.
Notes
-----
This does not do any preprocessing of the data and returns the
empirical mean and covariance of evaluation set of the data ``x``. | _fit_one | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def fit(self, h, *, h_start=None, mean_func=None, scale_func=None,
maxiter=100, options_start=None, reweight=True,
trim_frac=0.975, maxiter_step=100):
"""
Compute minimum covariance determinant estimate of mean and covariance.
Parameters
----------
x : array-like
Data with observation in rows and variables in columns.
h : int
Number of observations in evaluation set for minimizing
determinant.
h_start : int
Number of observations used in starting mean and covariance.
mean_func, scale_func : callable or None.
Mean and scale function for initial standardization.
Current defaults, if they are None, are median and mad, but
default scale_func will likely change.
options_start : None or dict
Options for the starting estimators.
currently not used
TODO: which options? e.g. for OGK
reweight : bool
If reweight is true, then a reweighted estimator is returned. The
reweighting is based on a chisquare trimming of Mahalanobis
distances. The raw results are in the ``results_raw`` attribute.
trim_frac : float in (0, 1)
Trim fraction used if reweight is true. Used to compute quantile
of chisquare distribution with tail probability 1 - trim_frac.
maxiter_step : int
Number of iteration in the c-step.
In the current implementation a small maxiter in the c-step does
not find the optimal solution.
Returns
-------
Holder instance with results
"""
x = self.data
nobs, k_vars = x.shape
if h is None:
h = (nobs + k_vars + 1) // 2 # check with literature
if mean_func is None:
mean_func = lambda x: np.median(x, axis=0) # noqa
if scale_func is None:
scale_func = mad
if options_start is None:
options_start = {}
if h_start is None:
nobs, k_vars = x.shape
h_start = max(nobs // 2 + 1, k_vars + 1)
m = mean_func(x)
s = scale_func(x)
z = (x - m) / s
# get initial mean, cov of standardized data, we only need ranking
# of obs
starts = _get_detcov_startidx(z, h_start, options_start)
fac_trunc = coef_normalize_cov_truncated(h / nobs, k_vars)
res = {}
for ii, ini in enumerate(starts):
idx_sel, method = ini
mean, cov, det, _ = self._fit_one(x, idx_sel, h,
maxiter=maxiter_step)
res[ii] = Holder(
mean=mean,
cov=cov * fac_trunc,
det_subset=det,
method=method,
)
det_all = np.array([i.det_subset for i in res.values()])
idx_best = np.argmin(det_all)
best = res[idx_best]
# mean = best.mean
# cov = best.cov
# need to c-step to convergence for best,
# is with best 2 in original DetMCD
if maxiter_step < maxiter:
mean, cov, det, conv = self._fit_one(x, None, h, maxiter=maxiter,
mean=best.mean, cov=best.cov)
best = Holder(
mean=mean,
cov=cov * fac_trunc,
det_subset=det,
method=method,
converged=conv,
)
# include extra info in returned Holder instance
best.det_all = det_all
best.idx_best = idx_best
best.tmean = m
best.tscale = s
if reweight:
cov, mean = _reweight(x, best.mean, best.cov, trim_frac=trim_frac,
ddof=1)
fac_trunc = coef_normalize_cov_truncated(trim_frac, k_vars)
best_w = Holder(
mean=mean,
cov=cov * fac_trunc,
# det_subset=det,
method=method,
results_raw=best,
)
return best_w
else:
return best # is Holder instance already | Compute minimum covariance determinant estimate of mean and covariance.
x : array-like
Data with observation in rows and variables in columns.
h : int
Number of observations in evaluation set for minimizing
determinant.
h_start : int
Number of observations used in starting mean and covariance.
mean_func, scale_func : callable or None.
Mean and scale function for initial standardization.
Current defaults, if they are None, are median and mad, but
default scale_func will likely change.
options_start : None or dict
Options for the starting estimators.
currently not used
TODO: which options? e.g. for OGK
reweight : bool
If reweight is true, then a reweighted estimator is returned. The
reweighting is based on a chisquare trimming of Mahalanobis
distances. The raw results are in the ``results_raw`` attribute.
trim_frac : float in (0, 1)
Trim fraction used if reweight is true. Used to compute quantile
of chisquare distribution with tail probability 1 - trim_frac.
maxiter_step : int
Number of iteration in the c-step.
In the current implementation a small maxiter in the c-step does
not find the optimal solution.
Returns
-------
Holder instance with results | fit | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
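A hedged usage sketch for the deterministic MCD fit above. The enclosing class is assumed to be CovDetMCD in statsmodels.robust.covariance and to take the data in its constructor; verify the signature before use.

import numpy as np

rng = np.random.default_rng(9)
x = rng.multivariate_normal(np.zeros(3), np.eye(3), size=200)
x[:15] += 6  # contamination

mod = CovDetMCD(x)    # assumed constructor
res = mod.fit(h=150)  # size of the determinant-minimizing subset
print(res.method)
print(res.cov.round(2))  # reweighted estimate, close to the identity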
def _get_start_params(self, idx):
"""Starting parameters from a subsample given by index
Parameters
----------
idx : ndarray
Index used to select observations from the data. The index is used
for numpy arrays, so it can be either a boolean mask or integers.
Returns
-------
mean : ndarray
Mean of subsample
shape : ndarray
The shape matrix of the subsample which is the covariance
normalized so that determinant of shape is one.
scale : float
Scale of subsample, computed so that cov = shape * scale.
"""
x_sel = self.data[idx]
k = x_sel.shape[1]
mean = x_sel.mean(0)
cov = np.cov(x_sel.T)
scale2 = np.linalg.det(cov) ** (1 / k)
shape = cov / scale2
scale = np.sqrt(scale2)
return mean, shape, scale | Starting parameters from a subsample given by index
Parameters
----------
idx : ndarray
Index used to select observations from the data. The index is used
for numpy arrays, so it can be either a boolean mask or integers.
Returns
-------
mean : ndarray
Mean of subsample
shape : ndarray
The shape matrix of the subsample which is the covariance
normalized so that determinant of shape is one.
scale : float
Scale of subsample, computed so that cov = shape * scale. | _get_start_params | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def _fit_one(self, mean=None, shape=None, scale=None, maxiter=100):
"""Compute local M-estimator for one starting set of observations.
Parameters
----------
x : ndarray
Data.
idx : ndarray
Indices or mask of observation in starting set, used as ``x[idx]``
h : int
Number of observations in evaluation set for cov.
maxiter : int
Maximum number of c-steps.
Returns
-------
mean : ndarray
Estimated mean.
cov : ndarray
Estimated covariance.
det : float
Determinant of estimated covariance matrix.
Notes
-----
This uses CovM to solve for the local optimum for given starting
values.
"""
res = self.mod.fit(
start_mean=mean,
start_shape=shape,
start_scale=scale,
maxiter=maxiter,
update_scale=True
)
return res | Compute local M-estimator for one set of starting values.
Parameters
----------
mean : ndarray
Starting value for the mean.
shape : ndarray
Starting value for the shape matrix, normalized to determinant one.
scale : float
Starting value for the scale, so that cov = shape * scale**2.
maxiter : int
Maximum number of iterations of the M-estimation.
Returns
-------
Results instance of the underlying CovM fit, with estimated mean,
shape, scale and cov.
Notes
-----
This uses CovM to solve for the local optimum for given starting
values. | _fit_one | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def fit(self, *, h_start=None, mean_func=None, scale_func=None,
maxiter=100, options_start=None, maxiter_step=5):
"""Compute S-estimator of mean and covariance.
Parameters
----------
h_start : int
Number of observations used in starting mean and covariance.
mean_func, scale_func : callable or None.
Mean and scale function for initial standardization.
Current defaults, if they are None, are median and mad, but
default scale_func will likely change.
options_start : None or dict
Options for the starting estimators.
TODO: which options? e.g. for OGK
Returns
-------
Holder instance with results
"""
x = self.data
nobs, k_vars = x.shape
if mean_func is None:
mean_func = lambda x: np.median(x, axis=0) # noqa
if scale_func is None:
scale_func = mad
if options_start is None:
options_start = {}
if h_start is None:
nobs, k_vars = x.shape
h_start = max(nobs // 2 + 1, k_vars + 1)
m = mean_func(x)
s = scale_func(x)
z = (x - m) / s
# get initial mean, cov of standardized data, we only need ranking
# of obs
starts = _get_detcov_startidx(z, h_start, options_start)
res = {}
for ii, ini in enumerate(starts):
idx_sel, method = ini
mean0, shape0, scale0 = self._get_start_params(idx_sel)
res_i = self._fit_one(
mean=mean0,
shape=shape0,
scale=scale0,
maxiter=maxiter_step,
)
res_i.method = method
res[ii] = res_i
scale_all = np.array([i.scale for i in res.values()])
idx_best = np.argmin(scale_all)
best = res[idx_best]
# mean = best.mean
# cov = best.cov
# need to iterate the best candidate to convergence;
# the original DetMCD does this for the best 2 candidates
if maxiter_step < maxiter:
best = self._fit_one(
mean=best.mean,
shape=best.shape,
scale=best.scale,
maxiter=maxiter,
)
# include extra info in returned Holder instance
best.scale_all = scale_all
best.idx_best = idx_best
best.tmean = m
best.tscale = s
return best # is Holder instance already | Compute S-estimator of mean and covariance.
Parameters
----------
h_start : int
Number of observations used in starting mean and covariance.
mean_func, scale_func : callable or None.
Mean and scale function for initial standardization.
Current defaults, if they are None, are median and mad, but
default scale_func will likely change.
options_start : None or dict
Options for the starting estimators.
TODO: which options? e.g. for OGK
Returns
-------
Holder instance with results | fit | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
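Illustrative usage sketch for the deterministic S-estimator, assuming the default norm and breakdown point; the attribute names (mean, scale, cov) are assumed from the Holder results used in this module.

import numpy as np
from statsmodels.robust.covariance import CovDetS

rng = np.random.default_rng(5678)
x = rng.normal(size=(200, 3))
x[:20] += 8  # add a cluster of outliers

mod = CovDetS(x)
res = mod.fit()
print(res.mean)   # robust location
print(res.scale)  # robust scale of the selected candidate
print(res.cov)    # robust covariance estimate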
def fit(self, maxiter=100):
"""Estimate model parameters.
Parameters
----------
maxiter : int
Maximum number of iterations in the second stage M-estimation.
fit args : dict
currently missing
Returns
-------
Instance of a results or holder class.
Notes
-----
This uses CovDetS for the first stage estimation and CovM with fixed
scale in the second stage MM-estimation.
TODO: fit options are missing.
"""
# first stage estimate
mod_s = CovDetS(
self.data,
norm=None,
breakdown_point=self.breakdown_point
)
res_s = mod_s.fit()
res = self.mod.fit(
start_mean=res_s.mean,
start_shape=res_s.shape,
start_scale=res_s.scale,
maxiter=maxiter,
update_scale=False,
)
return res | Estimate model parameters.
Parameters
----------
maxiter : int
Maximum number of iterations in the second stage M-estimation.
fit args : dict
currently missing
Returns
-------
Instance of a results or holder class.
Notes
-----
This uses CovDetS for the first stage estimation and CovM with fixed
scale in the second stage MM-estimation.
TODO: fit options are missing. | fit | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def test_covdetmcd():
# results from rrcov
# > cdet = CovMcd(x = hbk, raw.only = TRUE, nsamp = "deterministic",
# use.correction=FALSE)
cov_dmcd_r = np.array("""
2.2059619213639 0.0223939863695 0.7898958050933 0.4060613360808
0.0223939863695 1.1384166802155 0.4315534571891 -0.2344041030201
0.7898958050933 0.4315534571891 1.8930117467493 -0.3292893001459
0.4060613360808 -0.2344041030201 -0.3292893001459 0.6179686100845
""".split(), float).reshape(4, 4)
mean_dmcd_r = np.array([1.7725, 2.2050, 1.5375, -0.0575])
mod = robcov.CovDetMCD(dta_hbk)
res = mod.fit(40, maxiter_step=100, reweight=False)
assert_allclose(res.mean, mean_dmcd_r, rtol=1e-5)
assert_allclose(res.cov, cov_dmcd_r, rtol=1e-5)
# with reweighting
# covMcd(x = hbk, nsamp = "deterministic", use.correction = FALSE)
# iBest: 5; C-step iterations: 7, 7, 7, 4, 6, 6
# Log(Det.): -2.42931967153
mean_dmcdw_r = np.array([1.5338983050847, 1.8322033898305, 1.6745762711864,
-0.0728813559322])
cov_dmcdw_r = np.array("""
1.5677744869295 0.09285770205078 0.252076010128 0.13873444408300
0.0928577020508 1.56769177397171 0.224929617385 -0.00516128856542
0.2520760101278 0.22492961738467 1.483829106079 -0.20275013775619
0.1387344440830 -0.00516128856542 -0.202750137756 0.43326701543885
""".split(), float).reshape(4, 4)
mod = robcov.CovDetMCD(dta_hbk)
res = mod.fit(40, maxiter_step=100) # default is reweight=True
assert_allclose(res.mean, mean_dmcdw_r, rtol=1e-5)
# R uses different trimming correction
# compare only shape (using trace for simplicity)
shape = res.cov / np.trace(res.cov)
shape_r = cov_dmcdw_r / np.trace(cov_dmcdw_r)
assert_allclose(shape, shape_r, rtol=1e-5) | .split(), float).reshape(4, 4)
mean_dmcd_r = np.array([1.7725, 2.2050, 1.5375, -0.0575])
mod = robcov.CovDetMCD(dta_hbk)
res = mod.fit(40, maxiter_step=100, reweight=False)
assert_allclose(res.mean, mean_dmcd_r, rtol=1e-5)
assert_allclose(res.cov, cov_dmcd_r, rtol=1e-5)
# with reweighting
# covMcd(x = hbk, nsamp = "deterministic", use.correction = FALSE)
# iBest: 5; C-step iterations: 7, 7, 7, 4, 6, 6
# Log(Det.): -2.42931967153
mean_dmcdw_r = np.array([1.5338983050847, 1.8322033898305, 1.6745762711864,
-0.0728813559322])
cov_dmcdw_r = np.array( | test_covdetmcd | python | statsmodels/statsmodels | statsmodels/robust/tests/test_covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tests/test_covariance.py | BSD-3-Clause |
def mean_func(x):
"""mean function for example"""
return x + 0.25 * x**2 | mean function for example | mean_func | python | statsmodels/statsmodels | statsmodels/robust/tests/test_mquantiles.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tests/test_mquantiles.py | BSD-3-Clause |
def std_func(x):
"""standard deviation function for example"""
return 0.1 * np.exp(2.5 + 0.75 * np.abs(x)) | standard deviation function for example | std_func | python | statsmodels/statsmodels | statsmodels/robust/tests/test_mquantiles.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tests/test_mquantiles.py | BSD-3-Clause |
def _shift_intercept(arr):
"""
A convenience function to make the SAS covariance matrix
compatible with statsmodels.rlm covariance
"""
arr = np.asarray(arr)
side = int(np.sqrt(len(arr)))
return np.roll(np.roll(arr.reshape(side, side), -1, axis=1), -1, axis=0) | A convenience function to make the SAS covariance matrix
compatible with statsmodels.rlm covariance | _shift_intercept | python | statsmodels/statsmodels | statsmodels/robust/tests/results/results_rlm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tests/results/results_rlm.py | BSD-3-Clause |
def _get_exogs(self):
"""list of exogs, for internal use in post-estimation
"""
return (self.exog, self.exog_infl) | list of exogs, for internal use in post-estimation | _get_exogs | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def loglike(self, params):
"""
Loglikelihood of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \\ln L=\\sum_{y_{i}=0}\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\sum_{y_{i}>0}(\\ln(1-w_{i})+L_{main\\_model})
where P - pdf of main model, L - loglike function of main model.
"""
return np.sum(self.loglikeobs(params)) | Loglikelihood of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \\ln L=\\sum_{y_{i}=0}\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\sum_{y_{i}>0}(\\ln(1-w_{i})+L_{main\\_model})
where P - pdf of main model, L - loglike function of main model. | loglike | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
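The mixture log-likelihood in the Notes can be evaluated directly; a small numeric sketch for a zero-inflated Poisson with constant inflation probability w and mean mu (made-up numbers, scipy only supplies the Poisson pmf).

import numpy as np
from scipy import stats

y = np.array([0, 0, 1, 3, 0, 2])
w, mu = 0.3, 1.5                       # inflation probability and Poisson mean
pmf = stats.poisson.pmf(y, mu)         # P_main(y_i)
llf_obs = np.where(y == 0,
                   np.log(w + (1 - w) * pmf),
                   np.log(1 - w) + np.log(pmf))
print(llf_obs.sum())                   # log-likelihood of the mixture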
def loglikeobs(self, params):
"""
Loglikelihood for observations of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes for definition.
Notes
-----
.. math:: \\ln L=\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\ln(1-w_{i})+L_{main\\_model}
where P - pdf of main model, L - loglike function of main model.
for observations :math:`i=1,...,n`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
llf_main = self.model_main.loglikeobs(params_main)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
llf = np.zeros_like(y, dtype=np.float64)
llf[zero_idx] = (np.log(w[zero_idx] +
(1 - w[zero_idx]) * np.exp(llf_main[zero_idx])))
llf[nonzero_idx] = np.log(1 - w[nonzero_idx]) + llf_main[nonzero_idx]
return llf | Loglikelihood for observations of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes for definition.
Notes
-----
.. math:: \\ln L=\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\ln(1-w_{i})+L_{main\\_model}
where P - pdf of main model, L - loglike function of main model.
for observations :math:`i=1,...,n` | loglikeobs | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def score_obs(self, params):
"""
Generic Zero Inflated model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
# Unused, commented out
# mu = self.model_main.predict(params_main)
# TODO: need to allow for complex to use CS numerical derivatives
dldp = np.zeros((self.exog.shape[0], self.k_exog), dtype=np.float64)
dldw = np.zeros_like(self.exog_infl, dtype=np.float64)
dldp[zero_idx,:] = (score_main[zero_idx].T *
(1 - (w[zero_idx]) / np.exp(llf[zero_idx]))).T
dldp[nonzero_idx,:] = score_main[nonzero_idx]
if self.inflation == 'logit':
dldw[zero_idx,:] = (self.exog_infl[zero_idx].T * w[zero_idx] *
(1 - w[zero_idx]) *
(1 - np.exp(llf_main[zero_idx])) /
np.exp(llf[zero_idx])).T
dldw[nonzero_idx,:] = -(self.exog_infl[nonzero_idx].T *
w[nonzero_idx]).T
elif self.inflation == 'probit':
return approx_fprime(params, self.loglikeobs)
return np.hstack((dldw, dldp)) | Generic Zero Inflated model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params` | score_obs | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def hessian(self, params):
"""
Generic Zero Inflated model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
"""
hess_arr_main = self._hessian_main(params)
hess_arr_infl = self._hessian_inflate(params)
if hess_arr_main is None or hess_arr_infl is None:
return approx_hess(params, self.loglike)
dim = self.k_exog + self.k_inflate
hess_arr = np.zeros((dim, dim))
hess_arr[:self.k_inflate,:] = hess_arr_infl
hess_arr[self.k_inflate:,self.k_inflate:] = hess_arr_main
tri_idx = np.triu_indices(self.k_exog + self.k_inflate, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr | Generic Zero Inflated model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
----- | hessian | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def predict(self, params, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean', y_values=None):
"""
Predict expected response or other statistic given exogenous variables.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor with coefficient
equal to 1. If exposure is specified, then it will be logged by
the method. The user does not need to log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
which : str (optional)
Statistic to predict. Default is 'mean'.
- 'mean' : the conditional expectation of endog E(y | x). This
takes inflated zeros into account.
- 'linear' : the linear predictor of the mean function.
- 'var' : returns the estimated variance of endog implied by the
model.
- 'mean-main' : mean of the main count model
- 'prob-main' : probability of selecting the main model.
The probability of zero inflation is ``1 - prob-main``.
- 'mean-nonzero' : expected value conditional on having observation
larger than zero, E(y | X, y>0)
- 'prob-zero' : probability of observing a zero count. P(y=0 | x)
- 'prob' : probabilities of each count from 0 to max(endog), or
for y_values if those are provided. This is a multivariate
return (2-dim when predicting for several observations).
y_values : array_like
Values of the random variable endog at which pmf is evaluated.
Only used if ``which="prob"``
"""
no_exog = False
if exog is None:
no_exog = True
exog = self.exog
if exog_infl is None:
if no_exog:
exog_infl = self.exog_infl
else:
if self._no_exog_infl:
exog_infl = np.ones((len(exog), 1))
else:
exog_infl = np.asarray(exog_infl)
if exog_infl.ndim == 1 and self.k_inflate == 1:
exog_infl = exog_infl[:, None]
if exposure is None:
if no_exog:
exposure = getattr(self, 'exposure', 0)
else:
exposure = 0
else:
exposure = np.log(exposure)
if offset is None:
if no_exog:
offset = getattr(self, 'offset', 0)
else:
offset = 0
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
prob_main = 1 - self.model_infl.predict(params_infl, exog_infl)
lin_pred = np.dot(exog, params_main[:self.exog.shape[1]]) + exposure + offset
# Refactor: This is pretty hacky,
# there should be an appropriate predict method in model_main
# this is just prob(y=0 | model_main)
tmp_exog = self.model_main.exog
tmp_endog = self.model_main.endog
tmp_offset = getattr(self.model_main, 'offset', False)
tmp_exposure = getattr(self.model_main, 'exposure', False)
self.model_main.exog = exog
self.model_main.endog = np.zeros(exog.shape[0])
self.model_main.offset = offset
self.model_main.exposure = exposure
llf = self.model_main.loglikeobs(params_main)
self.model_main.exog = tmp_exog
self.model_main.endog = tmp_endog
# tmp_offset might be an array with elementwise equality testing
#if np.size(tmp_offset) == 1 and tmp_offset[0] == 'no':
if tmp_offset is False:
del self.model_main.offset
else:
self.model_main.offset = tmp_offset
#if np.size(tmp_exposure) == 1 and tmp_exposure[0] == 'no':
if tmp_exposure is False:
del self.model_main.exposure
else:
self.model_main.exposure = tmp_exposure
# end hack
prob_zero = (1 - prob_main) + prob_main * np.exp(llf)
if which == 'mean':
return prob_main * np.exp(lin_pred)
elif which == 'mean-main':
return np.exp(lin_pred)
elif which == 'linear':
return lin_pred
elif which == 'mean-nonzero':
return prob_main * np.exp(lin_pred) / (1 - prob_zero)
elif which == 'prob-zero':
return prob_zero
elif which == 'prob-main':
return prob_main
elif which == 'var':
mu = np.exp(lin_pred)
return self._predict_var(params, mu, 1 - prob_main)
elif which == 'prob':
return self._predict_prob(params, exog, exog_infl, exposure,
offset, y_values=y_values)
else:
raise ValueError('which = %s is not available' % which) | Predict expected response or other statistic given exogenous variables.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor with coefficient
equal to 1. If exposure is specified, then it will be logged by
the method. The user does not need to log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
which : str (optional)
Statistic to predict. Default is 'mean'.
- 'mean' : the conditional expectation of endog E(y | x). This
takes inflated zeros into account.
- 'linear' : the linear predictor of the mean function.
- 'var' : returns the estimated variance of endog implied by the
model.
- 'mean-main' : mean of the main count model
- 'prob-main' : probability of selecting the main model.
The probability of zero inflation is ``1 - prob-main``.
- 'mean-nonzero' : expected value conditional on having observation
larger than zero, E(y | X, y>0)
- 'prob-zero' : probability of observing a zero count. P(y=0 | x)
- 'prob' : probabilities of each count from 0 to max(endog), or
for y_values if those are provided. This is a multivariate
return (2-dim when predicting for several observations).
y_values : array_like
Values of the random variable endog at which pmf is evaluated.
Only used if ``which="prob"`` | predict | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""NotImplemented
"""
raise NotImplementedError | NotImplemented | _derivative_predict | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def _derivative_exog(self, params, exog=None, transform="dydx",
dummy_idx=None, count_idx=None):
"""NotImplemented
"""
raise NotImplementedError | NotImplemented | _derivative_exog | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def _deriv_mean_dparams(self, params):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main)
score_infl = self.model_infl._deriv_mean_dparams(params_infl)
score_main = self.model_main._deriv_mean_dparams(params_main)
dmat_infl = - mu[:, None] * score_infl
dmat_main = (1 - w[:, None]) * score_main
dmat = np.column_stack((dmat_infl, dmat_main))
return dmat | Derivative of the expected endog with respect to the parameters.
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector. | _deriv_mean_dparams | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def _deriv_score_obs_dendog(self, params):
"""derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog.
"""
raise NotImplementedError
# The below currently does not work, discontinuity at zero
# see https://github.com/statsmodels/statsmodels/pull/7951#issuecomment-996355875 # noqa
from statsmodels.tools.numdiff import _approx_fprime_scalar
endog_original = self.endog
def f(y):
if y.ndim == 2 and y.shape[1] == 1:
y = y[:, 0]
self.endog = y
self.model_main.endog = y
sf = self.score_obs(params)
self.endog = endog_original
self.model_main.endog = endog_original
return sf
ds = _approx_fprime_scalar(self.endog[:, None], f, epsilon=1e-2)
return ds | derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog. | _deriv_score_obs_dendog | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
w = prob_infl
var_ = (1 - w) * mu * (1 + w * mu)
return var_ | predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance. | _predict_var | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
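The variance formula above matches the usual zero-inflated Poisson moments; a quick simulation check with made-up parameter values.

import numpy as np

rng = np.random.default_rng(7)
w, mu, n = 0.3, 2.0, 200_000
y = rng.poisson(mu, size=n) * (rng.uniform(size=n) > w)
var_formula = (1 - w) * mu * (1 + w * mu)
print(y.var(), var_formula)   # both close to 2.24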
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
"""Get frozen instance of distribution based on predicted parameters.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor of the mean
function with coefficient equal to 1. If exposure is specified,
then it will be logged by the method. The user does not need to
log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
Returns
-------
Instance of frozen scipy distribution subclass.
"""
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
# distr = self.distribution(mu[:, None], 1 - w[:, None])
distr = self.distribution(mu, 1 - w)
return distr | Get frozen instance of distribution based on predicted parameters.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor of the mean
function with coefficient equal to 1. If exposure is specified,
then it will be logged by the method. The user does not need to
log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
Returns
-------
Instance of frozen scipy distribution subclass. | get_distribution | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
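Sketch of using the frozen distribution for per-observation probabilities, again with the ZeroInflatedPoisson wrapper on simulated data (illustrative only).

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(3)
nobs = 300
x = sm.add_constant(rng.normal(size=(nobs, 1)))
y = rng.poisson(np.exp(0.4 + 0.2 * x[:, 1])) * (rng.uniform(size=nobs) > 0.2)

res = sm.ZeroInflatedPoisson(y, x).fit(disp=False)
distr = res.model.get_distribution(res.params)
print(distr.pmf(0)[:5])    # model-implied P(y = 0) for the first observations
print(distr.mean()[:5])    # should match res.predict(which="mean")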
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
alpha = params[-1]
w = prob_infl
p = self.model_main.parameterization
var_ = (1 - w) * mu * ((1 + alpha * mu**p)**2 + w * mu)
return var_ | predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance. | _predict_var | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
alpha = params[-1]
w = prob_infl
p = self.model_main.parameterization
var_ = (1 - w) * mu * (1 + alpha * mu**(p - 1) + w * mu)
return var_ | predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance. | _predict_var | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def get_influence(self):
"""
Influence and outlier measures
See notes section for influence measures that do not apply for
zero inflated models.
Returns
-------
MLEInfluence
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence
Notes
-----
ZeroInflated models have functions that are not differentiable
with respect to sample endog if endog=0. This means that generalized
leverage cannot be computed in the usual definition.
Currently, both the generalized leverage, in the `hat_matrix_diag`
attribute, and studentized residuals are not available. In the influence
plot generalized leverage is replaced by a hat matrix diagonal that
only takes combined exog into account, computed in the same way as
for OLS. This is a measure for exog outliers but does not take
specific features of the model into account.
"""
# same as super in DiscreteResults, only added for docstring
from statsmodels.stats.outliers_influence import MLEInfluence
return MLEInfluence(self) | Influence and outlier measures
See notes section for influence measures that do not apply for
zero inflated models.
Returns
-------
MLEInfluence
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence
Notes
-----
ZeroInflated models have functions that are not differentiable
with respect to sample endog if endog=0. This means that generalized
leverage cannot be computed in the usual definition.
Currently, both the generalized leverage, in `hat_matrix_diag`
attribute and studetized residuals are not available. In the influence
plot generalized leverage is replaced by a hat matrix diagonal that
only takes combined exog into account, computed in the same way as
for OLS. This is a measure for exog outliers but does not take
specific features of the model into account. | get_influence | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation") | Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models | get_margeff | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation") | Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models | get_margeff | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation") | Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models | get_margeff | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def _combine_bins(edge_index, x):
"""group columns into bins using sum
This is mainly a helper function for combining probabilities into cells.
It similar to `np.add.reduceat(x, edge_index, axis=-1)` except for the
treatment of the last index and last cell.
Parameters
----------
edge_index : array_like
This defines the (zero-based) indices for the columns that are to be
combined. Each index in `edge_index` except the last is the starting
index for a bin. The largest index in a bin is the next edge_index-1.
x : 1d or 2d array
array for which columns are combined. If x is 1-dimensional, it
will be treated as a 2-d row vector.
Returns
-------
x_new : ndarray
k_li : ndarray
Count of columns combined in bin.
Examples
--------
>>> dia.combine_bins([0,1,5], np.arange(4))
(array([0, 6]), array([1, 4]))
this aggregates to two bins with the sum of 1 and 4 elements
>>> np.arange(4)[0].sum()
0
>>> np.arange(4)[1:5].sum()
6
If the rightmost index is smaller than the number of columns, then the remaining
columns will not be included.
>>> dia.combine_bins([0,1,3], np.arange(4))
(array([0, 3]), array([1, 2]))
"""
x = np.asarray(x)
if x.ndim == 1:
is_1d = True
x = x[None, :]
else:
is_1d = False
xli = []
kli = []
for bin_idx in range(len(edge_index) - 1):
i, j = edge_index[bin_idx : bin_idx + 2]
xli.append(x[:, i:j].sum(1))
kli.append(j - i)
x_new = np.column_stack(xli)
if is_1d:
x_new = x_new.squeeze()
return x_new, np.asarray(kli) | group columns into bins using sum
This is mainly a helper function for combining probabilities into cells.
It similar to `np.add.reduceat(x, edge_index, axis=-1)` except for the
treatment of the last index and last cell.
Parameters
----------
edge_index : array_like
This defines the (zero-based) indices for the columns that are to be
combined. Each index in `edge_index` except the last is the starting
index for a bin. The largest index in a bin is the next edge_index-1.
x : 1d or 2d array
array for which columns are combined. If x is 1-dimensional, it
will be treated as a 2-d row vector.
Returns
-------
x_new : ndarray
k_li : ndarray
Count of columns combined in bin.
Examples
--------
>>> dia.combine_bins([0,1,5], np.arange(4))
(array([0, 6]), array([1, 4]))
this aggregates to two bins with the sum of 1 and 4 elements
>>> np.arange(4)[0].sum()
0
>>> np.arange(4)[1:5].sum()
6
If the rightmost index is smaller than the number of columns, then the remaining
columns will not be included.
>>> dia.combine_bins([0,1,3], np.arange(4))
(array([0, 3]), array([1, 2])) | _combine_bins | python | statsmodels/statsmodels | statsmodels/discrete/_diagnostics_count.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/_diagnostics_count.py | BSD-3-Clause |
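The docstring's comparison with ``np.add.reduceat`` can be made concrete; only the treatment of the last edge differs (illustrative).

import numpy as np

x = np.arange(6)
edges = [0, 1, 5]
# np.add.reduceat puts everything from the last edge onward into a final bin
print(np.add.reduceat(x, edges))                                # [ 0 10  5]
# _combine_bins instead uses the last edge as an exclusive upper bound
print([x[i:j].sum() for i, j in zip(edges[:-1], edges[1:])])    # [0, 10]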
def plot_probs(freq, probs_predicted, label='predicted', upp_xlim=None,
fig=None):
"""diagnostic plots for comparing two lists of discrete probabilities
Parameters
----------
freq, probs_predicted : nd_arrays
two arrays of probabilities, this can be any probabilities for
the same events, default is designed for comparing predicted
and observed probabilities
label : str or tuple
If string, then it will be used as the label for probs_predicted and
"freq" is used for the other probabilities.
If label is a list of two strings, then they are used as the labels
for the two probability arrays, in that order.
upp_xlim : None or int
If it is not None, then the xlim of the first two plots are set to
(0, upp_xlim), otherwise the matplotlib default is used
fig : None or matplotlib figure instance
If fig is provided, then the axes will be added to it in a (3,1)
subplots, otherwise a matplotlib figure instance is created
Returns
-------
Figure
The figure contains 3 subplots with probabilities, cumulative
probabilities and a PP-plot
"""
if isinstance(label, list):
label0, label1 = label
else:
label0, label1 = 'freq', label
if fig is None:
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8,12))
ax1 = fig.add_subplot(311)
ax1.plot(freq, '-o', label=label0)
ax1.plot(probs_predicted, '-d', label=label1)
if upp_xlim is not None:
ax1.set_xlim(0, upp_xlim)
ax1.legend()
ax1.set_title('probabilities')
ax2 = fig.add_subplot(312)
ax2.plot(np.cumsum(freq), '-o', label=label0)
ax2.plot(np.cumsum(probs_predicted), '-d', label=label1)
if upp_xlim is not None:
ax2.set_xlim(0, upp_xlim)
ax2.legend()
ax2.set_title('cumulative probabilities')
ax3 = fig.add_subplot(313)
ax3.plot(np.cumsum(probs_predicted), np.cumsum(freq), 'o')
ax3.plot(np.arange(len(freq)) / len(freq), np.arange(len(freq)) / len(freq))
ax3.set_title('PP-plot')
ax3.set_xlabel(label1)
ax3.set_ylabel(label0)
return fig | diagnostic plots for comparing two lists of discrete probabilities
Parameters
----------
freq, probs_predicted : nd_arrays
two arrays of probabilities, this can be any probabilities for
the same events, default is designed for comparing predicted
and observed probabilities
label : str or tuple
If string, then it will be used as the label for probs_predicted and
"freq" is used for the other probabilities.
If label is a tuple of strings, then the first is they are used as
label for both probabilities
upp_xlim : None or int
If it is not None, then the xlim of the first two plots are set to
(0, upp_xlim), otherwise the matplotlib default is used
fig : None or matplotlib figure instance
If fig is provided, then the axes will be added to it in a (3,1)
subplots, otherwise a matplotlib figure instance is created
Returns
-------
Figure
The figure contains 3 subplots with probabilities, cumulative
probabilities and a PP-plot | plot_probs | python | statsmodels/statsmodels | statsmodels/discrete/_diagnostics_count.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/_diagnostics_count.py | BSD-3-Clause |
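Minimal usage sketch; the frequencies below are made up, and the import uses the private module given in the path column.

import numpy as np
from statsmodels.discrete._diagnostics_count import plot_probs

# observed relative frequencies and model-implied probabilities for counts 0..5
freq = np.array([0.30, 0.35, 0.20, 0.10, 0.04, 0.01])
probs_pred = np.array([0.25, 0.34, 0.23, 0.11, 0.05, 0.02])
fig = plot_probs(freq, probs_pred, label='predicted', upp_xlim=5)
fig.savefig('count_probs.png')  # or show interactively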
def test_chisquare_prob(results, probs, bin_edges=None, method=None):
"""
chisquare test for predicted probabilities using cmt-opg
Parameters
----------
results : results instance
Instance of a count regression results
probs : ndarray
Array of predicted probabilities with observations
in rows and event counts in columns
bin_edges : None or array
intervals to combine several counts into cells
see combine_bins
Returns
-------
(api not stable, replace by test-results class)
statistic : float
chisquare statistic of the test
p-value : float
p-value of test
df : int
degrees of freedom for chisquare distribution
extras : ???
currently returns a tuple with some intermediate results
(diff, res_aux)
Notes
-----
Status : experimental, no verified unit tests, needs to be generalized.
Currently only the OPG version with auxiliary regression is implemented.
Assumes counts are np.arange(probs.shape[1]), i.e. consecutive
integers starting at zero.
Auxiliary regression drops the last column of binned probs to avoid
perfect collinearity, because the binned probabilities sum to 1.
References
----------
.. [1] Andrews, Donald W. K. 1988a. “Chi-Square Diagnostic Tests for
Econometric Models: Theory.” Econometrica 56 (6): 1419–53.
https://doi.org/10.2307/1913105.
.. [2] Andrews, Donald W. K. 1988b. “Chi-Square Diagnostic Tests for
Econometric Models.” Journal of Econometrics 37 (1): 135–56.
https://doi.org/10.1016/0304-4076(88)90079-6.
.. [3] Manjón, M., and O. Martínez. 2014. “The Chi-Squared Goodness-of-Fit
Test for Count-Data Models.” Stata Journal 14 (4): 798–816.
"""
res = results
score_obs = results.model.score_obs(results.params)
d_ind = (res.model.endog[:, None] == np.arange(probs.shape[1])).astype(int)
if bin_edges is not None:
d_ind_bins, k_bins = _combine_bins(bin_edges, d_ind)
probs_bins, k_bins = _combine_bins(bin_edges, probs)
k_bins = probs_bins.shape[-1]
else:
d_ind_bins, k_bins = d_ind, d_ind.shape[1]
probs_bins = probs
diff1 = d_ind_bins - probs_bins
# diff2 = (1 - d_ind.sum(1)) - (1 - probs_bins.sum(1))
x_aux = np.column_stack((score_obs, diff1[:, :-1])) # diff2))
nobs = x_aux.shape[0]
res_aux = OLS(np.ones(nobs), x_aux).fit()
chi2_stat = nobs * (1 - res_aux.ssr / res_aux.uncentered_tss)
df = res_aux.model.rank - score_obs.shape[1]
if df < k_bins - 1:
# not a problem in general, but it can be for OPG version
import warnings
# TODO: Warning shows up in Monte Carlo loop, skip for now
warnings.warn('auxiliary model is rank deficient')
statistic = chi2_stat
pvalue = stats.chi2.sf(chi2_stat, df)
res = HolderTuple(
statistic=statistic,
pvalue=pvalue,
df=df,
diff1=diff1,
res_aux=res_aux,
distribution="chi2",
)
return res | chisquare test for predicted probabilities using cmt-opg
Parameters
----------
results : results instance
Instance of a count regression results
probs : ndarray
Array of predicted probabilities with observations
in rows and event counts in columns
bin_edges : None or array
intervals to combine several counts into cells
see combine_bins
Returns
-------
(api not stable, replace by test-results class)
statistic : float
chisquare statistic of the test
p-value : float
p-value of test
df : int
degrees of freedom for chisquare distribution
extras : ???
currently returns a tuple with some intermediate results
(diff, res_aux)
Notes
-----
Status : experimental, no verified unit tests, needs to be generalized.
Currently only the OPG version with auxiliary regression is implemented.
Assumes counts are np.arange(probs.shape[1]), i.e. consecutive
integers starting at zero.
Auxiliary regression drops the last column of binned probs to avoid
perfect collinearity, because the binned probabilities sum to 1.
References
----------
.. [1] Andrews, Donald W. K. 1988a. “Chi-Square Diagnostic Tests for
Econometric Models: Theory.” Econometrica 56 (6): 1419–53.
https://doi.org/10.2307/1913105.
.. [2] Andrews, Donald W. K. 1988b. “Chi-Square Diagnostic Tests for
Econometric Models.” Journal of Econometrics 37 (1): 135–56.
https://doi.org/10.1016/0304-4076(88)90079-6.
.. [3] Manjón, M., and O. Martínez. 2014. “The Chi-Squared Goodness-of-Fit
Test for Count-Data Models.” Stata Journal 14 (4): 798–816. | test_chisquare_prob | python | statsmodels/statsmodels | statsmodels/discrete/_diagnostics_count.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/_diagnostics_count.py | BSD-3-Clause |
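Usage sketch of the moment test; the predicted count probabilities are built with scipy here, and the bin edges are an arbitrary illustrative choice on simulated data.

import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.discrete._diagnostics_count import test_chisquare_prob

rng = np.random.default_rng(9)
nobs = 500
x = sm.add_constant(rng.normal(size=(nobs, 1)))
y = rng.poisson(np.exp(0.5 + 0.3 * x[:, 1]))

res = sm.Poisson(y, x).fit(disp=False)
mu = res.predict()
probs = stats.poisson.pmf(np.arange(10)[None, :], mu[:, None])  # counts 0..9
chi2_res = test_chisquare_prob(res, probs, bin_edges=np.array([0, 1, 2, 3, 10]))
print(chi2_res.statistic, chi2_res.pvalue, chi2_res.df)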
def test_poisson_dispersion(results, method="all", _old=False):
"""Score/LM type tests for Poisson variance assumptions
Null Hypothesis is
H0: var(y) = E(y) and assuming E(y) is correctly specified
H1: var(y) ~= E(y)
The tests are based on the constrained model, i.e. the Poisson model.
The tests differ in their assumed alternatives, and in their maintained
assumptions.
Parameters
----------
results : Poisson results instance
This can be a results instance for either a discrete Poisson or a GLM
with family Poisson.
method : str
Not used yet. Currently results for all methods are returned.
_old : bool
Temporary keyword for backwards compatibility, will be removed
in future version of statsmodels.
Returns
-------
res : instance
The instance of DispersionResults has the hypothesis test results,
statistic, pvalue, method, alternative, as main attributes and a
summary_frame method that returns the results as pandas DataFrame.
"""
if method not in ["all"]:
raise ValueError(f'unknown method "{method}"')
if hasattr(results, '_results'):
results = results._results
endog = results.model.endog
nobs = endog.shape[0] # TODO: use attribute, may need to be added
fitted = results.predict()
# fitted = results.fittedvalues # discrete has linear prediction
# this assumes Poisson
resid2 = results.resid_response**2
var_resid_endog = (resid2 - endog)
var_resid_fitted = (resid2 - fitted)
std1 = np.sqrt(2 * (fitted**2).sum())
var_resid_endog_sum = var_resid_endog.sum()
dean_a = var_resid_fitted.sum() / std1
dean_b = var_resid_endog_sum / std1
dean_c = (var_resid_endog / fitted).sum() / np.sqrt(2 * nobs)
pval_dean_a = 2 * stats.norm.sf(np.abs(dean_a))
pval_dean_b = 2 * stats.norm.sf(np.abs(dean_b))
pval_dean_c = 2 * stats.norm.sf(np.abs(dean_c))
results_all = [[dean_a, pval_dean_a],
[dean_b, pval_dean_b],
[dean_c, pval_dean_c]]
description = [['Dean A', 'mu (1 + a mu)'],
['Dean B', 'mu (1 + a mu)'],
['Dean C', 'mu (1 + a)']]
# Cameron Trivedi auxiliary regression page 78 count book 1989
endog_v = var_resid_endog / fitted
res_ols_nb2 = OLS(endog_v, fitted).fit(use_t=False)
stat_ols_nb2 = res_ols_nb2.tvalues[0]
pval_ols_nb2 = res_ols_nb2.pvalues[0]
results_all.append([stat_ols_nb2, pval_ols_nb2])
description.append(['CT nb2', 'mu (1 + a mu)'])
res_ols_nb1 = OLS(endog_v, np.ones(len(endog_v))).fit(use_t=False)
stat_ols_nb1 = res_ols_nb1.tvalues[0]
pval_ols_nb1 = res_ols_nb1.pvalues[0]
results_all.append([stat_ols_nb1, pval_ols_nb1])
description.append(['CT nb1', 'mu (1 + a)'])
endog_v = var_resid_endog / fitted
res_ols_nb2 = OLS(endog_v, fitted).fit(cov_type='HC3', use_t=False)
stat_ols_hc1_nb2 = res_ols_nb2.tvalues[0]
pval_ols_hc1_nb2 = res_ols_nb2.pvalues[0]
results_all.append([stat_ols_hc1_nb2, pval_ols_hc1_nb2])
description.append(['CT nb2 HC3', 'mu (1 + a mu)'])
res_ols_nb1 = OLS(endog_v, np.ones(len(endog_v))).fit(cov_type='HC3',
use_t=False)
stat_ols_hc1_nb1 = res_ols_nb1.tvalues[0]
pval_ols_hc1_nb1 = res_ols_nb1.pvalues[0]
results_all.append([stat_ols_hc1_nb1, pval_ols_hc1_nb1])
description.append(['CT nb1 HC3', 'mu (1 + a)'])
results_all = np.array(results_all)
if _old:
# for backwards compatibility in 0.14, remove in later versions
return results_all, description
else:
res = DispersionResults(
statistic=results_all[:, 0],
pvalue=results_all[:, 1],
method=[i[0] for i in description],
alternative=[i[1] for i in description],
name="Poisson Dispersion Test"
)
return res | Score/LM type tests for Poisson variance assumptions
Null Hypothesis is
H0: var(y) = E(y) and assuming E(y) is correctly specified
H1: var(y) ~= E(y)
The tests are based on the constrained model, i.e. the Poisson model.
The tests differ in their assumed alternatives, and in their maintained
assumptions.
Parameters
----------
results : Poisson results instance
This can be a results instance for either a discrete Poisson or a GLM
with family Poisson.
method : str
Not used yet. Currently results for all methods are returned.
_old : bool
Temporary keyword for backwards compatibility, will be removed
in future version of statsmodels.
Returns
-------
res : instance
The instance of DispersionResults has the hypothesis test results,
statistic, pvalue, method, alternative, as main attributes and a
summary_frame method that returns the results as pandas DataFrame. | test_poisson_dispersion | python | statsmodels/statsmodels | statsmodels/discrete/_diagnostics_count.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/_diagnostics_count.py | BSD-3-Clause |
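Usage sketch of the dispersion tests on simulated overdispersed counts fitted with a Poisson model (illustrative only).

import numpy as np
import statsmodels.api as sm
from statsmodels.discrete._diagnostics_count import test_poisson_dispersion

rng = np.random.default_rng(11)
nobs = 500
x = sm.add_constant(rng.normal(size=(nobs, 1)))
mu = np.exp(0.5 + 0.3 * x[:, 1])
y = rng.negative_binomial(2, 2 / (2 + mu))   # overdispersed relative to Poisson

res = sm.Poisson(y, x).fit(disp=False)
disp = test_poisson_dispersion(res)
print(disp.summary_frame())   # one row per test variant listed above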
def _test_poisson_dispersion_generic(
results,
exog_new_test,
exog_new_control=None,
include_score=False,
use_endog=True,
cov_type='HC3',
cov_kwds=None,
use_t=False
):
"""A variable addition test for the variance function
This uses an artificial regression to calculate a variant of an LM or
generalized score test for the specification of the variance assumption
in a Poisson model. The performed test is a Wald test on the coefficients
of the `exog_new_test`.
Warning: insufficiently tested, especially for options
"""
if hasattr(results, '_results'):
results = results._results
endog = results.model.endog
nobs = endog.shape[0] # TODO: use attribute, may need to be added
# fitted = results.fittedvalues # generic has linpred as fittedvalues
fitted = results.predict()
resid2 = results.resid_response**2
# the following assumes Poisson
if use_endog:
var_resid = (resid2 - endog)
else:
var_resid = (resid2 - fitted)
endog_v = var_resid / fitted
k_constraints = exog_new_test.shape[1]
ex_list = [exog_new_test]
if include_score:
score_obs = results.model.score_obs(results.params)
ex_list.append(score_obs)
if exog_new_control is not None:
ex_list.append(exog_new_control)
if len(ex_list) > 1:
ex = np.column_stack(ex_list)
use_wald = True
else:
ex = ex_list[0] # no control variables in exog
use_wald = False
res_ols = OLS(endog_v, ex).fit(cov_type=cov_type, cov_kwds=cov_kwds,
use_t=use_t)
if use_wald:
# we have controls and need to test coefficients
k_vars = ex.shape[1]
constraints = np.eye(k_constraints, k_vars)
ht = res_ols.wald_test(constraints)
stat_ols = ht.statistic
pval_ols = ht.pvalue
else:
# we do not have controls and can use overall fit
nobs = endog_v.shape[0]
rsquared_noncentered = 1 - res_ols.ssr/res_ols.uncentered_tss
stat_ols = nobs * rsquared_noncentered
pval_ols = stats.chi2.sf(stat_ols, k_constraints)
return stat_ols, pval_ols | A variable addition test for the variance function
This uses an artificial regression to calculate a variant of an LM or
generalized score test for the specification of the variance assumption
in a Poisson model. The performed test is a Wald test on the coefficients
of the `exog_new_test`.
Warning: insufficiently tested, especially for options | _test_poisson_dispersion_generic | python | statsmodels/statsmodels | statsmodels/discrete/_diagnostics_count.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/_diagnostics_count.py | BSD-3-Clause |
def test_poisson_zeroinflation_jh(results_poisson, exog_infl=None):
"""score test for zero inflation or deflation in Poisson
This implements the Jansakul and Hinde (2002) score test
for excess zeros against a zero modified Poisson
alternative. They use a linear link function for the
inflation model to allow for zero deflation.
Parameters
----------
results_poisson: results instance
The test is only valid if the results instance is a Poisson
model.
exog_infl : ndarray
Explanatory variables for the zero inflated or zero modified
alternative. If exog_infl is None, then the inflation
probability is assumed to be constant.
Returns
-------
score test results based on chisquare distribution
Notes
-----
This is a score test based on the null hypothesis that
the true model is Poisson. It will also reject for
other deviations from a Poisson model if those affect
the zero probabilities, e.g. in the direction of
excess dispersion as in the Negative Binomial
or Generalized Poisson model.
Therefore, rejection in this test does not imply that
zero-inflated Poisson is the appropriate model.
Status: experimental, no verified unit tests,
TODO: If the zero modification probability is assumed
to be constant under the alternative, then we only have
a scalar test score and we can use one-sided tests to
distinguish zero inflation and deflation from the
two-sided deviations. (The general one-sided case is
difficult.)
In this case the test specializes to the test by Broek
References
----------
.. [1] Jansakul, N., and J. P. Hinde. 2002. “Score Tests for Zero-Inflated
Poisson Models.” Computational Statistics & Data Analysis 40 (1):
75–96. https://doi.org/10.1016/S0167-9473(01)00104-9.
"""
if not isinstance(results_poisson.model, Poisson):
# GLM Poisson would be also valid, not tried
import warnings
warnings.warn('Test is only valid if model is Poisson')
nobs = results_poisson.model.endog.shape[0]
if exog_infl is None:
exog_infl = np.ones((nobs, 1))
endog = results_poisson.model.endog
exog = results_poisson.model.exog
mu = results_poisson.predict()
prob_zero = np.exp(-mu)
cov_poi = results_poisson.cov_params()
cross_derivative = (exog_infl.T * (-mu)).dot(exog).T
cov_infl = (exog_infl.T * ((1 - prob_zero) / prob_zero)).dot(exog_infl)
score_obs_infl = exog_infl * (((endog == 0) - prob_zero) / prob_zero)[:,None]
#score_obs_infl = exog_infl * ((endog == 0) * (1 - prob_zero) / prob_zero - (endog>0))[:,None] #same
score_infl = score_obs_infl.sum(0)
cov_score_infl = cov_infl - cross_derivative.T.dot(cov_poi).dot(cross_derivative)
cov_score_infl_inv = np.linalg.pinv(cov_score_infl)
statistic = score_infl.dot(cov_score_infl_inv).dot(score_infl)
df2 = np.linalg.matrix_rank(cov_score_infl) # more general, maybe not needed
df = exog_infl.shape[1]
pvalue = stats.chi2.sf(statistic, df)
res = HolderTuple(
statistic=statistic,
pvalue=pvalue,
df=df,
rank_score=df2,
distribution="chi2",
)
return res | score test for zero inflation or deflation in Poisson
This implements the Jansakul and Hinde (2002) score test
for excess zeros against a zero modified Poisson
alternative. They use a linear link function for the
inflation model to allow for zero deflation.
Parameters
----------
results_poisson: results instance
The test is only valid if the results instance is a Poisson
model.
exog_infl : ndarray
Explanatory variables for the zero inflated or zero modified
alternative. If exog_infl is None, then the inflation
probability is assumed to be constant.
Returns
-------
score test results based on chisquare distribution
Notes
-----
This is a score test based on the null hypothesis that
the true model is Poisson. It will also reject for
other deviations from a Poisson model if those affect
the zero probabilities, e.g. in the direction of
excess dispersion as in the Negative Binomial
or Generalized Poisson model.
Therefore, rejection in this test does not imply that
zero-inflated Poisson is the appropriate model.
Status: experimental, no verified unit tests,
TODO: If the zero modification probability is assumed
to be constant under the alternative, then we only have
a scalar test score and we can use one-sided tests to
distinguish zero inflation and deflation from the
two-sided deviations. (The general one-sided case is
difficult.)
In this case the test specializes to the test by van den Broek (1995).
References
----------
.. [1] Jansakul, N., and J. P. Hinde. 2002. “Score Tests for Zero-Inflated
Poisson Models.” Computational Statistics & Data Analysis 40 (1):
75–96. https://doi.org/10.1016/S0167-9473(01)00104-9. | test_poisson_zeroinflation_jh | python | statsmodels/statsmodels | statsmodels/discrete/_diagnostics_count.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/_diagnostics_count.py | BSD-3-Clause |
def test_poisson_zeroinflation_broek(results_poisson):
"""score test for zero modification in Poisson, special case
This assumes that the Poisson model has a constant and that
the zero modification probability is constant.
This is a special case of test_poisson_zeroinflation derived by
van den Broek 1995.
The test reports two sided and one sided alternatives based on
the normal distribution of the test statistic.
References
----------
.. [1] Broek, Jan van den. 1995. “A Score Test for Zero Inflation in a
Poisson Distribution.” Biometrics 51 (2): 738–43.
https://doi.org/10.2307/2532959.
"""
mu = results_poisson.predict()
prob_zero = np.exp(-mu)
endog = results_poisson.model.endog
# nobs = len(endog)
# score = ((endog == 0) / prob_zero).sum() - nobs
# var_score = (1 / prob_zero).sum() - nobs - endog.sum()
score = (((endog == 0) - prob_zero) / prob_zero).sum()
var_score = ((1 - prob_zero) / prob_zero).sum() - endog.sum()
statistic = score / np.sqrt(var_score)
pvalue_two = 2 * stats.norm.sf(np.abs(statistic))
pvalue_upp = stats.norm.sf(statistic)
pvalue_low = stats.norm.cdf(statistic)
res = HolderTuple(
statistic=statistic,
pvalue=pvalue_two,
pvalue_smaller=pvalue_upp,
pvalue_larger=pvalue_low,
chi2=statistic**2,
pvalue_chi2=stats.chi2.sf(statistic**2, 1),
df_chi2=1,
distribution="normal",
)
return res | score test for zero modification in Poisson, special case
This assumes that the Poisson model has a constant and that
the zero modification probability is constant.
This is a special case of test_poisson_zeroinflation derived by
van den Broek 1995.
The test reports two-sided and one-sided alternatives based on
the normal distribution of the test statistic.
References
----------
.. [1] Broek, Jan van den. 1995. “A Score Test for Zero Inflation in a
Poisson Distribution.” Biometrics 51 (2): 738–43.
https://doi.org/10.2307/2532959. | test_poisson_zeroinflation_broek | python | statsmodels/statsmodels | statsmodels/discrete/_diagnostics_count.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/_diagnostics_count.py | BSD-3-Clause |
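A follow-up sketch for the van den Broek special case, reusing `res_poisson` from the previous sketch; the attribute names are taken from the HolderTuple constructed in the code above, and the private import path is again an assumption.

from statsmodels.discrete._diagnostics_count import test_poisson_zeroinflation_broek

res_broek = test_poisson_zeroinflation_broek(res_poisson)
print(res_broek.statistic)                                 # normally distributed test statistic
print(res_broek.pvalue)                                    # two-sided p-value
print(res_broek.pvalue_smaller, res_broek.pvalue_larger)   # one-sided alternatives
print(res_broek.chi2, res_broek.pvalue_chi2)               # equivalent chi2(1) form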
def test_poisson_zeros(results):
"""Test for excess zeros in Poisson regression model.
The test is implemented following Tang and Tang [1]_, eq. (12), which is
based on the test derived in He et al 2019 [2]_.
References
----------
.. [1] Tang, Yi, and Wan Tang. 2018. “Testing Modified Zeros for Poisson
Regression Models:” Statistical Methods in Medical Research,
September. https://doi.org/10.1177/0962280218796253.
.. [2] He, Hua, Hui Zhang, Peng Ye, and Wan Tang. 2019. “A Test of Inflated
Zeros for Poisson Regression Models.” Statistical Methods in
Medical Research 28 (4): 1157–69.
https://doi.org/10.1177/0962280217749991.
"""
x = results.model.exog
mean = results.predict()
prob0 = np.exp(-mean)
counts = (results.model.endog == 0).astype(int)
diff = counts.sum() - prob0.sum()
var1 = prob0 @ (1 - prob0)
pm = prob0 * mean
c = np.linalg.inv(x.T * mean @ x)
pmx = pm @ x
var2 = pmx @ c @ pmx
var = var1 - var2
statistic = diff / np.sqrt(var)
pvalue_two = 2 * stats.norm.sf(np.abs(statistic))
pvalue_upp = stats.norm.sf(statistic)
pvalue_low = stats.norm.cdf(statistic)
res = HolderTuple(
statistic=statistic,
pvalue=pvalue_two,
pvalue_smaller=pvalue_upp,
pvalue_larger=pvalue_low,
chi2=statistic**2,
pvalue_chi2=stats.chi2.sf(statistic**2, 1),
df_chi2=1,
distribution="normal",
)
return res | Test for excess zeros in Poisson regression model.
The test is implemented following Tang and Tang [1]_, eq. (12), which is
based on the test derived in He et al 2019 [2]_.
References
----------
.. [1] Tang, Yi, and Wan Tang. 2018. “Testing Modified Zeros for Poisson
Regression Models:” Statistical Methods in Medical Research,
September. https://doi.org/10.1177/0962280218796253.
.. [2] He, Hua, Hui Zhang, Peng Ye, and Wan Tang. 2019. “A Test of Inflated
Zeros for Poisson Regression Models.” Statistical Methods in
Medical Research 28 (4): 1157–69.
https://doi.org/10.1177/0962280217749991. | test_poisson_zeros | python | statsmodels/statsmodels | statsmodels/discrete/_diagnostics_count.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/_diagnostics_count.py | BSD-3-Clause |
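A sketch for the He/Tang excess-zeros test, again reusing `res_poisson`; only a fitted Poisson results instance is needed, and the statistic is reported in both normal and chi-square form (attribute names follow the HolderTuple in the code above).

from statsmodels.discrete._diagnostics_count import test_poisson_zeros

res_zeros = test_poisson_zeros(res_poisson)
print(res_zeros.statistic, res_zeros.pvalue)   # normal statistic, two-sided p-value
print(res_zeros.chi2, res_zeros.pvalue_chi2)   # chi2(1) version of the same test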
def test_chisquare_prob(self, bin_edges=None, method=None):
"""Moment test for binned probabilites using OPG.
Paramters
---------
binedges : array_like or None
This defines which counts are included in the test on frequencies
and how counts are combined in bins.
The default if bin_edges is None will change in future.
See Notes and Example sections below.
method : str
Currently only `method = "opg"` is available.
If method is None, the OPG will be used, but the default might
change in future versions.
See Notes section below.
Returns
-------
test result
Notes
-----
Warning: The current default can have many empty or nearly empty bins.
The default number of bins is given by max(endog).
Currently it is recommended to limit the number of bins explicitly,
see Examples below.
Binning will change in a future version and automatic binning will be added.
Currently only the outer product of gradients (OPG) method is
implemented. In many cases, the OPG version of a specification test
overrejects in small samples.
Specialized tests that use observed or expected information matrix
often have better small sample properties.
The default method will change if better methods are added.
Examples
--------
The following call is a test for the probability of zeros
`test_chisquare_prob(bin_edges=np.arange(3))`
`test_chisquare_prob(bin_edges=np.arange(10))` tests the hypothesis
that the frequencies for counts up to 7 correspond to the estimated
Poisson distributions.
In this case, the edges are 0, ..., 9, which define 9 bins for
counts 0 to 8. The last bin is dropped, so the joint test hypothesis is
that the observed aggregated frequencies for counts 0 to 7 correspond
to the model prediction for those frequencies. Predicted probabilities
Prob(y_i = k | x) are aggregated over observations ``i``.
"""
kwds = {}
if bin_edges is not None:
# TODO: verify upper bound, we drop last bin (may be open, inf)
kwds["y_values"] = np.arange(bin_edges[-2] + 1)
probs = self.results.predict(which="prob", **kwds)
res = test_chisquare_prob(self.results, probs, bin_edges=bin_edges,
method=method)
return res | Moment test for binned probabilities using OPG.
Parameters
----------
bin_edges : array_like or None
This defines which counts are included in the test on frequencies
and how counts are combined in bins.
The default when bin_edges is None will change in a future version.
See the Notes and Examples sections below.
method : str
Currently only `method = "opg"` is available.
If method is None, the OPG will be used, but the default might
change in future versions.
See Notes section below.
Returns
-------
test result
Notes
-----
Warning: The current default can have many empty or nearly empty bins.
The default number of bins is given by max(endog).
Currently it is recommended to limit the number of bins explicitly,
see Examples below.
Binning will change in a future version and automatic binning will be added.
Currently only the outer product of gradients (OPG) method is
implemented. In many cases, the OPG version of a specification test
overrejects in small samples.
Specialized tests that use observed or expected information matrix
often have better small sample properties.
The default method will change if better methods are added.
Examples
--------
The following call is a test for the probability of zeros
`test_chisquare_prob(bin_edges=np.arange(3))`
`test_chisquare_prob(bin_edges=np.arange(10))` tests the hypothesis
that the frequencies for counts up to 7 correspond to the estimated
Poisson distributions.
In this case, the edges are 0, ..., 9, which define 9 bins for
counts 0 to 8. The last bin is dropped, so the joint test hypothesis is
that the observed aggregated frequencies for counts 0 to 7 correspond
to the model prediction for those frequencies. Predicted probabilities
Prob(y_i = k | x) are aggregated over observations ``i``. | test_chisquare_prob | python | statsmodels/statsmodels | statsmodels/discrete/diagnostic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/diagnostic.py | BSD-3-Clause |
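A sketch for the diagnostic wrapper method above, assuming the wrapper is reached through `results.get_diagnostic()` as in recent statsmodels releases; `res_poisson` and `np` come from the earlier sketches.

dia = res_poisson.get_diagnostic()
# bin edges 0..9 define 9 bins for counts 0 to 8; the last bin is dropped, so this
# tests observed frequencies of counts 0 to 7 against the fitted Poisson probabilities
res_chi2 = dia.test_chisquare_prob(bin_edges=np.arange(10))
print(res_chi2)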
def plot_probs(self, label='predicted', upp_xlim=None,
fig=None):
"""Plot observed versus predicted frequencies for entire sample.
"""
probs_predicted = self.probs_predicted.sum(0)
k_probs = len(probs_predicted)
freq = np.bincount(self.results.model.endog.astype(int),
minlength=k_probs)[:k_probs]
fig = plot_probs(freq, probs_predicted,
label=label, upp_xlim=upp_xlim,
fig=fig)
return fig | Plot observed versus predicted frequencies for entire sample. | plot_probs | python | statsmodels/statsmodels | statsmodels/discrete/diagnostic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/diagnostic.py | BSD-3-Clause |
def test_dispersion(self):
"""Test for excess (over or under) dispersion in Poisson.
Returns
-------
dispersion results
"""
res = test_poisson_dispersion(self.results)
return res | Test for excess (over or under) dispersion in Poisson.
Returns
-------
dispersion results | test_dispersion | python | statsmodels/statsmodels | statsmodels/discrete/diagnostic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/diagnostic.py | BSD-3-Clause |
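A closing sketch for the remaining two methods on the same wrapper; `dia` is the diagnostic instance from the previous sketch and `plot_probs` requires matplotlib to be installed.

fig = dia.plot_probs(upp_xlim=10)   # observed versus predicted count frequencies
res_disp = dia.test_dispersion()    # score tests for over- or underdispersion
print(res_disp)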