code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def tuning_s_estimator_mean(norm, breakdown=None):
"""Tuning parameter and scale bias correction for S-estimators of mean.
The reference distribution is the normal distribution.
This requires a (hard) redescending norm, i.e. with finite max rho.
Parameters
----------
norm : instance of RobustNorm subclass
breakdown : float or iterable of float in (0, 0.5]
Desired breakdown point between 0 and 0.5.
Default if breakdown is None is a list of breakdown points.
Returns
-------
Holder instance with the following attributes :
- `breakdown` : breakdown point
- `eff` : relative efficiency
- `param` : tuning parameter for norm
- `scale_bias` : correction term for Fisher consistency.
Notes
-----
Based on Rousseeuw and Leroy (1987). See table 19, p. 142 that can be
replicated by this function for TukeyBiweight norm.
Note: the results of this function are based on computations without rounding
to decimal precision, and differ in some cases in the last digit from
the table by Rousseeuw and Leroy.
Numerical expectation and root finding based on scipy integrate and
optimize.
TODO: more options for details, numeric approximation and root finding.
There is currently no feasibility check in functions.
References
----------
Rousseeuw, P. J., and A. M. Leroy (1987). Robust Regression and
Outlier Detection. Wiley.
"""
if breakdown is None:
bps = [0.5, 0.45, 0.40, 0.35, 0.30, 0.25, 0.20, 0.15, 0.1, 0.05]
else:
# allow for scalar bp
try:
_ = iter(breakdown)
bps = breakdown
except TypeError:
bps = [breakdown]
def func(c):
norm_ = norm
norm_._set_tuning_param(c, inplace=True)
bp = stats.norm.expect(lambda x: norm_.rho(x)) / norm_.max_rho()
return bp
res = []
for bp in bps:
c_bp = optimize.brentq(lambda c0: func(c0) - bp, 0.1, 10)
norm._set_tuning_param(c_bp, inplace=True) # inplace modification
eff = 1 / _var_normal(norm)
b = stats.norm.expect(lambda x : norm.rho(x))
res.append([bp, eff, c_bp, b])
if np.size(bps) > 1:
res = np.asarray(res).T
else:
# use one list
res = res[0]
res2 = Holder(
breakdown=res[0],
eff=res[1],
param=res[2],
scale_bias=res[3],
all=res,
)
return res2 | Tuning parameter and scale bias correction for S-estimators of mean.
The reference distribution is the normal distribution.
This requires a (hard) redescending norm, i.e. with finite max rho.
Parameters
----------
norm : instance of RobustNorm subclass
breakdown : float or iterable of float in (0, 0.5]
Desired breakdown point between 0 and 0.5.
Default if breakdown is None is a list of breakdown points.
Returns
-------
Holder instance with the following attributes :
- `breakdown` : breakdown point
- `eff` : relative efficiency
- `param` : tuning parameter for norm
- `scale_bias` : correction term for Fisher consistency.
Notes
-----
Based on Rousseeuw and Leroy (1987). See table 19, p. 142 that can be
replicated by this function for TukeyBiweight norm.
Note: the results of this function are based on computations without rounding
to decimal precision, and differ in some cases in the last digit from
the table by Rousseeuw and Leroy.
Numerical expectation and root finding based on scipy integrate and
optimize.
TODO: more options for details, numeric approximation and root finding.
There is currently no feasibility check in functions.
References
----------
Rousseeuw, P. J., and A. M. Leroy (1987). Robust Regression and
Outlier Detection. Wiley. | tuning_s_estimator_mean | python | statsmodels/statsmodels | statsmodels/robust/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tools.py | BSD-3-Clause |
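For orientation, a minimal usage sketch of the helper above (assuming the function is importable from statsmodels.robust.tools, as the file path suggests, and that statsmodels.robust.norms provides TukeyBiweight):

from statsmodels.robust import norms
from statsmodels.robust.tools import tuning_s_estimator_mean

# Tuning constant and Fisher-consistency correction for a 50% breakdown point.
res = tuning_s_estimator_mean(norms.TukeyBiweight(), breakdown=0.5)
print(res.param)       # tuning constant c for the biweight norm
print(res.scale_bias)  # E[rho(x)] at the normal, used for Fisher consistency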
def scale_bias_cov_biw(c, k_vars):
"""Multivariate scale bias correction for TukeyBiweight norm.
This uses the chisquare distribution as reference distribution for the
squared Mahalanobis distance.
"""
p = k_vars # alias for formula
chip, chip2, chip4, chip6 = stats.chi2.cdf(c**2, [p, p + 2, p + 4, p + 6])
b = p / 2 * chip2 - p * (p + 2) / (2 * c**2) * chip4
b += p * (p + 2) * (p + 4) / (6 * c**4) * chip6 + c**2 / 6 * (1 - chip)
return b, b / (c**2 / 6) | Multivariate scale bias correction for TukeyBiweight norm.
This uses the chisquare distribution as reference distribution for the
squared Mahalanobis distance. | scale_bias_cov_biw | python | statsmodels/statsmodels | statsmodels/robust/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tools.py | BSD-3-Clause |
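A brief illustrative call (the constants c=5.0 and k_vars=3 are arbitrary): the first return value is the scale bias b, the second is b divided by the maximum of the biweight rho, c**2 / 6, i.e. the implied breakdown point.

b, bp = scale_bias_cov_biw(5.0, k_vars=3)
# bp is the breakdown point implied by tuning constant c=5.0 in 3 dimensions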
def scale_bias_cov(norm, k_vars):
"""Multivariate scale bias correction.
Parameters
----------
norm : norm instance
The rho function of the norm is used in the moment condition for
estimating scale.
k_vars : int
Number of random variables in the multivariate data.
Returns
-------
scale_bias: float
breakdown_point : float
Breakdown point computed as scale bias divided by max rho.
"""
def rho(x):
return norm.rho(np.sqrt(x))
scale_bias = stats.chi2.expect(rho, args=(k_vars,))
return scale_bias, scale_bias / norm.max_rho() | Multivariate scale bias correction.
Parameters
----------
norm : norm instance
The rho function of the norm is used in the moment condition for
estimating scale.
k_vars : int
Number of random variables in the multivariate data.
Returns
-------
scale_bias: float
breakdown_point : float
Breakdown point computed as scale bias divided by max rho. | scale_bias_cov | python | statsmodels/statsmodels | statsmodels/robust/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tools.py | BSD-3-Clause |
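The generic version computes the same quantities by numerical integration for any redescending norm. A hedged sketch, assuming TukeyBiweight is available from statsmodels.robust.norms:

from statsmodels.robust import norms

norm = norms.TukeyBiweight(c=5.0)        # illustrative tuning constant
sb, bp = scale_bias_cov(norm, k_vars=3)  # scale bias and implied breakdown point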
def tuning_s_cov(norm, k_vars, breakdown_point=0.5, limits=()):
"""Tuning parameter for multivariate S-estimator given breakdown point.
"""
from .norms import TukeyBiweight # avoid circular import
if not limits:
limits = (0.5, 30)
if isinstance(norm, TukeyBiweight):
def func(c):
return scale_bias_cov_biw(c, k_vars)[1] - breakdown_point
else:
norm = norm._set_tuning_param(2., inplace=False) # create copy
def func(c):
norm._set_tuning_param(c, inplace=True)
return scale_bias_cov(norm, k_vars)[1] - breakdown_point
p_tune = optimize.brentq(func, limits[0], limits[1])
return p_tune | Tuning parameter for multivariate S-estimator given breakdown point. | tuning_s_cov | python | statsmodels/statsmodels | statsmodels/robust/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tools.py | BSD-3-Clause |
def eff_mvmean(norm, k_vars):
"""Efficiency for M-estimator of multivariate mean at normal distribution.
This also applies to estimators that are locally equivalent to an
M-estimator such as S- and MM-estimators.
Parameters
----------
norm : instance of norm class
k_vars : int
Number of variables in multivariate random variable, i.e. dimension.
Returns
-------
eff : float
Asymptotic relative efficiency of mean at normal distribution.
alpha : float
Numerical integral. Efficiency is beta**2 / alpha
beta : float
Numerical integral.
Notes
-----
This implements equ. (5.3) p. 1671 in Lopuhaä 1989
References
----------
.. [1] Lopuhaä, Hendrik P. 1989. “On the Relation between S-Estimators
and M-Estimators of Multivariate Location and Covariance.”
The Annals of Statistics 17 (4): 1662–83.
"""
k = k_vars # shortcut
def f_alpha(d):
return norm.psi(d) ** 2 / k
def f_beta(d):
return (1 - 1 / k) * norm.weights(d) + 1 / k * norm.psi_deriv(d)
alpha = stats.chi(k).expect(f_alpha)
beta = stats.chi(k).expect(f_beta)
return beta**2 / alpha, alpha, beta | Efficiency for M-estimator of multivariate mean at normal distribution.
This also applies to estimators that are locally equivalent to an
M-estimator such as S- and MM-estimators.
Parameters
----------
norm : instance of norm class
k_vars : int
Number of variables in multivariate random variable, i.e. dimension.
Returns
-------
eff : float
Asymptotic relative efficiency of mean at normal distribution.
alpha : float
Numerical integral. Efficiency is beta**2 / alpha
beta : float
Numerical integral.
Notes
-----
This implements equ. (5.3) p. 1671 in Lopuhaä 1989
References
----------
.. [1] Lopuhaä, Hendrik P. 1989. “On the Relation between S-Estimators
and M-Estimators of Multivariate Location and Covariance.”
The Annals of Statistics 17 (4): 1662–83. | eff_mvmean | python | statsmodels/statsmodels | statsmodels/robust/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tools.py | BSD-3-Clause |
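Written out, the quantities the function integrates numerically are, with d distributed as a chi random variable with k degrees of freedom and w(d) = psi(d)/d the norm weights (a restatement of Lopuhaä's eq. (5.3) as implemented above):

\alpha = \frac{1}{k}\,\mathrm{E}\bigl[\psi(d)^2\bigr], \qquad
\beta = \mathrm{E}\Bigl[\bigl(1 - \tfrac{1}{k}\bigr)\, w(d) + \tfrac{1}{k}\,\psi'(d)\Bigr], \qquad
\mathrm{eff} = \frac{\beta^2}{\alpha}.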
def eff_mvshape(norm, k_vars):
"""Efficiency of M-estimator of multivariate shape at normal distribution.
This also applies to estimators that are locally equivalent to an
M-estimator such as S- and MM-estimators.
Parameters
----------
norm : instance of norm class
k_vars : int
Number of variables in multivariate random variable, i.e. dimension.
Returns
-------
eff : float
Asymptotic relative efficiency of mean at normal distribution.
alpha : float
Numerical integral. Efficiency is beta**2 / alpha
beta : float
Numerical integral.
Notes
-----
This implements sigma_1 in equ. (5.5) p. 1671 in Lopuhaä 1989.
Efficiency of shape is approximately 1 / sigma1.
References
----------
.. [1] Lopuhaä, Hendrik P. 1989. “On the Relation between S-Estimators
and M-Estimators of Multivariate Location and Covariance.”
The Annals of Statistics 17 (4): 1662–83.
"""
k = k_vars # shortcut
def f_a(d):
return k * (k + 2) * norm.psi(d) ** 2 * d**2
def f_b(d):
return norm.psi_deriv(d) * d**2 + (k + 1) * norm.psi(d) * d
a = stats.chi(k).expect(f_a)
b = stats.chi(k).expect(f_b)
return b**2 / a, a, b | Efficiency of M-estimator of multivariate shape at normal distribution.
This also applies to estimators that are locally equivalent to an
M-estimator such as S- and MM-estimators.
Parameters
----------
norm : instance of norm class
k_vars : int
Number of variables in multivariate random variable, i.e. dimension.
Returns
-------
eff : float
Asymptotic relative efficiency of mean at normal distribution.
alpha : float
Numerical integral. Efficiency is beta**2 / alpha
beta : float
Numerical integral.
Notes
-----
This implements sigma_1 in equ. (5.5) p. 1671 in Lopuhaä 1989.
Efficiency of shape is approximately 1 / sigma1.
References
----------
.. [1] Lopuhaä, Hendrik P. 1989. “On the Relation between S-Estimators
and M-Estimators of Multivariate Location and Covariance.”
The Annals of Statistics 17 (4): 1662–83. | eff_mvshape | python | statsmodels/statsmodels | statsmodels/robust/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tools.py | BSD-3-Clause |
def tuning_m_cov_eff(norm, k_vars, efficiency=0.95, eff_mean=True, limits=()):
"""Tuning parameter for multivariate M-estimator given efficiency.
This also applies to estimators that are locally equivalent to an
M-estimator such as S- and MM-estimators.
Parameters
----------
norm : instance of norm class
k_vars : int
Number of variables in multivariate random variable, i.e. dimension.
efficiency : float < 1
Desired asymptotic relative efficiency of mean estimator.
Default is 0.95.
eff_mean : bool
If eff_mean is true (default), then the tuning parameter is chosen to
achieve the efficiency of the mean estimate.
If eff_mean is false, then the tuning parameter is chosen to achieve
the efficiency of the shape estimate.
limits : tuple
Limits for rootfinding with scipy.optimize.brentq.
In some cases the interval limits for rootfinding can be too small
and not cover the root. Current default limits are (0.5, 30).
Returns
-------
float : Tuning parameter for the norm to achieve desired efficiency.
Asymptotic relative efficiency of mean at normal distribution.
Notes
-----
This uses numerical integration and rootfinding and will be
relatively slow.
"""
if not limits:
limits = (0.5, 30)
# make copy of norm
norm = norm._set_tuning_param(1, inplace=False)
if eff_mean:
def func(c):
norm._set_tuning_param(c, inplace=True)
return eff_mvmean(norm, k_vars)[0] - efficiency
else:
def func(c):
norm._set_tuning_param(c, inplace=True)
return eff_mvshape(norm, k_vars)[0] - efficiency
p_tune = optimize.brentq(func, limits[0], limits[1])
return p_tune | Tuning parameter for multivariate M-estimator given efficiency.
This also applies to estimators that are locally equivalent to an
M-estimator such as S- and MM-estimators.
Parameters
----------
norm : instance of norm class
k_vars : int
Number of variables in multivariate random variable, i.e. dimension.
efficiency : float < 1
Desired asymptotic relative efficiency of mean estimator.
Default is 0.95.
eff_mean : bool
If eff_mean is true (default), then the tuning parameter is chosen to
achieve the efficiency of the mean estimate.
If eff_mean is false, then the tuning parameter is chosen to achieve
the efficiency of the shape estimate.
limits : tuple
Limits for rootfinding with scipy.optimize.brentq.
In some cases the interval limits for rootfinding can be too small
and not cover the root. Current default limits are (0.5, 30).
Returns
-------
float : Tuning parameter for the norm to achieve desired efficiency.
Asymptotic relative efficiency of mean at normal distribution.
Notes
-----
This uses numerical integration and rootfinding and will be
relatively slow. | tuning_m_cov_eff | python | statsmodels/statsmodels | statsmodels/robust/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tools.py | BSD-3-Clause |
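A short sketch of tuning a biweight norm for 95% mean efficiency in 4 dimensions (the import is an assumption about the module layout; the starting c=1.0 is only an initial value, since the norm is copied and retuned internally):

from statsmodels.robust.norms import TukeyBiweight

c_eff = tuning_m_cov_eff(TukeyBiweight(c=1.0), k_vars=4, efficiency=0.95)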
def tukeybiweight_mvmean_eff(k, eff, eff_mean=True):
"""tuning parameter for biweight norm to achieve efficiency for mv-mean.
Uses values from precomputed table if available, otherwise computes it
numerically and adds it to the module global dict.
"""
if eff_mean:
table_dict = tukeybiweight_mvmean_eff_d
else:
table_dict = tukeybiweight_mvshape_eff_d
try:
tp = table_dict[(k, eff)]
except KeyError:
# compute and cache
from .norms import TukeyBiweight # avoid circular import
norm = TukeyBiweight(c=1)
tp = tuning_m_cov_eff(norm, k, efficiency=eff, eff_mean=eff_mean)
table_dict[(k, eff)] = tp
return tp | tuning parameter for biweight norm to achieve efficiency for mv-mean.
Uses values from precomputed table if available, otherwise computes it
numerically and adds it to the module global dict. | tukeybiweight_mvmean_eff | python | statsmodels/statsmodels | statsmodels/robust/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tools.py | BSD-3-Clause |
def _cabs(x):
"""absolute value function that changes complex sign based on real sign
This could be useful for complex step derivatives of functions that
need abs. Not yet used.
"""
sign = (x.real >= 0) * 2 - 1
return sign * x | absolute value function that changes complex sign based on real sign
This could be useful for complex step derivatives of functions that
need abs. Not yet used. | _cabs | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
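To illustrate why a sign-preserving absolute value matters for complex-step derivatives (np.abs of a complex number returns the real modulus and would destroy the imaginary part carrying the derivative), a small sketch; the function f and the step size are purely illustrative:

import numpy as np

def f(x):
    return _cabs(x) * x   # equals |x| * x for real x, with derivative 2 * |x|

h = 1e-20
x0 = 1.5
deriv = np.imag(f(x0 + 1j * h)) / h   # complex-step derivative, approximately 2 * |x0| = 3.0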
def rho(self, z):
"""
The robust criterion estimator function.
Abstract method:
-2 loglike used in M-estimator
"""
raise NotImplementedError | The robust criterion estimator function.
Abstract method:
-2 loglike used in M-estimator | rho | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi(self, z):
"""
Derivative of rho. Sometimes referred to as the influence function.
Abstract method:
psi = rho'
"""
raise NotImplementedError | Derivative of rho. Sometimes referred to as the influence function.
Abstract method:
psi = rho' | psi | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def weights(self, z):
"""
Returns the value of psi(z) / z
Abstract method:
psi(z) / z
"""
raise NotImplementedError | Returns the value of psi(z) / z
Abstract method:
psi(z) / z | weights | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi_deriv(self, z):
"""
Derivative of psi. Used to obtain robust covariance matrix.
See statsmodels.rlm for more information.
Abstract method:
psi_derive = psi'
"""
raise NotImplementedError | Derivative of psi. Used to obtain robust covariance matrix.
See statsmodels.rlm for more information.
Abstract method:
psi_derive = psi' | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def __call__(self, z):
"""
Returns the value of estimator rho applied to an input
"""
return self.rho(z) | Returns the value of estimator rho applied to an input | __call__ | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def rho(self, z):
"""
The least squares estimator rho function
Parameters
----------
z : ndarray
1d array
Returns
-------
rho : ndarray
rho(z) = (1/2.)*z**2
"""
return z**2 * 0.5 | The least squares estimator rho function
Parameters
----------
z : ndarray
1d array
Returns
-------
rho : ndarray
rho(z) = (1/2.)*z**2 | rho | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi(self, z):
"""
The psi function for the least squares estimator
The analytic derivative of rho
Parameters
----------
z : array_like
1d array
Returns
-------
psi : ndarray
psi(z) = z
"""
return np.asarray(z) | The psi function for the least squares estimator
The analytic derivative of rho
Parameters
----------
z : array_like
1d array
Returns
-------
psi : ndarray
psi(z) = z | psi | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def weights(self, z):
"""
The least squares estimator weighting function for the IRLS algorithm.
The psi function scaled by the input z
Parameters
----------
z : array_like
1d array
Returns
-------
weights : ndarray
weights(z) = np.ones(z.shape)
"""
z = np.asarray(z)
return np.ones(z.shape, np.float64) | The least squares estimator weighting function for the IRLS algorithm.
The psi function scaled by the input z
Parameters
----------
z : array_like
1d array
Returns
-------
weights : ndarray
weights(z) = np.ones(z.shape) | weights | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi_deriv(self, z):
"""
The derivative of the least squares psi function.
Returns
-------
psi_deriv : ndarray
ones(z.shape)
Notes
-----
Used to estimate the robust covariance matrix.
"""
z = np.asarray(z)
return np.ones(z.shape, np.float64) | The derivative of the least squares psi function.
Returns
-------
psi_deriv : ndarray
ones(z.shape)
Notes
-----
Used to estimate the robust covariance matrix. | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def _set_tuning_param(self, c, inplace=False):
"""Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param.
"""
if inplace:
self.t = c
return self
else:
return self.__class__(t=c) | Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param. | _set_tuning_param | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def _subset(self, z):
"""
Huber's T is defined piecewise over the range for z
"""
z = np.asarray(z)
return np.less_equal(np.abs(z), self.t) | Huber's T is defined piecewise over the range for z | _subset | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi_deriv(self, z):
"""
The derivative of Huber's t psi function
Notes
-----
Used to estimate the robust covariance matrix.
"""
return np.less_equal(np.abs(z), self.t).astype(float) | The derivative of Huber's t psi function
Notes
-----
Used to estimate the robust covariance matrix. | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi_deriv(self, z):
"""
The derivative of Ramsay's Ea psi function.
Notes
-----
Used to estimate the robust covariance matrix.
"""
a = self.a
x = np.exp(-a * np.abs(z))
dx = -a * x * np.sign(z)
y = z
dy = 1
return x * dy + y * dx | The derivative of Ramsay's Ea psi function.
Notes
-----
Used to estimate the robust covariance matrix. | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def _subset(self, z):
"""
Andrew's wave is defined piecewise over the range of z.
"""
z = np.asarray(z)
return np.less_equal(np.abs(z), self.a * np.pi) | Andrew's wave is defined piecewise over the range of z. | _subset | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi_deriv(self, z):
"""
The derivative of Andrew's wave psi function
Notes
-----
Used to estimate the robust covariance matrix.
"""
test = self._subset(z)
return test * np.cos(z / self.a) | The derivative of Andrew's wave psi function
Notes
-----
Used to estimate the robust covariance matrix. | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def _subset(self, z):
"""
Least trimmed mean is defined piecewise over the range of z.
"""
z = np.asarray(z)
return np.less_equal(np.abs(z), self.c) | Least trimmed mean is defined piecewise over the range of z. | _subset | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi_deriv(self, z):
"""
The derivative of least trimmed mean psi function
Notes
-----
Used to estimate the robust covariance matrix.
"""
test = self._subset(z)
return test | The derivative of least trimmed mean psi function
Notes
-----
Used to estimate the robust covariance matrix. | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def _subset(self, z):
"""
Hampel's function is defined piecewise over the range of z
"""
z = np.abs(np.asarray(z))
t1 = np.less_equal(z, self.a)
t2 = np.less_equal(z, self.b) * np.greater(z, self.a)
t3 = np.less_equal(z, self.c) * np.greater(z, self.b)
return t1, t2, t3 | Hampel's function is defined piecewise over the range of z | _subset | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi_deriv(self, z):
"""Derivative of psi function, second derivative of rho function.
"""
a, b, c = self.a, self.b, self.c
z_isscalar = np.isscalar(z)
z = np.atleast_1d(z)
t1, _, t3 = self._subset(z)
dt = np.promote_types(z.dtype, "float")
d = np.zeros(z.shape, dtype=dt)
d[t1] = 1.0
zt3 = z[t3]
d[t3] = -(a * np.sign(zt3) * zt3) / (np.abs(zt3) * (c - b))
if z_isscalar:
d = d[0]
return d | Derivative of psi function, second derivative of rho function. | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def get_tuning(cls, bp=None, eff=None):
"""Tuning parameter for given breakdown point or efficiency.
This currently only returns values from a table.
Parameters
----------
bp : float in [0.05, 0.5] or None
Required breakdown point
Either bp or eff has to be specified, but not both.
eff : float or None
Required asymptotic efficiency.
Either bp or eff has to be specified, but not both.
Returns
-------
float : tuning parameter.
"""
if ((bp is None and eff is None) or
(bp is not None and eff is not None)):
raise ValueError("exactly one of bp and eff needs to be provided")
if bp is not None:
return _tables.tukeybiweight_bp[bp]
elif eff is not None:
return _tables.tukeybiweight_eff[eff] | Tuning parameter for given breakdown point or efficiency.
This currently only returns values from a table.
Parameters
----------
bp : float in [0.05, 0.5] or None
Required breakdown point
Either bp or eff has to be specified, but not both.
eff : float or None
Required asymptotic efficiency.
Either bp or eff has to be specified, but not both.
Returns
-------
float : tuning parameter. | get_tuning | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
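A short usage sketch (the cls signature suggests this is a classmethod; whether a particular bp or eff key is available depends on the precomputed table in _tables):

c = TukeyBiweight.get_tuning(bp=0.5)   # tuning constant for a 50% breakdown point
norm = TukeyBiweight(c=c)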
def _subset(self, z):
"""
Tukey's biweight is defined piecewise over the range of z
"""
z = np.abs(np.asarray(z))
return np.less_equal(z, self.c) | Tukey's biweight is defined piecewise over the range of z | _subset | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi_deriv(self, z):
"""
The derivative of Tukey's biweight psi function
Notes
-----
Used to estimate the robust covariance matrix.
"""
subset = self._subset(z)
return subset * ((1 - (z/self.c)**2)**2
- (4*z**2/self.c**2) * (1-(z/self.c)**2)) | The derivative of Tukey's biweight psi function
Notes
-----
Used to estimate the robust covariance matrix. | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def _subset(self, z):
"""
TukeyQuartic is defined piecewise over the range of z
"""
z = np.abs(np.asarray(z))
return np.less_equal(z, self.c) | TukeyQuartic is defined piecewise over the range of z | _subset | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi_deriv(self, z):
"""
The derivative of the TukeyQuartic psi function.
Notes
-----
Used to estimate the robust covariance matrix.
"""
c = self.c
k = self.k
subset = self._subset(z)
x = z / c
# d/dx(x (1 - (x/c)^k)^2) = -(1 - (x/c)^k) (2 k (x/c)^k + (x/c)^k - 1)
return subset * (1 - x**k) * (1 - (2 * k + 1) * x**k) | The derivative of the TukeyQuartic psi function.
Notes
-----
Used to estimate the robust covariance matrix. | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def rho(self, z):
"""
The rho function of the StudentT norm.
Parameters
----------
z : ndarray
1d array
Returns
-------
rho : ndarray
rho(z) = (c**2 * df / 2.) * log(df + (z / c)**2) - const
The ``const`` shifts the rho function so that rho(0) = 0.
"""
c = self.c
df = self.df
z = np.asarray(z)
const = (c**2 * df / 2.) * np.log(df) if df != 0 else 0
return (c**2 * df / 2.) * np.log(df + (z / c)**2) - const | The rho function of the StudentT norm.
Parameters
----------
z : ndarray
1d array
Returns
-------
rho : ndarray
rho(z) = (c**2 * df / 2.) * log(df + (z / c)**2) - const
The ``const`` shifts the rho function so that rho(0) = 0. | rho | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi(self, z):
"""
The psi function of the StudentT norm.
The analytic derivative of rho.
Parameters
----------
z : array_like
1d array
Returns
-------
psi : ndarray
psi(z) = z * df / (df + (z / c)**2)
"""
c = self.c
df = self.df
z = np.asarray(z)
return z * df / (df + (z / c)**2) | The psi function of the StudentT norm.
The analytic derivative of rho.
Parameters
----------
z : array_like
1d array
Returns
-------
psi : ndarray
psi(z) = z * df / (df + (z / c)**2)
def weights(self, z):
"""
The weighting function for the IRLS algorithm of the StudentT norm.
The psi function scaled by the input z
Parameters
----------
z : array_like
1d array
Returns
-------
weights : ndarray
weights(z) = df / (df + (z / c)**2)
"""
c = self.c
df = self.df
z = np.asarray(z)
return df / (df + (z / c)**2) | The weighting function for the IRLS algorithm of the StudentT norm.
The psi function scaled by the input z
Parameters
----------
z : array_like
1d array
Returns
-------
weights : ndarray
weights(z) = df / (df + (z / c)**2)
def psi_deriv(self, z):
"""
The derivative of the psi function of the StudentT norm.
Returns
-------
psi_deriv : ndarray
Derivative of the psi function evaluated at z.
Notes
-----
Used to estimate the robust covariance matrix.
"""
c = self.c
df = self.df
x = np.asarray(z) / c
return - 2 * df * x**2 / (df + x**2)**2 + df / (df + x**2) | The derivative of the psi function of the StudentT norm.
Returns
-------
psi_deriv : ndarray
Derivative of the psi function evaluated at z.
Notes
-----
Used to estimate the robust covariance matrix. | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def rho(self, z):
"""
The robust criterion function for MQuantileNorm.
Parameters
----------
z : array_like
1d array
Returns
-------
rho : ndarray
"""
qq = self._get_q(z)
return qq * self.base_norm.rho(z) | The robust criterion function for MQuantileNorm.
Parameters
----------
z : array_like
1d array
Returns
-------
rho : ndarray | rho | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi(self, z):
"""
The psi function for MQuantileNorm estimator.
The analytic derivative of rho
Parameters
----------
z : array_like
1d array
Returns
-------
psi : ndarray
"""
qq = self._get_q(z)
return qq * self.base_norm.psi(z) | The psi function for MQuantileNorm estimator.
The analytic derivative of rho
Parameters
----------
z : array_like
1d array
Returns
-------
psi : ndarray | psi | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def weights(self, z):
"""
MQuantileNorm weighting function for the IRLS algorithm
The psi function scaled by z, psi(z) / z
Parameters
----------
z : array_like
1d array
Returns
-------
weights : ndarray
"""
qq = self._get_q(z)
return qq * self.base_norm.weights(z) | MQuantileNorm weighting function for the IRLS algorithm
The psi function scaled by z, psi(z) / z
Parameters
----------
z : array_like
1d array
Returns
-------
weights : ndarray | weights | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def psi_deriv(self, z):
'''
The derivative of MQuantileNorm function
Parameters
----------
z : array_like
1d array
Returns
-------
psi_deriv : ndarray
Notes
-----
Used to estimate the robust covariance matrix.
'''
qq = self._get_q(z)
return qq * self.base_norm.psi_deriv(z) | The derivative of MQuantileNorm function
Parameters
----------
z : array_like
1d array
Returns
-------
psi_deriv : ndarray
Notes
-----
Used to estimate the robust covariance matrix. | psi_deriv | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
def estimate_location(a, scale, norm=None, axis=0, initial=None,
maxiter=30, tol=1.0e-06):
"""
M-estimator of location using the given norm and a current
estimate of scale.
This iteratively finds a solution to
norm.psi((a-mu)/scale).sum() == 0
Parameters
----------
a : ndarray
Array over which the location parameter is to be estimated
scale : ndarray
Scale parameter to be used in M-estimator
norm : RobustNorm, optional
Robust norm used in the M-estimator. The default is HuberT().
axis : int, optional
Axis along which to estimate the location parameter. The default is 0.
initial : ndarray, optional
Initial condition for the location parameter. Default is None, which
uses the median of a.
maxiter : int, optional
Maximum number of iterations. The default is 30.
tol : float, optional
Tolerance for convergence. The default is 1e-06.
Returns
-------
mu : ndarray
Estimate of location
"""
if norm is None:
norm = HuberT()
if initial is None:
mu = np.median(a, axis)
else:
mu = initial
for _ in range(maxiter):
W = norm.weights((a-mu)/scale)
nmu = np.sum(W*a, axis) / np.sum(W, axis)
if np.all(np.less(np.abs(mu - nmu), scale * tol)):
return nmu
else:
mu = nmu
raise ValueError("location estimator failed to converge in %d iterations"
% maxiter) | M-estimator of location using the given norm and a current
estimate of scale.
This iteratively finds a solution to
norm.psi((a-mu)/scale).sum() == 0
Parameters
----------
a : ndarray
Array over which the location parameter is to be estimated
scale : ndarray
Scale parameter to be used in M-estimator
norm : RobustNorm, optional
Robust norm used in the M-estimator. The default is HuberT().
axis : int, optional
Axis along which to estimate the location parameter. The default is 0.
initial : ndarray, optional
Initial condition for the location parameter. Default is None, which
uses the median of a.
maxiter : int, optional
Maximum number of iterations. The default is 30.
tol : float, optional
Tolerance for convergence. The default is 1e-06.
Returns
-------
mu : ndarray
Estimate of location | estimate_location | python | statsmodels/statsmodels | statsmodels/robust/norms.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/norms.py | BSD-3-Clause |
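A sketch of the typical call pattern, pairing the location M-estimator with a robust scale such as the MAD (the data values are illustrative; `mad` is the function defined in statsmodels.robust.scale):

import numpy as np

data = np.array([1.0, 1.2, 0.9, 1.1, 8.0])   # one gross outlier
scale = mad(data)
mu = estimate_location(data, scale)           # defaults to the HuberT norm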
def mad(a, c=Gaussian.ppf(3 / 4.0), axis=0, center=np.median):
# c \approx .6745
"""
The Median Absolute Deviation along given axis of an array
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant. Defined as scipy.stats.norm.ppf(3/4.),
which is approximately 0.6745.
axis : int, optional
The default is 0. Can also be None.
center : callable or float
If a callable is provided, such as the default `np.median` then it
is expected to be called center(a). The axis argument will be applied
via np.apply_over_axes. Otherwise, provide a float.
Returns
-------
mad : float
`mad` = median(abs(`a` - center))/`c`
"""
a = array_like(a, "a", ndim=None)
c = float_like(c, "c")
if not a.size:
center_val = 0.0
elif callable(center):
if axis is not None:
center_val = np.apply_over_axes(center, a, axis)
else:
center_val = center(a.ravel())
else:
center_val = float_like(center, "center")
err = (np.abs(a - center_val)) / c
if not err.size:
if axis is None or err.ndim == 1:
return np.nan
else:
shape = list(err.shape)
shape.pop(axis)
return np.empty(shape)
return np.median(err, axis=axis) | The Median Absolute Deviation along given axis of an array
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant. Defined as scipy.stats.norm.ppf(3/4.),
which is approximately 0.6745.
axis : int, optional
The default is 0. Can also be None.
center : callable or float
If a callable is provided, such as the default `np.median` then it
is expected to be called center(a). The axis argument will be applied
via np.apply_over_axes. Otherwise, provide a float.
Returns
-------
mad : float
`mad` = median(abs(`a` - center))/`c` | mad | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
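A quick numerical illustration of the definition median(abs(a - median(a))) / 0.6745 (the exact output depends on floating point; the value shown is rounded):

import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
# median is 3, absolute deviations are [2, 1, 0, 1, 2], whose median is 1
mad(a)   # approximately 1 / 0.6745, i.e. about 1.4826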
def iqr(a, c=Gaussian.ppf(3 / 4) - Gaussian.ppf(1 / 4), axis=0):
"""
The normalized interquartile range along given axis of an array
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant, used to get consistent estimates of the
standard deviation at the normal distribution. Defined as
scipy.stats.norm.ppf(3/4.) - scipy.stats.norm.ppf(1/4.), which is
approximately 1.349.
axis : int, optional
The default is 0. Can also be None.
Returns
-------
The normalized interquartile range
"""
a = array_like(a, "a", ndim=None)
c = float_like(c, "c")
if a.ndim == 0:
raise ValueError("a should have at least one dimension")
elif a.size == 0:
return np.nan
else:
quantiles = np.quantile(a, [0.25, 0.75], axis=axis)
return np.squeeze(np.diff(quantiles, axis=0) / c) | The normalized interquartile range along given axis of an array
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant, used to get consistent estimates of the
standard deviation at the normal distribution. Defined as
scipy.stats.norm.ppf(3/4.) - scipy.stats.norm.ppf(1/4.), which is
approximately 1.349.
axis : int, optional
The default is 0. Can also be None.
Returns
-------
The normalized interquartile range | iqr | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
def qn_scale(a, c=1 / (np.sqrt(2) * Gaussian.ppf(5 / 8)), axis=0):
"""
Computes the Qn robust estimator of scale
The Qn scale estimator is a more efficient alternative to the MAD.
The Qn scale estimator of an array a of length n is defined as
c * {abs(a[i] - a[j]): i<j}_(k), for k equal to [n/2] + 1 choose 2. Thus,
the Qn estimator is the k-th order statistic of the absolute differences
of the array. The optional constant is used to normalize the estimate
as explained below. The implementation follows the algorithm described
in Croux and Rousseeuw (1992).
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant. The default value is used to get consistent
estimates of the standard deviation at the normal distribution.
axis : int, optional
The default is 0.
Returns
-------
{float, ndarray}
The Qn robust estimator of scale
"""
a = array_like(
a, "a", ndim=None, dtype=np.float64, contiguous=True, order="C"
)
c = float_like(c, "c")
if a.ndim == 0:
raise ValueError("a should have at least one dimension")
elif a.size == 0:
return np.nan
else:
out = np.apply_along_axis(_qn, axis=axis, arr=a, c=c)
if out.ndim == 0:
return float(out)
return out | Computes the Qn robust estimator of scale
The Qn scale estimator is a more efficient alternative to the MAD.
The Qn scale estimator of an array a of length n is defined as
c * {abs(a[i] - a[j]): i<j}_(k), for k equal to [n/2] + 1 choose 2. Thus,
the Qn estimator is the k-th order statistic of the absolute differences
of the array. The optional constant is used to normalize the estimate
as explained below. The implementation follows the algorithm described
in Croux and Rousseeuw (1992).
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant. The default value is used to get consistent
estimates of the standard deviation at the normal distribution.
axis : int, optional
The default is 0.
Returns
-------
{float, ndarray}
The Qn robust estimator of scale | qn_scale | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
def _qn_naive(a, c=1 / (np.sqrt(2) * Gaussian.ppf(5 / 8))):
"""
A naive implementation of the Qn robust estimator of scale, used solely
to test the faster, more involved one
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant, used to get consistent estimates of the
standard deviation at the normal distribution. Defined as
1/(np.sqrt(2) * scipy.stats.norm.ppf(5/8)), which is 2.219144.
Returns
-------
The Qn robust estimator of scale
"""
a = np.squeeze(a)
n = a.shape[0]
if a.size == 0:
return np.nan
else:
h = int(n // 2 + 1)
k = int(h * (h - 1) / 2)
idx = np.triu_indices(n, k=1)
diffs = np.abs(a[idx[0]] - a[idx[1]])
output = np.partition(diffs, kth=k - 1)[k - 1]
output = c * output
return output | A naive implementation of the Qn robust estimator of scale, used solely
to test the faster, more involved one
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant, used to get consistent estimates of the
standard deviation at the normal distribution. Defined as
1/(np.sqrt(2) * scipy.stats.norm.ppf(5/8)), which is 2.219144.
Returns
-------
The Qn robust estimator of scale | _qn_naive | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
def __call__(self, a, mu=None, initscale=None, axis=0):
"""
Compute Huber's proposal 2 estimate of scale, using an optional
initial value of scale and an optional estimate of mu. If mu
is supplied, it is not reestimated.
Parameters
----------
a : ndarray
1d array
mu : float or None, optional
If the location mu is supplied then it is not reestimated.
Default is None, which means that it is estimated.
initscale : float or None, optional
A first guess on scale. If initscale is None then the standardized
median absolute deviation of a is used.
Notes
-----
`Huber` minimizes the function
sum(psi((a[i]-mu)/scale)**2)
as a function of (mu, scale), where
psi(x) = np.clip(x, -self.c, self.c)
"""
a = np.asarray(a)
if mu is None:
n = a.shape[axis] - 1
mu = np.median(a, axis=axis)
est_mu = True
else:
n = a.shape[axis]
mu = mu
est_mu = False
if initscale is None:
scale = mad(a, axis=axis)
else:
scale = initscale
scale = tools.unsqueeze(scale, axis, a.shape)
mu = tools.unsqueeze(mu, axis, a.shape)
return self._estimate_both(a, scale, mu, axis, est_mu, n) | Compute Huber's proposal 2 estimate of scale, using an optional
initial value of scale and an optional estimate of mu. If mu
is supplied, it is not reestimated.
Parameters
----------
a : ndarray
1d array
mu : float or None, optional
If the location mu is supplied then it is not reestimated.
Default is None, which means that it is estimated.
initscale : float or None, optional
A first guess on scale. If initscale is None then the standardized
median absolute deviation of a is used.
Notes
-----
`Huber` minimizes the function
sum(psi((a[i]-mu)/scale)**2)
as a function of (mu, scale), where
psi(x) = np.clip(x, -self.c, self.c) | __call__ | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
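A sketch of the joint estimation (Huber's proposal 2): the class is instantiated and then called on the data; the call returns the location and scale estimates as a pair (this is how the statsmodels Huber estimator behaves, via _estimate_both):

import numpy as np

x = np.random.default_rng(0).standard_normal(100)
loc, scale = Huber()(x)   # joint (location, scale) estimate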
def fit(self, data, start_scale='mad', maxiter=100, rtol=1e-6, atol=1e-8):
"""
Estimate M-scale using iteration.
Parameters
----------
data : array-like
Data, currently assumed to be 1-dimensional.
start_scale : string or float.
Starting value of scale or method to compute the starting value.
Default is using 'mad', no other string options are available.
maxiter : int
Maximum number of iterations.
rtol : float
Relative convergence tolerance.
atol : float
Absolute convergence tolerance.
Returns
-------
float : Scale estimate. The estimated variance is scale squared.
Todo: switch to Holder instance with more information.
"""
scale = _scale_iter(
data,
scale0=start_scale,
maxiter=maxiter, rtol=rtol, atol=atol,
meef_scale=self.chi_func,
scale_bias=self.scale_bias,
)
return scale | Estimate M-scale using iteration.
Parameters
----------
data : array-like
Data, currently assumed to be 1-dimensional.
start_scale : string or float.
Starting value of scale or method to compute the starting value.
Default is using 'mad', no other string options are available.
maxiter : int
Maximum number of iterations.
rtol : float
Relative convergence tolerance.
atol : float
Absolute convergence tolerance.
Returns
-------
float : Scale estimate. The estimated variance is scale squared.
Todo: switch to Holder instance with more information. | fit | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
def scale_trimmed(data, alpha, center='median', axis=0, distr=None,
distargs=None):
"""scale estimate based on symmetrically trimmed sample
The scale estimate is robust to a fraction alpha of outliers on each
tail.
The scale is normalized to correspond to a reference distribution, which
is the normal distribution by default.
Parameters
----------
data : array_like
dataset, by default (axis=0) observations are assumed to be in rows
and variables in columns.
alpha : float in interval (0, 1)
Trimming fraction in each tail. The floor(nobs * alpha) smallest
observations are trimmed, and the same number of the largest
observations are trimmed. The scale estimate is based on a fraction
(1 - 2 * alpha) of observations.
center : 'median', 'mean', 'tmean' or number
`center` defines how the trimmed sample is centered. 'median' and
'mean' are calculated on the full sample. `tmean` is the trimmed
mean, calculated with the trimmed sample. If `center` is array_like
then it needs to be scalar or correspond to the shape of the data
reduced by axis.
axis : int, default is 0
axis along which scale is estimated.
distr : None, 'raw' or a distribution instance
Default if distr is None is the normal distribution `scipy.stats.norm`.
This is the reference distribution to normalize the scale.
Note: This cannot be a frozen instance, since it does not have an
`expect` method.
If distr is 'raw', then the scale is not normalized.
distargs :
Arguments for the distribution.
Returns
-------
scale : float or array
the estimated scale normalized for the reference distribution.
Examples
--------
for normal distribution
>>> np.random.seed(1)
>>> x = 2 * np.random.randn(100)
>>> scale_trimmed(x, 0.1)
1.7479516739879672
for t distribution
>>> xt = stats.t.rvs(3, size=1000, scale=2)
>>> print(scale_trimmed(xt, alpha, distr=stats.t, distargs=(3,)))
2.06574778599
compare to standard deviation of sample
>>> xt.std()
3.1457788359130481
"""
if distr is None:
distr = stats.norm
if distargs is None:
distargs = ()
x = np.array(data) # make copy for inplace sort
if axis is None:
x = x.ravel()
axis = 0
# TODO: latest numpy has partial sort
x.sort(axis)
nobs = x.shape[axis]
if distr == 'raw':
c_inv = 1
else:
bound = distr.ppf(1 - alpha, *distargs)
c_inv = distr.expect(lambda x: x*x, lb=-bound, ub=bound, args=distargs)
cut_idx = np.floor(nobs * alpha).astype(int)
sl = [slice(None, None, None)] * x.ndim
sl[axis] = slice(cut_idx, -cut_idx)
# x_trimmed = x[cut_idx:-cut_idx]
# cut in axis
x_trimmed = x[tuple(sl)]
center_type = center
if center in ['med', 'median']:
center = np.median(x, axis=axis)
elif center == 'mean':
center = np.mean(x, axis=axis)
elif center == 'tmean':
center = np.mean(x_trimmed, axis=axis)
else:
# assume number
center_type = 'user'
center_ndim = np.ndim(center)
if (center_ndim > 0) and (center_ndim < x.ndim):
center = np.expand_dims(center, axis)
s_raw = ((x_trimmed - center)**2).sum(axis)
scale = np.sqrt(s_raw / nobs / c_inv)
res = Holder(scale=scale,
center=center,
center_type=center_type,
trim_idx=cut_idx,
nobs=nobs,
distr=distr,
scale_correction=1. / c_inv)
return res | scale estimate based on symmetrically trimmed sample
The scale estimate is robust to a fraction alpha of outliers on each
tail.
The scale is normalized to correspond to a reference distribution, which
is the normal distribution by default.
Parameters
----------
data : array_like
dataset, by default (axis=0) observations are assumed to be in rows
and variables in columns.
alpha : float in interval (0, 1)
Trimming fraction in each tail. The floor(nobs * alpha) smallest
observations are trimmed, and the same number of the largest
observations are trimmed. The scale estimate is based on a fraction
(1 - 2 * alpha) of observations.
center : 'median', 'mean', 'tmean' or number
`center` defines how the trimmed sample is centered. 'median' and
'mean' are calculated on the full sample. `tmean` is the trimmed
mean, calculated with the trimmed sample. If `center` is array_like
then it needs to be scalar or correspond to the shape of the data
reduced by axis.
axis : int, default is 0
axis along which scale is estimated.
distr : None, 'raw' or a distribution instance
Default if distr is None is the normal distribution `scipy.stats.norm`.
This is the reference distribution to normalize the scale.
Note: This cannot be a frozen instance, since it does not have an
`expect` method.
If distr is 'raw', then the scale is not normalized.
distargs :
Arguments for the distribution.
Returns
-------
scale : float or array
the estimated scale normalized for the reference distribution.
Examples
--------
for normal distribution
>>> np.random.seed(1)
>>> x = 2 * np.random.randn(100)
>>> scale_trimmed(x, 0.1)
1.7479516739879672
for t distribution
>>> xt = stats.t.rvs(3, size=1000, scale=2)
>>> print(scale_trimmed(xt, alpha, distr=stats.t, distargs=(3,)))
2.06574778599
compare to standard deviation of sample
>>> xt.std()
3.1457788359130481 | scale_trimmed | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
def _weight_mean(x, c):
"""Tukey-biweight, bisquare weights used in tau scale.
Parameters
----------
x : ndarray
Data
c : float
Parameter for bisquare weights
Returns
-------
ndarray : weights
"""
x = np.asarray(x)
w = (1 - (x / c)**2)**2 * (np.abs(x) <= c)
return w | Tukey-biweight, bisquare weights used in tau scale.
Parameters
----------
x : ndarray
Data
c : float
Parameter for bisquare weights
Returns
-------
ndarray : weights | _weight_mean | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
def _winsor(x, c):
"""Winsorized squared data used in tau scale.
Parameters
----------
x : ndarray
Data
c : float
threshold
Returns
-------
winsorized squared data, ``np.minimum(x**2, c**2)``
"""
return np.minimum(x**2, c**2) | Winsorized squared data used in tau scale.
Parameters
----------
x : ndarray
Data
c : float
threshold
Returns
-------
winsorized squared data, ``np.minimum(x**2, c**2)`` | _winsor | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
def scale_tau(data, cm=4.5, cs=3, weight_mean=_weight_mean,
weight_scale=_winsor, normalize=True, ddof=0):
"""Tau estimator of univariate scale.
Experimental, API will change
Parameters
----------
data : array_like, 1-D or 2-D
If data is 2d, then the location and scale estimates
are calculated for each column
cm : float
constant used in call to weight_mean
cs : float
constant used in call to weight_scale
weight_mean : callable
function to calculate weights for weighted mean
weight_scale : callable
function to calculate scale, "rho" function
normalize : bool
rescale the scale estimate so it is consistent when the data is
normally distributed. The computation assumes winsorized (truncated)
variance.
Returns
-------
mean : nd_array
robust mean
std : nd_array
robust estimate of scale (standard deviation)
Notes
-----
Uses definition of Maronna and Zamar 2002, with weighted mean and
trimmed variance.
The normalization has been added to match R robustbase.
R robustbase uses by default ddof=0, with option to set it to 2.
References
----------
.. [1] Maronna, Ricardo A, and Ruben H Zamar. “Robust Estimates of Location
and Dispersion for High-Dimensional Datasets.” Technometrics 44, no. 4
(November 1, 2002): 307–17. https://doi.org/10.1198/004017002188618509.
"""
x = np.asarray(data)
nobs = x.shape[0]
med_x = np.median(x, 0)
xdm = x - med_x
mad_x = np.median(np.abs(xdm), 0)
wm = weight_mean(xdm / mad_x, cm)
mean = (wm * x).sum(0) / wm.sum(0)
var = (mad_x**2 * weight_scale((x - mean) / mad_x, cs).sum(0) /
(nobs - ddof))
cf = 1
if normalize:
c = cs * stats.norm.ppf(0.75)
cf = 2 * ((1 - c**2) * stats.norm.cdf(c) - c * stats.norm.pdf(c)
+ c**2) - 1
# return Holder(loc=mean, scale=np.sqrt(var / cf))
return mean, np.sqrt(var / cf) | Tau estimator of univariate scale.
Experimental, API will change
Parameters
----------
data : array_like, 1-D or 2-D
If data is 2d, then the location and scale estimates
are calculated for each column
cm : float
constant used in call to weight_mean
cs : float
constant used in call to weight_scale
weight_mean : callable
function to calculate weights for weighted mean
weight_scale : callable
function to calculate scale, "rho" function
normalize : bool
rescale the scale estimate so it is consistent when the data is
normally distributed. The computation assumes winsorized (truncated)
variance.
Returns
-------
mean : nd_array
robust mean
std : nd_array
robust estimate of scale (standard deviation)
Notes
-----
Uses definition of Maronna and Zamar 2002, with weighted mean and
trimmed variance.
The normalization has been added to match R robustbase.
R robustbase uses by default ddof=0, with option to set it to 2.
References
----------
.. [1] Maronna, Ricardo A, and Ruben H Zamar. “Robust Estimates of Location
and Dispersion for High-Dimensional Datasets.” Technometrics 44, no. 4
(November 1, 2002): 307–17. https://doi.org/10.1198/004017002188618509. | scale_tau | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
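A sketch of the tau-estimator call; it returns the robust mean and the robust standard deviation as a pair, both largely insensitive to the planted outliers:

import numpy as np

rng = np.random.default_rng(1)
x = np.concatenate([rng.standard_normal(95), np.full(5, 50.0)])  # 5 outliers at 50
mean_tau, std_tau = scale_tau(x)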
def _scale_iter(data, scale0='mad', maxiter=100, rtol=1e-6, atol=1e-8,
meef_scale=None, scale_bias=None, iter_method="rho", ddof=0):
"""iterative scale estimate base on "rho" function
"""
x = np.asarray(data)
nobs = x.shape[0]
if scale0 == 'mad':
scale0 = mad(x, center=0)
for i in range(maxiter):
x_scaled = x / scale0
if iter_method == "rho":
scale = scale0 * np.sqrt(
np.sum(meef_scale(x / scale0)) / scale_bias / (nobs - ddof))
else:
weights_scale = meef_scale(x_scaled) / (1e-50 + x_scaled**2)
scale2 = (weights_scale * x**2).sum() / (nobs - ddof)
scale2 /= scale_bias
scale = np.sqrt(scale2)
if debug:
print(scale)
if np.allclose(scale, scale0, atol=atol, rtol=rtol):
break
scale0 = scale
return scale | Iterative scale estimate based on the "rho" function | _scale_iter | python | statsmodels/statsmodels | statsmodels/robust/scale.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/scale.py | BSD-3-Clause |
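For reference, the fixed-point update iterated by the "rho" branch above until successive scales agree within tolerance (b is the scale bias, n the number of observations):

s_{j+1} = s_j \sqrt{\frac{1}{b\,(n - \mathrm{ddof})} \sum_{i=1}^{n} \rho\!\left(\frac{x_i}{s_j}\right)}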
def fit(self, h=None, scale_binding=False, start=None):
"""Estimate the model
Parameters
----------
h : int
The size of the initial sets for the S-estimator.
Default is .... (todo)
scale_binding : bool
If true, then the scale is fixed in the second stage M-estimation,
i.e. this is the MM-estimator.
If false, then the high breakdown point M-scale is used also in the
second stage M-estimation if that estimated scale is smaller than
the scale of the preliminary, first stage S-estimator.
start : tuple or None
If None, then the starting parameters and scale for the second
stage M-estimation are taken from the first stage S-estimator.
Alternatively, the starting parameters and starting scale can be
provided by the user as tuple (start_params, start_scale). In this
case the first stage S-estimation is skipped.
maxiter, other optimization parameters are still missing (todo)
Returns
-------
results instance
Notes
-----
If scale_binding is true, then the estimator is a standard
MM-estimator with fixed scale in the second stage M-estimation.
If scale_binding is false, then the estimator will try to find an
estimate with lower M-scale using the same scale-norm rho as in the
first stage S-estimator. If the estimated scale is not smaller than
the scale estimated in the first stage S-estimator, then the
fixed scale MM-estimator is returned.
"""
norm_m = self.norm_mean
if start is None:
res_s = super().fit(h)
start_params = np.asarray(res_s.params)
start_scale = res_s.scale
else:
start_params, start_scale = start
res_s = None
mod_m = RLM(self.endog, self.exog, M=norm_m)
res_mm = mod_m.fit(
start_params=start_params,
start_scale=start_scale,
update_scale=False
)
if not scale_binding:
# we can compute this first and skip MM if the scale decreases
mod_sm = RLM(self.endog, self.exog, M=norm_m)
res_sm = mod_sm.fit(
start_params=start_params,
scale_est=self.mscale
)
if not scale_binding and res_sm.scale < res_mm.scale:
res = res_sm
else:
res = res_mm
res._results.results_dets = res_s
return res | Estimate the model
Parameters
----------
h : int
The size of the initial sets for the S-estimator.
Default is .... (todo)
scale_binding : bool
If true, then the scale is fixed in the second stage M-estimation,
i.e. this is the MM-estimator.
If false, then the high breakdown point M-scale is used also in the
second stage M-estimation if that estimated scale is smaller than
            the scale of the preliminary, first stage S-estimator.
start : tuple or None
If None, then the starting parameters and scale for the second
            stage M-estimation are taken from the first stage S-estimator.
Alternatively, the starting parameters and starting scale can be
provided by the user as tuple (start_params, start_scale). In this
            case the first stage S-estimation is skipped.
maxiter, other optimization parameters are still missing (todo)
Returns
-------
results instance
Notes
-----
        If scale_binding is true, then the estimator is a standard
        MM-estimator with fixed scale in the second stage M-estimation.
        If scale_binding is false, then the estimator will try to find an
        estimate with lower M-scale using the same scale-norm rho as in the
        first stage S-estimator. If the estimated scale is not smaller than
        the scale estimated in the first stage S-estimator, then the
        fixed scale MM-estimator is returned. | fit | python | statsmodels/statsmodels | statsmodels/robust/resistant_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/resistant_linear_model.py | BSD-3-Clause |
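The second-stage M-step above amounts to refitting RLM with the scale held fixed at the first-stage value. A hedged usage sketch; the data, the TukeyBiweight norm and the starting values stand in for what the first stage S-estimator would provide.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = sm.add_constant(rng.standard_normal((100, 2)))
y = x @ np.array([1.0, 2.0, -1.0]) + rng.standard_normal(100)

start_params = np.array([1.0, 2.0, -1.0])    # e.g. from a preliminary S-estimator
start_scale = 1.0                            # e.g. the S-estimate of scale

mod = sm.RLM(y, x, M=sm.robust.norms.TukeyBiweight(c=4.685))
res_mm = mod.fit(start_params=start_params, start_scale=start_scale,
                 update_scale=False)         # scale stays fixed: the MM step
print(res_mm.params, res_mm.scale)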
def _initialize(self):
"""
Initializes the model for the IRLS fit.
Resets the history and number of iterations.
"""
self.pinv_wexog = np.linalg.pinv(self.exog)
self.normalized_cov_params = np.dot(self.pinv_wexog,
np.transpose(self.pinv_wexog))
self.df_resid = (float(self.exog.shape[0] -
np.linalg.matrix_rank(self.exog)))
self.df_model = float(np.linalg.matrix_rank(self.exog) - 1)
self.nobs = float(self.endog.shape[0]) | Initializes the model for the IRLS fit.
Resets the history and number of iterations. | _initialize | python | statsmodels/statsmodels | statsmodels/robust/robust_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/robust_linear_model.py | BSD-3-Clause |
def predict(self, params, exog=None):
"""
Return linear predicted values from a design matrix.
Parameters
----------
params : array_like
Parameters of a linear model
exog : array_like, optional.
Design / exogenous data. Model exog is used if None.
Returns
-------
An array of fitted values
"""
# copied from linear_model # TODO: then is it needed?
if exog is None:
exog = self.exog
return np.dot(exog, params) | Return linear predicted values from a design matrix.
Parameters
----------
params : array_like
Parameters of a linear model
exog : array_like, optional.
Design / exogenous data. Model exog is used if None.
Returns
-------
An array of fitted values | predict | python | statsmodels/statsmodels | statsmodels/robust/robust_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/robust_linear_model.py | BSD-3-Clause |
def deviance(self, tmp_results):
"""
Returns the (unnormalized) log-likelihood from the M estimator.
"""
tmp_resid = self.endog - tmp_results.fittedvalues
return self.M(tmp_resid / tmp_results.scale).sum() | Returns the (unnormalized) log-likelihood from the M estimator. | deviance | python | statsmodels/statsmodels | statsmodels/robust/robust_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/robust_linear_model.py | BSD-3-Clause |
def _estimate_scale(self, resid):
"""
Estimates the scale based on the option provided to the fit method.
"""
if isinstance(self.scale_est, str):
if self.scale_est.lower() == 'mad':
return scale.mad(resid, center=0)
else:
raise ValueError("Option %s for scale_est not understood" %
self.scale_est)
elif isinstance(self.scale_est, scale.HuberScale):
return self.scale_est(self.df_resid, self.nobs, resid)
else:
# use df correction to match HuberScale
return self.scale_est(resid) * np.sqrt(self.nobs / self.df_resid) | Estimates the scale based on the option provided to the fit method. | _estimate_scale | python | statsmodels/statsmodels | statsmodels/robust/robust_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/robust_linear_model.py | BSD-3-Clause |
def fit(self, maxiter=50, tol=1e-8, scale_est='mad', init=None, cov='H1',
update_scale=True, conv='dev', start_params=None, start_scale=None,
):
"""
Fits the model using iteratively reweighted least squares.
The IRLS routine runs until the specified objective converges to `tol`
or `maxiter` has been reached.
Parameters
----------
conv : str
Indicates the convergence criteria.
Available options are "coefs" (the coefficients), "weights" (the
weights in the iteration), "sresid" (the standardized residuals),
and "dev" (the un-normalized log-likelihood for the M
estimator). The default is "dev".
cov : str, optional
'H1', 'H2', or 'H3'
Indicates how the covariance matrix is estimated. Default is 'H1'.
See rlm.RLMResults for more information.
init : str
Specifies method for the initial estimates of the parameters.
Default is None, which means that the least squares estimate
is used. Currently it is the only available choice.
Deprecated and will be removed. There is no choice here.
maxiter : int
The maximum number of iterations to try. Default is 50.
scale_est : str or HuberScale()
'mad' or HuberScale()
Indicates the estimate to use for scaling the weights in the IRLS.
            The default is 'mad' (median absolute deviation). Other options are
'HuberScale' for Huber's proposal 2. Huber's proposal 2 has
optional keyword arguments d, tol, and maxiter for specifying the
tuning constant, the convergence tolerance, and the maximum number
of iterations. See statsmodels.robust.scale for more information.
tol : float
The convergence tolerance of the estimate. Default is 1e-8.
update_scale : Bool
If `update_scale` is False then the scale estimate for the
weights is held constant over the iteration. Otherwise, it
is updated for each fit in the iteration. Default is True.
start_params : array_like, optional
Initial guess of the solution of the optimizer. If not provided,
the initial parameters are computed using OLS.
start_scale : float, optional
Initial scale. If update_scale is False, then the scale will be
            fixed at this level for the estimation of the mean parameters
            during iteration. If not provided, then the initial scale is
            estimated from the OLS residuals.
Returns
-------
results : statsmodels.rlm.RLMresults
Results instance
"""
if cov.upper() not in ["H1", "H2", "H3"]:
raise ValueError("Covariance matrix %s not understood" % cov)
else:
self.cov = cov.upper()
conv = conv.lower()
if conv not in ["weights", "coefs", "dev", "sresid"]:
raise ValueError("Convergence argument %s not understood" % conv)
self.scale_est = scale_est
if start_params is None:
wls_results = lm.WLS(self.endog, self.exog).fit()
else:
start_params = np.asarray(start_params, dtype=np.double).squeeze()
start_params = np.atleast_1d(start_params)
if (start_params.shape[0] != self.exog.shape[1] or
start_params.ndim != 1):
                raise ValueError('start_params must be a 1-d array with {} '
'values'.format(self.exog.shape[1]))
fake_wls = reg_tools._MinimalWLS(self.endog, self.exog,
weights=np.ones_like(self.endog),
check_weights=False)
wls_results = fake_wls.results(start_params)
if not init and not start_scale:
self.scale = self._estimate_scale(wls_results.resid)
elif start_scale:
self.scale = start_scale
if not update_scale:
self.scale_est = scale_est = "fixed"
history = dict(params=[np.inf], scale=[])
if conv == 'coefs':
criterion = history['params']
elif conv == 'dev':
history.update(dict(deviance=[np.inf]))
criterion = history['deviance']
elif conv == 'sresid':
history.update(dict(sresid=[np.inf]))
criterion = history['sresid']
elif conv == 'weights':
history.update(dict(weights=[np.inf]))
criterion = history['weights']
# done one iteration so update
history = self._update_history(wls_results, history, conv)
iteration = 1
converged = 0
while not converged:
if self.scale == 0.0:
import warnings
                warnings.warn('Estimated scale is 0.0 indicating that the '
                              'most recent iteration produced a perfect fit '
                              'of the weighted data.', ConvergenceWarning)
break
self.weights = self.M.weights(wls_results.resid / self.scale)
wls_results = reg_tools._MinimalWLS(self.endog, self.exog,
weights=self.weights,
check_weights=True).fit()
if update_scale is True:
self.scale = self._estimate_scale(wls_results.resid)
history = self._update_history(wls_results, history, conv)
iteration += 1
converged = _check_convergence(criterion, iteration, tol, maxiter)
results = RLMResults(self, wls_results.params,
self.normalized_cov_params, self.scale)
history['iteration'] = iteration
results.fit_history = history
results.fit_options = dict(cov=cov.upper(), scale_est=scale_est,
norm=self.M.__class__.__name__, conv=conv)
# norm is not changed in fit, no old state
# doing the next causes exception
# self.cov = self.scale_est = None #reset for additional fits
# iteration and history could contain wrong state with repeated fit
return RLMResultsWrapper(results) | Fits the model using iteratively reweighted least squares.
The IRLS routine runs until the specified objective converges to `tol`
or `maxiter` has been reached.
Parameters
----------
conv : str
Indicates the convergence criteria.
Available options are "coefs" (the coefficients), "weights" (the
weights in the iteration), "sresid" (the standardized residuals),
and "dev" (the un-normalized log-likelihood for the M
estimator). The default is "dev".
cov : str, optional
'H1', 'H2', or 'H3'
Indicates how the covariance matrix is estimated. Default is 'H1'.
See rlm.RLMResults for more information.
init : str
Specifies method for the initial estimates of the parameters.
Default is None, which means that the least squares estimate
is used. Currently it is the only available choice.
Deprecated and will be removed. There is no choice here.
maxiter : int
The maximum number of iterations to try. Default is 50.
scale_est : str or HuberScale()
'mad' or HuberScale()
Indicates the estimate to use for scaling the weights in the IRLS.
            The default is 'mad' (median absolute deviation). Other options are
'HuberScale' for Huber's proposal 2. Huber's proposal 2 has
optional keyword arguments d, tol, and maxiter for specifying the
tuning constant, the convergence tolerance, and the maximum number
of iterations. See statsmodels.robust.scale for more information.
tol : float
The convergence tolerance of the estimate. Default is 1e-8.
update_scale : Bool
If `update_scale` is False then the scale estimate for the
weights is held constant over the iteration. Otherwise, it
is updated for each fit in the iteration. Default is True.
start_params : array_like, optional
Initial guess of the solution of the optimizer. If not provided,
the initial parameters are computed using OLS.
start_scale : float, optional
Initial scale. If update_scale is False, then the scale will be
            fixed at this level for the estimation of the mean parameters
            during iteration. If not provided, then the initial scale is
            estimated from the OLS residuals.
Returns
-------
results : statsmodels.rlm.RLMresults
Results instance | fit | python | statsmodels/statsmodels | statsmodels/robust/robust_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/robust_linear_model.py | BSD-3-Clause |
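A short usage sketch of the options described above; the simulated data, the Huber norm and the particular option choices are illustrative, not prescriptive.

import numpy as np
import statsmodels.api as sm
from statsmodels.robust.scale import HuberScale

rng = np.random.default_rng(1)
x = sm.add_constant(rng.standard_normal((200, 2)))
y = x @ np.array([0.5, 1.0, -2.0]) + rng.standard_normal(200)
y[:10] += 10                                  # a few gross outliers

res = sm.RLM(y, x, M=sm.robust.norms.HuberT()).fit(
    scale_est=HuberScale(), conv="coefs", cov="H2")
print(res.params)
print(res.fit_history["iteration"])           # number of IRLS iterations used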
def summary(self, yname=None, xname=None, title=0, alpha=.05,
return_fmt='text'):
"""
This is for testing the new summary setup
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['IRLS']),
('Norm:', [self.fit_options['norm']]),
('Scale Est.:', [self.fit_options['scale_est']]),
('Cov Type:', [self.fit_options['cov']]),
('Date:', None),
('Time:', None),
('No. Iterations:', ["%d" % self.fit_history['iteration']])
]
top_right = [('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None)
]
if title is not None:
title = "Robust linear Model Regression Results"
# boiler plate
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
# add warnings/notes, added to text format only
etext = []
wstr = ("If the model instance has been used for another fit with "
"different fit parameters, then the fit options might not be "
"the correct ones anymore .")
etext.append(wstr)
if etext:
smry.add_extra_txt(etext)
return smry | This is for testing the new summary setup | summary | python | statsmodels/statsmodels | statsmodels/robust/robust_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/robust_linear_model.py | BSD-3-Clause |
def summary2(self, xname=None, yname=None, title=None, alpha=.05,
float_format="%.4f"):
"""Experimental summary function for regression results
Parameters
----------
yname : str
Name of the dependent variable (optional)
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format : str
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
return smry | Experimental summary function for regression results
Parameters
----------
yname : str
Name of the dependent variable (optional)
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format : str
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results | summary2 | python | statsmodels/statsmodels | statsmodels/robust/robust_linear_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/robust_linear_model.py | BSD-3-Clause |
def coef_normalize_cov_truncated(frac, k_vars):
"""factor for consistency of truncated cov at normal distribution
This is usually denoted by `b`. Here, it is calculated as `1 / b`.
Trimming threshold is based on chisquare distribution.
Parameters
----------
frac: float in (0, 1)
fraction (probability) of observations that are not trimmed
k_vars : integer
number of variables, i.e. dimension of multivariate random variable
Returns
-------
fac : float
factor to multiply the raw trimmed covariance
Notes
-----
TODO: it might be better to use alpha = 1 - frac as argument instead.
Uses explicit formula from Riani, Cerioli and Torti (2014) equation (3)
    which is also in Rocke and Woodruff (1996) Outliers equation (5).
References
----------
.. [1] Riani, Marco, Andrea Cerioli, and Francesca Torti. “On Consistency
Factors and Efficiency of Robust S-Estimators.” TEST 23, no. 2 (February
4, 2014): 356–87. https://doi.org/10.1007/s11749-014-0357-7.
.. [2] Rocke, David M., and David L. Woodruff. “Identification of Outliers
in Multivariate Data.” Journal of the American Statistical
Association 91, no. 435 (1996): 1047–61.
https://doi.org/10.2307/2291724.
"""
# todo: use isf(alpha, k_vars) instead?
fac = 1 / (stats.chi2.cdf(stats.chi2.ppf(frac, k_vars), k_vars + 2) / frac)
return fac | factor for consistency of truncated cov at normal distribution
This is usually denoted by `b`. Here, it is calculated as `1 / b`.
Trimming threshold is based on chisquare distribution.
Parameters
----------
frac: float in (0, 1)
fraction (probability) of observations that are not trimmed
k_vars : integer
number of variables, i.e. dimension of multivariate random variable
Returns
-------
fac : float
factor to multiply the raw trimmed covariance
Notes
-----
TODO: it might be better to use alpha = 1 - frac as argument instead.
Uses explicit formula from Riani, Cerioli and Torti (2014) equation (3)
    which is also in Rocke and Woodruff (1996) Outliers equation (5).
References
----------
.. [1] Riani, Marco, Andrea Cerioli, and Francesca Torti. “On Consistency
Factors and Efficiency of Robust S-Estimators.” TEST 23, no. 2 (February
4, 2014): 356–87. https://doi.org/10.1007/s11749-014-0357-7.
.. [2] Rocke, David M., and David L. Woodruff. “Identification of Outliers
in Multivariate Data.” Journal of the American Statistical
Association 91, no. 435 (1996): 1047–61.
https://doi.org/10.2307/2291724. | coef_normalize_cov_truncated | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
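A quick numeric check of the factor defined above, evaluating the same chi-square expression directly with scipy; the frac and k_vars values are arbitrary.

from scipy import stats

frac, k_vars = 0.95, 3
cutoff = stats.chi2.ppf(frac, k_vars)           # trimming threshold for the distances
b = stats.chi2.cdf(cutoff, k_vars + 2) / frac   # consistency term from equation (3)
fac = 1 / b                                     # multiply the raw trimmed cov by this
print(cutoff, fac)                              # fac is somewhat larger than 1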
def _reweight(x, loc, cov, trim_frac=0.975, ddof=1):
"""Reweighting step, trims data and computes Pearson covariance
Parameters
----------
x : ndarray
Multivariate data with observation in rows
loc : ndarray
Location, mean or center of the data.
cov : ndarray
Covariance for computing Mahalanobis distance
trim_frac : float in (0, 1)
# todo: change name
This is the coverage, (1 - trim_frac) is tail probability for chi2
distribution.
ddof : int or float
Delta degrees of freedom used for trimmed Pearson covariance
computed with `np.cov`.
Returns
-------
cov : ndarray
Covariance matrix of trimmed data, not rescaled to account for
trimming.
loc : ndarray
Mean of trimmed data.
See Also
--------
coef_normalize_cov_truncated
Notes
-----
This reweighting step is used in OGK and in literature also for MCD.
Trimming is metric with cutoff computed under the assumption that the
Mahalanobis distances are chi-square distributed.
References
----------
???
"""
beta = trim_frac
nobs, k_vars = x.shape # noqa: F841
# d = (((z - loc_z) / scale_z)**2).sum(1) # for orthogonal
d = mahalanobis(x - loc, cov)
# only hard thresholding right now
dmed = np.median(d)
cutoff = (dmed * stats.chi2.isf(1-beta, k_vars) /
stats.chi2.ppf(0.5, k_vars))
mask = d <= cutoff
sample = x[mask]
loc = sample.mean(0)
cov = np.cov(sample.T, ddof=ddof)
return cov, loc | Reweighting step, trims data and computes Pearson covariance
Parameters
----------
x : ndarray
Multivariate data with observation in rows
loc : ndarray
Location, mean or center of the data.
cov : ndarray
Covariance for computing Mahalanobis distance
trim_frac : float in (0, 1)
# todo: change name
This is the coverage, (1 - trim_frac) is tail probability for chi2
distribution.
ddof : int or float
Delta degrees of freedom used for trimmed Pearson covariance
computed with `np.cov`.
Returns
-------
cov : ndarray
Covariance matrix of trimmed data, not rescaled to account for
trimming.
loc : ndarray
Mean of trimmed data.
See Also
--------
coef_normalize_cov_truncated
Notes
-----
This reweighting step is used in OGK and in literature also for MCD.
Trimming is metric with cutoff computed under the assumption that the
Mahalanobis distances are chi-square distributed.
References
----------
??? | _reweight | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def _rescale(x, loc, cov, prob=0.5):
"""rescale covariance to be consistent with normal distribution
This matches median of mahalanobis distance with the chi-square
distribution. This assumes that the data is normally distributed.
Parameters
----------
x : array-like
sample data, 2-dim with observation in rows
loc : ndarray
mean or center of data
cov : ndarray
covariance estimate
Returns
-------
ndarray: rescaled covariance
Notes
-----
This rescaling is used in several functions to compute rescaled
Mahalanobis distances for trimming.
"""
if prob != 0.5:
raise ValueError("currently only median prob=0.5 supported")
x = np.asarray(x)
k_vars = x.shape[1]
d = mahalanobis(x - loc, cov)
dmed = np.median(d)
fac = dmed / stats.chi2.ppf(prob, k_vars)
return cov * fac | rescale covariance to be consistent with normal distribution
This matches median of mahalanobis distance with the chi-square
distribution. This assumes that the data is normally distributed.
Parameters
----------
x : array-like
sample data, 2-dim with observation in rows
loc : ndarray
mean or center of data
cov : ndarray
covariance estimate
Returns
-------
ndarray: rescaled covariance
Notes
-----
This rescaling is used in several functions to compute rescaled
Mahalanobis distances for trimming. | _rescale | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def _outlier_gy(d, distr=None, k_endog=1, trim_prob=0.975):
"""determine outlier fraction given reference distribution
This implements the outlier cutoff of Gervini and Yohai 2002
for use in efficient reweighting.
Parameters
----------
d : array_like, 1-D
array of squared standardized residuals or Mahalanobis distance
distr : None or distribution instance
reference distribution of d, needs cdf and ppf methods.
If None, then chisquare with k_endog degrees of freedom is
used. Otherwise, it should be a callable that provides the
cdf function
k_endog : int or float
used only if cdf is None. In that case, it provides the degrees
of freedom for the chisquare distribution.
trim_prob : float in (0.5, 1)
threshold for the tail probability at which the search for
trimming or outlier fraction starts.
Returns
-------
frac : float
fraction of outliers
cutoff : float
cutoff value, values with `d > cutoff` are considered outliers
ntail : int
number of outliers
ntail0 : int
initial number of outliers based on trim tail probability.
cutoff0 : float
initial cutoff value based on trim tail probability.
Notes
-----
This does not fully correct for multiple testing and does not
maintain a familywise error rate or false discovery rate.
The error rate goes to zero asymptotically under the null model,
i.e. if there are no outliers.
This might not handle threshold points correctly with discrete
distribution.
TODO: check weak versus strict inequalities (e.g. in isf)
This only checks the upper tail of the distribution and of `d`.
"""
d = np.asarray(d)
nobs = d.shape[0]
if distr is None:
distr = stats.chi2(k_endog)
threshold = distr.isf(1 - trim_prob)
# get sorted array, we only need upper tail
dtail = np.sort(d[d >= threshold])
ntail0 = len(dtail)
if ntail0 == 0:
# no values above threshold
return 0, threshold, 0, 0, threshold
# using (n-1) / n as in GY2002
ranks = np.arange(nobs - ntail0, nobs) / nobs
frac = np.maximum(0, distr.cdf(dtail) - ranks).max()
ntail = int(nobs * frac) # rounding down
if ntail > 0:
cutoff = dtail[-ntail - 1]
else:
cutoff = dtail[-1] + 1e-15 # not sure, check inequality
if (dtail > cutoff).sum() < ntail:
import warnings
        warnings.warn('ties at cutoff, cutoff rule produces fewer '
'outliers than `ntail`')
return frac, cutoff, ntail, ntail0, threshold | determine outlier fraction given reference distribution
This implements the outlier cutoff of Gervini and Yohai 2002
for use in efficient reweighting.
Parameters
----------
d : array_like, 1-D
array of squared standardized residuals or Mahalanobis distance
distr : None or distribution instance
reference distribution of d, needs cdf and ppf methods.
If None, then chisquare with k_endog degrees of freedom is
used. Otherwise, it should be a callable that provides the
cdf function
k_endog : int or float
used only if cdf is None. In that case, it provides the degrees
of freedom for the chisquare distribution.
trim_prob : float in (0.5, 1)
threshold for the tail probability at which the search for
trimming or outlier fraction starts.
Returns
-------
frac : float
fraction of outliers
cutoff : float
cutoff value, values with `d > cutoff` are considered outliers
ntail : int
number of outliers
ntail0 : int
initial number of outliers based on trim tail probability.
cutoff0 : float
initial cutoff value based on trim tail probability.
Notes
-----
This does not fully correct for multiple testing and does not
maintain a familywise error rate or false discovery rate.
The error rate goes to zero asymptotically under the null model,
i.e. if there are no outliers.
This might not handle threshold points correctly with discrete
distribution.
TODO: check weak versus strict inequalities (e.g. in isf)
This only checks the upper tail of the distribution and of `d`. | _outlier_gy | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
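A small simulation sketch of the cutoff search above, using the function as defined here; the sample sizes and contamination pattern are arbitrary illustration choices.

import numpy as np
from scipy import stats

rng = np.random.default_rng(2)
k = 3
d_clean = stats.chi2.rvs(k, size=200, random_state=rng)
d_outl = stats.chi2.rvs(k, loc=30, size=10, random_state=rng)   # shifted tail
d = np.concatenate([d_clean, d_outl])

frac, cutoff, ntail, ntail0, threshold = _outlier_gy(d, k_endog=k,
                                                     trim_prob=0.975)
# frac is the estimated outlier fraction; observations with d > cutoff would
# be dropped or downweighted in an efficient reweighting step
print(frac, cutoff, ntail)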
def mahalanobis(data, cov=None, cov_inv=None, sqrt=False):
"""Mahalanobis distance squared
    Note: by default this is the squared distance (no square root taken).
    Assumes the data is already centered.
Parameters
----------
data : array-like
Multivariate data with observation in rows.
cov : None or ndarray
Covariance matrix used in computing distance.
This is only used if cov_inv is None.
cov_inv : None or ndarray
        Inverse covariance matrix used in computing distance.
One of cov and cov_inv needs to be provided.
sqrt : bool
If False, then the squared distance is returned.
        If True, then the square root is returned.
    Returns
    -------
ndarray : Mahalanobis distances or squared distance.
"""
# another option would be to allow also cov^{-0.5) as keyword
x = np.asarray(data)
if cov_inv is not None:
# einsum might be a bit faster
d = (x * cov_inv.dot(x.T).T).sum(1)
elif cov is not None:
d = (x * np.linalg.solve(cov, x.T).T).sum(1)
else:
raise ValueError('either cov or cov_inv needs to be given')
if sqrt:
d = np.sqrt(d)
return d | Mahalanobis distance squared
    Note: by default this is the squared distance (no square root taken).
    Assumes the data is already centered.
Parameters
----------
data : array-like
Multivariate data with observation in rows.
cov : None or ndarray
Covariance matrix used in computing distance.
This is only used if cov_inv is None.
cov_inv : None or ndarray
        Inverse covariance matrix used in computing distance.
One of cov and cov_inv needs to be provided.
sqrt : bool
If False, then the squared distance is returned.
        If True, then the square root is returned.
    Returns
    -------
ndarray : Mahalanobis distances or squared distance. | mahalanobis | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
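A consistency check of the distance computation above against the explicit inverse-covariance form, on random data.

import numpy as np

rng = np.random.default_rng(3)
x = rng.standard_normal((50, 4))
xc = x - x.mean(0)                       # the function assumes centered data
cov = np.cov(xc.T)
d1 = mahalanobis(xc, cov=cov)
d2 = (xc @ np.linalg.inv(cov) * xc).sum(1)
assert np.allclose(d1, d2)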
def cov_gk1(x, y, scale_func=mad):
"""Gnanadesikan and Kettenring covariance between two variables.
Parameters
----------
x : ndarray
Data array.
y : ndarray
Data array
scale_func : callable
Scale function used in computing covariance.
Default is median absolute deviation, MAD.
Returns
-------
ndarray : GK covariance between x and y.
"""
s1 = scale_func(x + y)
s2 = scale_func(x - y)
return (s1**2 - s2**2) / 4 | Gnanadesikan and Kettenring covariance between two variables.
Parameters
----------
x : ndarray
Data array.
y : ndarray
Data array
scale_func : callable
Scale function used in computing covariance.
Default is median absolute deviation, MAD.
Returns
-------
ndarray : GK covariance between x and y. | cov_gk1 | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
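With a non-robust scale such as the population standard deviation, the identity behind cov_gk1 reproduces the ordinary covariance exactly, which gives a simple sanity check.

import numpy as np

rng = np.random.default_rng(4)
x = rng.standard_normal(300)
y = 0.5 * x + rng.standard_normal(300)

c_gk = cov_gk1(x, y, scale_func=np.std)   # (Var(x+y) - Var(x-y)) / 4
c_np = np.cov(x, y, ddof=0)[0, 1]
assert np.allclose(c_gk, c_np)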
def cov_gk(data, scale_func=mad):
"""Gnanadesikan and Kettenring covariance matrix estimator
Parameters
----------
data : ndarray
Multivariate data array with observations in rows.
scale_func : callable
Scale function used in computing covariance.
Default is median absolute deviation, MAD.
Returns
-------
ndarray : GK covariance matrix of the data
Notes
-----
This uses a loop over pairs of variables with cov_gk1 to avoid large
intermediate arrays.
"""
x = np.asarray(data)
if x.ndim != 2:
raise ValueError('data needs to be two dimensional')
nobs, k_vars = x.shape # noqa: F841
cov = np.diag(scale_func(x)**2)
for i in range(k_vars):
for j in range(i):
cij = cov_gk1(x[:, i], x[:, j], scale_func=scale_func)
cov[i, j] = cov[j, i] = cij
return cov | Gnanadesikan and Kettenring covariance matrix estimator
Parameters
----------
data : ndarray
Multivariate data array with observations in rows.
scale_func : callable
Scale function used in computing covariance.
Default is median absolute deviation, MAD.
Returns
-------
ndarray : GK covariance matrix of the data
Notes
-----
This uses a loop over pairs of variables with cov_gk1 to avoid large
intermediate arrays. | cov_gk | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def cov_tyler(data, start_cov=None, normalize=False, maxiter=100, eps=1e-13):
"""Tyler's M-estimator for normalized covariance (scatter)
The underlying (population) mean of the data is assumed to be zero.
Parameters
----------
data : array-like
data array with observations in rows and variables in columns
start_cov : None or ndarray
starting covariance for iterative solution
normalize : False or string
If normalize is False (default), then the unscaled tyler scatter matrix
is returned.
Three types of normalization, i.e. rescaling are available by defining
string option:
- "trace" :
The scatter matrix is normalized to have trace equal to the number
of columns in the data.
- "det" :
The scatter matrix is normalized to have determinant equal to 1.
- "normal" :
The scatter matrix is rescaled to be consistent when data is normally
distributed. Rescaling is based on median of the mahalanobis
distances and assuming chisquare distribution of the distances.
- "weights" :
The scatter matrix is rescaled by the sum of weights.
see Ollila et al 2023
maxiter : int
maximum number of iterations to find the solution.
eps : float
convergence criterion. The maximum absolute distance needs to be
smaller than eps for convergence.
Returns
-------
Holder instance with the following attributes
cov : ndarray
estimate of the scatter matrix
    n_iter : int
        number of iterations used in finding a solution. If n_iter is less than
maxiter, then the iteration converged.
References
----------
.. [1] Tyler, David E. “A Distribution-Free M-Estimator of Multivariate
Scatter.” The Annals of Statistics 15, no. 1 (March 1, 1987): 234–51.
.. [2] Soloveychik, I., and A. Wiesel. 2014. Tyler's Covariance Matrix
Estimator in Elliptical Models With Convex Structure.
IEEE Transactions on Signal Processing 62 (20): 5251-59.
doi:10.1109/TSP.2014.2348951.
.. [3] Ollila, Esa, Daniel P. Palomar, and Frederic Pascal.
“Affine Equivariant Tyler’s M-Estimator Applied to Tail Parameter
Learning of Elliptical Distributions.” arXiv, May 7, 2023.
https://doi.org/10.48550/arXiv.2305.04330.
"""
x = np.asarray(data)
nobs, k_vars = x.shape
# kn = k_vars * 1. / nobs
if start_cov is not None:
c = start_cov
else:
c = np.diag(mad(x, center=0)**2)
# Tyler's M-estimator of shape (scatter) matrix
for i in range(maxiter):
# this is old code, slower than new version, but more literal
# c_inv = np.linalg.pinv(c)
# c_old = c
# c = kn * sum(np.outer(xi, xi) / np.inner(xi, c_inv.dot(xi))
# for xi in x)
c_old = c
ichol, _ = dtrtri(linalg.cholesky(c, lower=False), lower=0)
v = x @ ichol
dist_mahal_2 = np.einsum('ij,ji->i', v, v.T)
weights = k_vars / dist_mahal_2[:, None]
xw = np.sqrt(weights) * x
c = xw.T @ xw / nobs
diff = np.max(np.abs(c - c_old))
if diff < eps:
break
if normalize is False or normalize is None:
pass
elif normalize == "trace":
c /= np.trace(c) / k_vars
elif normalize == "det":
c /= np.linalg.det(c)**(1. / k_vars)
elif normalize == "normal":
        c = _rescale(x, np.zeros(k_vars), c, prob=0.5)
elif normalize == "weights":
c /= weights.mean() / (np.trace(c) / k_vars)
else:
        msg = ('normalize needs to be False, "trace", "det", "normal" '
               'or "weights"')
raise ValueError(msg)
return Holder(cov=c, n_iter=i, method="tyler") | Tyler's M-estimator for normalized covariance (scatter)
The underlying (population) mean of the data is assumed to be zero.
Parameters
----------
data : array-like
data array with observations in rows and variables in columns
start_cov : None or ndarray
starting covariance for iterative solution
normalize : False or string
If normalize is False (default), then the unscaled tyler scatter matrix
is returned.
Three types of normalization, i.e. rescaling are available by defining
string option:
- "trace" :
The scatter matrix is normalized to have trace equal to the number
of columns in the data.
- "det" :
The scatter matrix is normalized to have determinant equal to 1.
- "normal" :
The scatter matrix is rescaled to be consistent when data is normally
distributed. Rescaling is based on median of the mahalanobis
distances and assuming chisquare distribution of the distances.
- "weights" :
The scatter matrix is rescaled by the sum of weights.
see Ollila et al 2023
maxiter : int
maximum number of iterations to find the solution.
eps : float
convergence criterion. The maximum absolute distance needs to be
smaller than eps for convergence.
Returns
-------
Holder instance with the following attributes
cov : ndarray
estimate of the scatter matrix
    n_iter : int
        number of iterations used in finding a solution. If n_iter is less than
maxiter, then the iteration converged.
References
----------
.. [1] Tyler, David E. “A Distribution-Free M-Estimator of Multivariate
Scatter.” The Annals of Statistics 15, no. 1 (March 1, 1987): 234–51.
.. [2] Soloveychik, I., and A. Wiesel. 2014. Tyler's Covariance Matrix
Estimator in Elliptical Models With Convex Structure.
IEEE Transactions on Signal Processing 62 (20): 5251-59.
doi:10.1109/TSP.2014.2348951.
.. [3] Ollila, Esa, Daniel P. Palomar, and Frederic Pascal.
“Affine Equivariant Tyler’s M-Estimator Applied to Tail Parameter
Learning of Elliptical Distributions.” arXiv, May 7, 2023.
https://doi.org/10.48550/arXiv.2305.04330. | cov_tyler | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
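A usage sketch of the fixed-point iteration above on heavy-tailed data; the data generation (multivariate t with 3 degrees of freedom) and the trace normalization are illustrative assumptions.

import numpy as np
from scipy import stats

rng = np.random.default_rng(5)
k = 3
scatter_true = np.array([[2.0, 0.5, 0.0],
                         [0.5, 1.0, 0.3],
                         [0.0, 0.3, 0.5]])
z = rng.multivariate_normal(np.zeros(k), scatter_true, size=500)
w = np.sqrt(stats.chi2.rvs(3, size=500, random_state=rng) / 3)
x = z / w[:, None]                        # multivariate t_3 sample, mean zero

res = cov_tyler(x, normalize="trace")
print(res.n_iter)                         # iterations used by the fixed point
print(res.cov)                            # trace equals k by construction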
def cov_tyler_regularized(data, start_cov=None, normalize=False,
shrinkage_factor=None,
maxiter=100, eps=1e-13):
"""Regularized Tyler's M-estimator for normalized covariance (shape).
The underlying (population) mean of the data is assumed to be zero.
Parameters
----------
data : ndarray
data array with observations in rows and variables in columns.
start_cov : None or ndarray
starting covariance for iterative solution
normalize : bool
        If True, then the scatter matrix is normalized to have trace equal
to the number of columns in the data.
shrinkage_factor : None or float in [0, 1]
Shrinkage for the scatter estimate. If it is zero, then no shrinkage
is performed. If it is None, then the shrinkage factor will be
determined by a plugin estimator
maxiter : int
maximum number of iterations to find the solution
eps : float
convergence criterion. The maximum absolute distance needs to be
smaller than eps for convergence.
Returns
-------
result instance with the following attributes
cov : ndarray
estimate of the scatter matrix
    n_iter : int
        number of iterations used in finding a solution. If n_iter is less than
maxiter, then the iteration converged.
shrinkage_factor : float
shrinkage factor that was used in the estimation. This will be the
same as the function argument if it was not None.
Notes
-----
If the shrinkage factor is None, then a plugin is used as described in
Chen and Wiesel 2011. The required trace for a pilot scatter estimate is
obtained by the covariance rescaled by MAD estimate for the variance.
References
----------
.. [1] Chen, Yilun, A. Wiesel, and A.O. Hero. “Robust Shrinkage
Estimation of High-Dimensional Covariance Matrices.” IEEE Transactions
on Signal Processing 59, no. 9 (September 2011): 4097–4107.
https://doi.org/10.1109/TSP.2011.2138698.
"""
x = np.asarray(data)
nobs, k_vars = x.shape
kn = k_vars * 1. / nobs
# calculate MAD only once if needed
if start_cov is None or shrinkage_factor is None:
scale_mad = mad(x, center=0)
corr = None
if shrinkage_factor is None:
# maybe some things here are redundant
xd = x / x.std(0) # scale_mad
corr = xd.T.dot(xd)
        corr *= np.outer(scale_mad, scale_mad)  # scale correlation to covariance units
corr *= k_vars / np.trace(corr)
tr = np.trace(corr.dot(corr))
n, k = nobs, k_vars
# Chen and Wiesel 2011 equation (13)
sf = k*k + (1 - 2./k) * tr
sf /= (k*k - n*k - 2*n) + (n + 1 + 2. * (n - 1.) / k) * tr
shrinkage_factor = sf
if start_cov is not None:
c = start_cov
else:
c = np.diag(scale_mad**2)
identity = np.eye(k_vars)
for i in range(maxiter):
c_inv = np.linalg.pinv(c)
c_old = c
# this could be vectorized but could use a lot of memory
# TODO: try to work in vectorized batches
c0 = kn * sum(np.outer(xi, xi) / np.inner(xi, c_inv.dot(xi))
for xi in x)
if shrinkage_factor != 0:
c = (1 - shrinkage_factor) * c0 + shrinkage_factor * identity
else:
c = c0
c *= k_vars / np.trace(c)
diff = np.max(np.abs(c - c_old))
if diff < eps:
break
res = Holder(cov=c, n_iter=i, shrinkage_factor=shrinkage_factor,
corr=corr)
return res | Regularized Tyler's M-estimator for normalized covariance (shape).
The underlying (population) mean of the data is assumed to be zero.
Parameters
----------
data : ndarray
data array with observations in rows and variables in columns.
start_cov : None or ndarray
starting covariance for iterative solution
normalize : bool
        If True, then the scatter matrix is normalized to have trace equal
to the number of columns in the data.
shrinkage_factor : None or float in [0, 1]
Shrinkage for the scatter estimate. If it is zero, then no shrinkage
is performed. If it is None, then the shrinkage factor will be
determined by a plugin estimator
maxiter : int
maximum number of iterations to find the solution
eps : float
convergence criterion. The maximum absolute distance needs to be
smaller than eps for convergence.
Returns
-------
result instance with the following attributes
cov : ndarray
estimate of the scatter matrix
    n_iter : int
        number of iterations used in finding a solution. If n_iter is less than
maxiter, then the iteration converged.
shrinkage_factor : float
shrinkage factor that was used in the estimation. This will be the
same as the function argument if it was not None.
Notes
-----
If the shrinkage factor is None, then a plugin is used as described in
Chen and Wiesel 2011. The required trace for a pilot scatter estimate is
obtained by the covariance rescaled by MAD estimate for the variance.
References
----------
.. [1] Chen, Yilun, A. Wiesel, and A.O. Hero. “Robust Shrinkage
Estimation of High-Dimensional Covariance Matrices.” IEEE Transactions
on Signal Processing 59, no. 9 (September 2011): 4097–4107.
https://doi.org/10.1109/TSP.2011.2138698. | cov_tyler_regularized | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def cov_tyler_pairs_regularized(data_iterator, start_cov=None, normalize=False,
shrinkage_factor=None, nobs=None, k_vars=None,
maxiter=100, eps=1e-13):
"""Tyler's M-estimator for normalized covariance (scatter)
The underlying (population) mean of the data is assumed to be zero.
experimental, calculation of startcov and shrinkage factor doesn't work
This is intended for cluster robust and HAC covariance matrices that need
to iterate over pairs of observations that are correlated.
Parameters
----------
data_iterator : restartable iterator
needs to provide three elements xi, xj and w
start_cov : None or ndarray
starting covariance for iterative solution
normalize : bool
        If True, then the scatter matrix is normalized to have trace equal
to the number of columns in the data.
shrinkage_factor : None or float in [0, 1]
Shrinkage for the scatter estimate. If it is zero, then no shrinkage
is performed. If it is None, then the shrinkage factor will be
determined by a plugin estimator
maxiter : int
maximum number of iterations to find the solution
eps : float
convergence criterion. The maximum absolute distance needs to be
smaller than eps for convergence.
Returns
-------
scatter : ndarray
estimate of the scatter matrix
iter : int
number of iterations used in finding a solution. If iter is less than
maxiter, then the iteration converged.
shrinkage_factor : float
shrinkage factor that was used in the estimation. This will be the
same as the function argument if it was not None.
Notes
-----
If the shrinkage factor is None, then a plugin is used as described in
Chen and Wiesel 2011. The required trace for a pilot scatter estimate is
obtained by the covariance rescaled by MAD estimate for the variance.
References
----------
.. [1] Chen, Yilun, A. Wiesel, and A.O. Hero. “Robust Shrinkage Estimation
of High-Dimensional Covariance Matrices.” IEEE Transactions on Signal
Processing 59, no. 9 (September 2011): 4097–4107.
https://doi.org/10.1109/TSP.2011.2138698.
"""
x = data_iterator
# x = np.asarray(data)
# nobs, k_vars = x.shape
# calculate MAD only once if needed
if start_cov is None or shrinkage_factor is None:
scale_mad = mad(x, center=0)
corr = None
if shrinkage_factor is None:
# maybe some things here are redundant
xd = x / x.std(0) # scale_mad
corr = xd.T.dot(xd)
corr * np.outer(scale_mad, scale_mad)
corr *= k_vars / np.trace(corr)
tr = np.trace(corr.dot(corr))
n, k = nobs, k_vars
# Chen and Wiesel 2011 equation (13)
sf = k*k + (1 - 2./k) * tr
sf /= (k*k - n*k - 2*n) + (n + 1 + 2. * (n - 1.) / k) * tr
shrinkage_factor = sf
if start_cov is not None:
c = start_cov
else:
c = np.diag(scale_mad**2)
identity = np.eye(k_vars)
kn = k_vars * 1. / nobs
for i in range(maxiter):
c_inv = np.linalg.pinv(c)
c_old = c
# this could be vectorized but could use a lot of memory
# TODO: try to work in vectorized batches
# weights is a problem if iterator should be ndarray
# c0 = kn * sum(np.outer(xi, xj) / np.inner(xi, c_inv.dot(xj))
# for xi, xj in x)
c0 = kn * sum(np.outer(xij[0], xij[1]) /
np.inner(xij[0], c_inv.dot(xij[1])) for xij in x)
if shrinkage_factor != 0:
c = (1 - shrinkage_factor) * c0 + shrinkage_factor * identity
else:
c = c0
c *= k_vars / np.trace(c)
diff = np.max(np.abs(c - c_old))
if diff < eps:
break
res = Holder(cov=c, n_iter=i, shrinkage_factor=shrinkage_factor,
corr=corr)
return res | Tyler's M-estimator for normalized covariance (scatter)
The underlying (population) mean of the data is assumed to be zero.
experimental, calculation of startcov and shrinkage factor doesn't work
This is intended for cluster robust and HAC covariance matrices that need
to iterate over pairs of observations that are correlated.
Parameters
----------
data_iterator : restartable iterator
needs to provide three elements xi, xj and w
start_cov : None or ndarray
starting covariance for iterative solution
normalize : bool
        If True, then the scatter matrix is normalized to have trace equal
to the number of columns in the data.
shrinkage_factor : None or float in [0, 1]
Shrinkage for the scatter estimate. If it is zero, then no shrinkage
is performed. If it is None, then the shrinkage factor will be
determined by a plugin estimator
maxiter : int
maximum number of iterations to find the solution
eps : float
convergence criterion. The maximum absolute distance needs to be
smaller than eps for convergence.
Returns
-------
scatter : ndarray
estimate of the scatter matrix
iter : int
number of iterations used in finding a solution. If iter is less than
maxiter, then the iteration converged.
shrinkage_factor : float
shrinkage factor that was used in the estimation. This will be the
same as the function argument if it was not None.
Notes
-----
If the shrinkage factor is None, then a plugin is used as described in
Chen and Wiesel 2011. The required trace for a pilot scatter estimate is
obtained by the covariance rescaled by MAD estimate for the variance.
References
----------
.. [1] Chen, Yilun, A. Wiesel, and A.O. Hero. “Robust Shrinkage Estimation
of High-Dimensional Covariance Matrices.” IEEE Transactions on Signal
Processing 59, no. 9 (September 2011): 4097–4107.
https://doi.org/10.1109/TSP.2011.2138698. | cov_tyler_pairs_regularized | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def cov_weighted(data, weights, center=None, weights_cov=None,
weights_cov_denom=None, ddof=1):
"""weighted mean and covariance (for M-estimators)
wmean = sum (weights * data) / sum(weights)
wcov = sum (weights_cov * data_i data_i') / weights_cov_denom
The options for weights_cov_denom are described in Parameters.
By default both mean and cov are averages based on the same
weights.
Parameters
----------
data : array_like, 2-D
observations in rows, variables in columns
no missing value handling
weights : ndarray, 1-D
weights array with length equal to the number of observations
center : None or ndarray (optional)
If None, then the weighted mean is subtracted from the data
If center is provided, then it is used instead of the
weighted mean.
weights_cov : None, ndarray or "det" (optional)
If None, then the same weights as for the mean are used.
weights_cov_denom : None, float or "det" (optional)
        Specifies the denominator for the weighted covariance.
        If None, then the sum of weights - ddof is used and the covariance is
an average cross product.
If "det", then the weighted covariance is normalized such that
det(wcov) is 1.
If weights_cov_denom is 1, then the weighted cross product is returned
without averaging or scaling (sum of squares).
Otherwise it is used directly as denominator after subtracting
ddof.
ddof : int or float
covariance degrees of freedom correction, only used if
weights_cov_denom is None or a float.
Notes
-----
The extra options are available to cover the general M-estimator
for location and scatter with estimating equations (using data x):
sum (weights * (x - m)) = 0
sum (weights_cov * (x_i - m) * (x_i - m)') - weights_cov_denom * cov = 0
    where the weights are functions of the mahalanobis distance of the
residuals, and m is the mean.
In the default case
wmean = ave (w_i x_i)
wcov = ave (w_i (x_i - m) (x_i - m)')
References
----------
.. [1] Rocke, D. M., and D. L. Woodruff. 1993. Computation of Robust
Estimates of Multivariate Location and Shape.
Statistica Neerlandica 47 (1): 27-42.
doi:10.1111/j.1467-9574.1993.tb01404.x.
"""
wsum = weights.sum()
if weights_cov is None:
weights_cov = weights
wsum_cov = wsum
else:
wsum_cov = None # calculate below only if needed
if center is None:
wmean = weights.dot(data) / wsum
else:
wmean = center
xdm = data - wmean
wcov = (weights_cov * xdm.T).dot(xdm)
if weights_cov_denom is None:
if wsum_cov is None:
wsum_cov = weights_cov.sum()
wcov /= (wsum_cov - ddof) # * np.sum(weights_cov**2) / wsum_cov)
elif weights_cov_denom == "det":
wcov /= np.linalg.det(wcov)**(1 / wcov.shape[0])
elif weights_cov_denom == 1:
pass
else:
wcov /= (weights_cov_denom - ddof)
return wcov, wmean | weighted mean and covariance (for M-estimators)
wmean = sum (weights * data) / sum(weights)
wcov = sum (weights_cov * data_i data_i') / weights_cov_denom
The options for weights_cov_denom are described in Parameters.
By default both mean and cov are averages based on the same
weights.
Parameters
----------
data : array_like, 2-D
observations in rows, variables in columns
no missing value handling
weights : ndarray, 1-D
weights array with length equal to the number of observations
center : None or ndarray (optional)
If None, then the weighted mean is subtracted from the data
If center is provided, then it is used instead of the
weighted mean.
weights_cov : None, ndarray or "det" (optional)
If None, then the same weights as for the mean are used.
weights_cov_denom : None, float or "det" (optional)
        Specifies the denominator for the weighted covariance.
        If None, then the sum of weights - ddof is used and the covariance is
an average cross product.
If "det", then the weighted covariance is normalized such that
det(wcov) is 1.
If weights_cov_denom is 1, then the weighted cross product is returned
without averaging or scaling (sum of squares).
Otherwise it is used directly as denominator after subtracting
ddof.
ddof : int or float
covariance degrees of freedom correction, only used if
weights_cov_denom is None or a float.
Notes
-----
The extra options are available to cover the general M-estimator
for location and scatter with estimating equations (using data x):
sum (weights * (x - m)) = 0
sum (weights_cov * (x_i - m) * (x_i - m)') - weights_cov_denom * cov = 0
    where the weights are functions of the mahalanobis distance of the
residuals, and m is the mean.
In the default case
wmean = ave (w_i x_i)
wcov = ave (w_i (x_i - m) (x_i - m)')
References
----------
.. [1] Rocke, D. M., and D. L. Woodruff. 1993. Computation of Robust
Estimates of Multivariate Location and Shape.
Statistica Neerlandica 47 (1): 27-42.
doi:10.1111/j.1467-9574.1993.tb01404.x. | cov_weighted | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
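With unit weights the estimating equations above reduce to the ordinary sample mean and covariance, which is an easy check of the bookkeeping.

import numpy as np

rng = np.random.default_rng(6)
x = rng.standard_normal((80, 3))
w = np.ones(80)

wcov, wmean = cov_weighted(x, w)
assert np.allclose(wmean, x.mean(0))
assert np.allclose(wcov, np.cov(x.T, ddof=1))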
def weights_mvt(distance, df, k_vars):
"""weight function based on multivariate t distribution
Parameters
----------
distance : ndarray
mahalanobis distance
df : int or float
degrees of freedom of the t distribution
k_vars : int
number of variables in the multivariate sample
Returns
-------
weights : ndarray
weights calculated for the given distances.
References
----------
.. [1] Finegold, Michael A., and Mathias Drton. 2014. Robust Graphical
Modeling with T-Distributions. arXiv:1408.2033 [Cs, Stat], August.
http://arxiv.org/abs/1408.2033.
.. [2] Finegold, Michael, and Mathias Drton. 2011. Robust graphical
modeling of gene networks using classical and alternative
t-distributions. The Annals of Applied Statistics 5 (2A): 1057-80.
"""
w = (df + k_vars) / (df + distance)
return w | weight function based on multivariate t distribution
Parameters
----------
distance : ndarray
mahalanobis distance
df : int or float
degrees of freedom of the t distribution
k_vars : int
number of variables in the multivariate sample
Returns
-------
weights : ndarray
weights calculated for the given distances.
References
----------
.. [1] Finegold, Michael A., and Mathias Drton. 2014. Robust Graphical
Modeling with T-Distributions. arXiv:1408.2033 [Cs, Stat], August.
http://arxiv.org/abs/1408.2033.
.. [2] Finegold, Michael, and Mathias Drton. 2011. Robust graphical
modeling of gene networks using classical and alternative
t-distributions. The Annals of Applied Statistics 5 (2A): 1057-80. | weights_mvt | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def weights_quantile(distance, frac=0.5, rescale=True):
"""Weight function for cutoff weights.
    The weight function is an indicator function for distances smaller than
the frac quantile.
rescale option is not supported.
"""
cutoff = np.percentile(distance, frac * 100)
w = (distance < cutoff).astype(int)
return w | Weight function for cutoff weights.
    The weight function is an indicator function for distances smaller than
the frac quantile.
rescale option is not supported. | weights_quantile | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def _cov_iter(data, weights_func, weights_args=None, cov_init=None,
rescale='med', maxiter=3, atol=1e-14, rtol=1e-6):
"""Iterative robust covariance estimation using weights.
This is in the style of M-estimators for given weight function.
Note: ??? Whether this is normalized to be consistent with the
multivariate normal case depends on the weight function.
maybe it is consistent, it's just a weighted cov.
TODO: options for rescale instead of just median
Parameters
----------
data : array_like
weights_func : callable
function to calculate weights from the distances and weights_args
weights_args : tuple
extra arguments for the weights_func
cov_init : ndarray, square 2-D
initial covariance matrix
rescale : "med" or "none"
If "med" then the resulting covariance matrix is normalized so it is
approximately consistent with the normal distribution. Rescaling is
based on the median of the distances and of the chisquare distribution.
Other options are not yet available.
If rescale is the string "none", then no rescaling is performed.
Returns
-------
Holder instance with attributes: cov, mean, w, dist, it, converged
Notes
-----
This iterates over calculating the mahalanobis distance and weighted
    covariance. See Finegold and Drton 2014 for the motivation using weights
based on the multivariate t distribution. Note that this does not calculate
their alternative t distribution which requires numerical or Monte Carlo
integration.
References
----------
.. [1] Finegold, Michael, and Mathias Drton. 2011. Robust graphical
modeling of gene networks using classical and alternative
t-distributions. Annals of Applied Statistics 5 (2A): 1057-80.
"""
data = np.asarray(data)
nobs, k_vars = data.shape # noqa: F841
if cov_init is None:
cov_init = np.cov(data.T)
converged = False
cov = cov_old = cov_init
for it in range(maxiter):
dist = mahalanobis(data, cov=cov)
w = weights_func(dist, *weights_args)
cov, mean = cov_weighted(data, w)
if np.allclose(cov, cov_old, atol=atol, rtol=rtol):
converged = True
break
# recompute maha distance at final estimate
dist = mahalanobis(data, cov=cov)
if rescale == 'none':
s = 1
elif rescale == 'med':
s = np.median(dist) / stats.chi2.ppf(0.5, k_vars)
cov *= s
else:
raise NotImplementedError('only rescale="med" is currently available')
res = Holder(cov=cov, mean=mean, weights=w, mahalanobis=dist,
scale_factor=s, n_iter=it, converged=converged,
method="m-estimator",
weights_func=weights_func,
)
return res | Iterative robust covariance estimation using weights.
This is in the style of M-estimators for given weight function.
Note: ??? Whether this is normalized to be consistent with the
multivariate normal case depends on the weight function.
maybe it is consistent, it's just a weighted cov.
TODO: options for rescale instead of just median
Parameters
----------
data : array_like
weights_func : callable
function to calculate weights from the distances and weights_args
weights_args : tuple
extra arguments for the weights_func
cov_init : ndarray, square 2-D
initial covariance matrix
rescale : "med" or "none"
If "med" then the resulting covariance matrix is normalized so it is
approximately consistent with the normal distribution. Rescaling is
based on the median of the distances and of the chisquare distribution.
Other options are not yet available.
If rescale is the string "none", then no rescaling is performed.
Returns
-------
Holder instance with attributes: cov, mean, w, dist, it, converged
Notes
-----
This iterates over calculating the mahalanobis distance and weighted
    covariance. See Finegold and Drton 2014 for the motivation using weights
based on the multivariate t distribution. Note that this does not calculate
their alternative t distribution which requires numerical or Monte Carlo
integration.
References
----------
.. [1] Finegold, Michael, and Mathias Drton. 2011. Robust graphical
modeling of gene networks using classical and alternative
t-distributions. Annals of Applied Statistics 5 (2A): 1057-80. | _cov_iter | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
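Combining the weight function and the iteration above gives the t-based M-estimator discussed in the references; the degrees of freedom, the contamination and maxiter are illustrative choices.

import numpy as np

rng = np.random.default_rng(7)
k = 3
x = rng.standard_normal((300, k))
x[:15] += 8                                     # contaminate a few rows

xc = x - np.median(x, axis=0)   # center robustly; distances are computed on the data as given
res = _cov_iter(xc, weights_mvt, weights_args=(3, k),
                rescale="med", maxiter=50)
print(res.converged, res.n_iter)
print(res.cov)                                  # roughly the identity matrix here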
def _cov_starting(data, standardize=False, quantile=0.5, retransform=False):
"""compute some robust starting covariances
The returned covariance matrices are intended as starting values
for further processing. The main purpose is for algorithms with high
breakdown point.
The quality as standalone covariance matrices varies and might not
be very good.
Preliminary version. This will still be changed. Options and defaults can
    change, additional covariance methods will be added and the return extended.
Parameters
----------
data : array-like
Multivariate data with observations in rows (axis=0).
standardize : bool
If False, then the data is only centered (by median).
If True, then the data is standardized using median and mad-scale.
This scaling is only intermediate, the returned covariance compensates
for the initial scaling.
quantile : float in [0.5, 1]
Parameter used for `_cov_iter` estimation.
Returns
-------
list of Holder instances with `cov` attribute.
"""
x = np.asarray(data)
nobs, k_vars = x.shape
if standardize:
# there should be a helper function/class
center = np.median(data, axis=0)
xs = (x - center)
std = mad0(data)
xs /= std
else:
center = np.median(data, axis=0)
xs = x - center
std = 1
cov_all = []
d = mahalanobis(xs, cov=None, cov_inv=np.eye(k_vars))
percentiles = [(k_vars+2) / nobs * 100 * 2, 25, 50, 85]
cutoffs = np.percentile(d, percentiles)
for p, cutoff in zip(percentiles, cutoffs):
xsp = xs[d < cutoff]
c = np.cov(xsp.T)
corr_factor = coef_normalize_cov_truncated(p / 100, k_vars)
c0 = Holder(
cov=c * corr_factor,
mean=xsp.mean(0) * std + center,
method="pearson truncated",
)
c01 = _cov_iter(xs, weights_quantile, weights_args=(quantile,),
rescale="med", cov_init=c0.cov, maxiter=100)
c02 = Holder(
cov=_naive_ledoit_wolf_shrinkage(xsp, 0).cov * corr_factor,
mean=xsp.mean(0) * std + center,
method="ledoit_wolf",
)
c03 = _cov_iter(xs, weights_quantile, weights_args=(quantile,),
rescale="med", cov_init=c02.cov, maxiter=100)
if not standardize or not retransform:
cov_all.extend([c0, c01, c02, c03])
else:
# compensate for initial rescaling
# TODO: this does not return list of Holder anymore
s = np.outer(std, std)
cov_all.extend([r.cov * s for r in [c0, c01, c02, c03]])
c2 = cov_ogk(xs)
cov_all.append(c2)
c2raw = Holder(
cov=c2.cov_raw,
mean=c2.loc_raw * std + center,
method="ogk_raw",
)
cov_all.append(c2raw)
z_tanh = np.tanh(xs)
c_th = Holder(
cov=np.corrcoef(z_tanh.T), # not consistently scaled for cov
mean=center, # TODO: do we add inverted mean z_tanh ?
method="tanh",
)
cov_all.append(c_th)
x_spatial = xs / np.sqrt(np.sum(xs**2, axis=1))[:, None]
c_th = Holder(
cov=np.cov(x_spatial.T),
mean=center,
method="spatial",
)
cov_all.append(c_th)
c_th = Holder(
# not consistently scaled for cov
# cov=stats.spearmanr(xs)[0], # not correct shape if k=1 or 2
cov=corr_rank(xs), # always returns matrix, np.corrcoef result
mean=center,
method="spearman",
)
cov_all.append(c_th)
c_ns = Holder(
cov=corr_normal_scores(xs), # not consistently scaled for cov
mean=center, # TODO: do we add inverted mean z_tanh ?
method="normal-scores",
)
cov_all.append(c_ns)
# TODO: rescale back to original space using center and std
return cov_all | compute some robust starting covariances
The returned covariance matrices are intended as starting values
for further processing. The main purpose is for algorithms with high
breakdown point.
The quality as standalone covariance matrices varies and might not
be very good.
Preliminary version. This will still be changed. Options and defaults can
change, additional covariance methods will be added and the return value extended.
Parameters
----------
data : array-like
Multivariate data with observations in rows (axis=0).
standardize : bool
If False, then the data is only centered (by median).
If True, then the data is standardized using median and mad-scale.
This scaling is only intermediate, the returned covariance compensates
for the initial scaling.
quantile : float in [0.5, 1]
Parameter used for `_cov_iter` estimation.
Returns
-------
list of Holder instances with `cov` attribute. | _cov_starting | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def _orthogonalize_det(x, corr, loc_func, scale_func):
"""Orthogonalize
This is a simplified version of the OGK method.
version from DetMCD works on zscored data
(does not return mean and cov of original data)
so we drop the compensation for scaling in zscoring
z is the data here, zscored with robust estimators,
e.g. median and Qn in DetMCD
"""
evals, evecs = np.linalg.eigh(corr) # noqa: F841
z = x.dot(evecs)
transf0 = evecs
scale_z = scale_func(z) # scale of principal components
cov = (transf0 * scale_z**2).dot(transf0.T)
# extra step in DetMCD, sphering data with new cov to compute center
# I think this is equivalent to scaling z
# loc_z = loc_func(z / scale_z) * scale_z # center of principal components
# loc = (transf0 * scale_z).dot(loc_z)
transf1 = (transf0 * scale_z).dot(transf0.T)
# transf1inv = (transf0 * scale_z**(-1)).dot(transf0.T)
# loc = loc_func(x @ transf1inv) @ transf1
loc = loc_func((z / scale_z).dot(transf0.T)) @ transf1
return loc, cov | Orthogonalize
This is a simplified version of the OGK method.
version from DetMCD works on zscored data
(does not return mean and cov of original data)
so we drop the compensation for scaling in zscoring
z is the data here, zscored with robust estimators,
e.g. median and Qn in DetMCD | _orthogonalize_det | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
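A rough standalone sketch of this orthogonalization step (eigendecompose a robust correlation, rescale the principal components with a robust scale, map back). Using the MAD as scale function and the median as location is an assumption for illustration; DetMCD uses its own choices.
import numpy as np

def mad(a):
    # median absolute deviation, rescaled for consistency at the normal
    med = np.median(a, axis=0)
    return np.median(np.abs(a - med), axis=0) / 0.6745

def orthogonalize(z, corr):
    evals, evecs = np.linalg.eigh(corr)
    pc = z @ evecs                          # principal components
    scale_pc = mad(pc)                      # robust scale per component
    cov = (evecs * scale_pc**2) @ evecs.T
    transf = (evecs * scale_pc) @ evecs.T
    z_sphered = (pc / scale_pc) @ evecs.T   # sphered data, original coordinates
    loc = np.median(z_sphered, axis=0) @ transf
    return loc, cov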
def _get_detcov_startidx(z, h, options_start=None, methods_cov="all"):
"""Starting sets for deterministic robust covariance estimators.
These are intended as starting sets for DetMCD, DetS and DetMM.
"""
if options_start is None:
options_start = {}
loc_func = options_start.get("loc_func", median)
scale_func = options_start.get("scale_func", mad)
z = (z - loc_func(z)) / scale_func(z)
if np.squeeze(z).ndim == 1:
# only one random variable
z = np.squeeze(z)
nobs = z.shape[0]
idx_sel = np.argpartition(np.abs(z), h)[:h]
idx_all = [(idx_sel, "abs-resid")]
# next uses symmetric equal-tail trimming
idx_sorted = np.argsort(z)
h_tail = (nobs - h) // 2
idx_all.append((idx_sorted[h_tail : h_tail + h], "trimmed-tail"))
return idx_all
# continue if more than 1 random variable
cov_all = _cov_starting(z, standardize=False, quantile=0.5)
# orthogonalization step
idx_all = []
for c in cov_all:
if not hasattr(c, "method"):
continue
method = c.method
mean, cov = _orthogonalize_det(z, c.cov, loc_func, scale_func)
d = mahalanobis(z, mean, cov)
idx_sel = np.argpartition(d, h)[:h]
idx_all.append((idx_sel, method))
return idx_all | Starting sets for deterministic robust covariance estimators.
These are intended as starting sets for DetMCD, DetS and DetMM. | _get_detcov_startidx | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def _fit_mean_shape(self, mean, shape, scale):
"""Estimate mean and shape in iteration step.
This does only one step.
Parameters
----------
mean : ndarray
Starting value for mean
shape : ndarray
Starting value for shape matrix.
scale : float
Starting value for scale.
Returns
-------
Holder instance with updated estimates.
"""
d = mahalanobis(self.data - mean, shape, sqrt=True) / scale
weights_mean = self.weights_mean(d)
weights_cov = self.weights_scatter(d)
res = cov_weighted(
self.data,
weights=weights_mean,
center=None,
weights_cov=weights_cov,
weights_cov_denom="det",
ddof=1,
)
return res | Estimate mean and shape in iteration step.
This does only one step.
Parameters
----------
mean : ndarray
Starting value for mean
shape : ndarray
Starting value for shape matrix.
scale : float
Starting value for scale.
Returns
-------
Holder instance with updated estimates. | _fit_mean_shape | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def _fit_scale(self, maha, start_scale=None, maxiter=100, rtol=1e-5,
atol=1e-5):
"""Estimate iterated M-scale.
Parameters
----------
maha : ndarray
start_scale : None or float
Starting scale. If it is None, the mad of maha will be used.
maxiter : int
Maximum iterations to compute M-scale
rtol, atol : float
Relative and absolute convergence criteria for scale used with
allclose.
Returns
-------
float : scale estimate
"""
if start_scale is None:
# TODO: this does not really make sense
# better scale to median of maha and chi or chi2
start_scale = mad(maha)
scale = rscale._scale_iter(
maha,
scale0=start_scale,
maxiter=maxiter,
rtol=rtol,
atol=atol,
meef_scale=self.rho,
scale_bias=self.scale_bias,
)
return scale | Estimate iterated M-scale.
Parameters
----------
maha : ndarray
start_scale : None or float
Starting scale. If it is None, the mad of maha will be used.
maxiter : int
Maximum iterations to compute M-scale
rtol, atol : float
Relative and absolute convergence criteria for scale used with
allclose.
Returns
-------
float : scale estimate | _fit_scale | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
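As a hedged illustration of the M-scale computed here: it solves mean(rho(d / s)) = b for a bounded rho, and a standard fixed-point iteration is sketched below. The Tukey biweight rho with c ≈ 1.547 and b = 0.5 * max(rho) (50% breakdown) are assumptions for the example, not necessarily the defaults used by the class.
import numpy as np

def rho_biweight(x, c=1.547):
    # Tukey biweight rho, bounded at c**2 / 6
    xc = np.clip(np.abs(x), 0, c)
    return xc**2 / 2 - xc**4 / (2 * c**2) + xc**6 / (6 * c**4)

def m_scale(d, c=1.547, b=None, maxiter=100, rtol=1e-8):
    b = 0.5 * c**2 / 6 if b is None else b      # scale bias target
    s = np.median(np.abs(d)) / 0.6745           # starting scale
    for _ in range(maxiter):
        s_new = s * np.sqrt(np.mean(rho_biweight(d / s, c)) / b)
        if abs(s_new - s) <= rtol * s:
            return s_new
        s = s_new
    return s

d = np.abs(np.random.default_rng(1).standard_normal(500))
print(m_scale(d))   # roughly 1 for standard normal distances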
def fit(self, start_mean=None, start_shape=None, start_scale=None,
maxiter=100, update_scale=True):
"""Estimate mean, shape and scale parameters with MM-estimator.
Parameters
----------
start_mean : None or float
Starting value for mean, center.
If None, then median is used.
start_shape : None or 2-dim ndarray
Starting value of shape matrix, i.e. scatter matrix normalized
to det(scatter) = 1.
If None, then scaled covariance matrix of data is used.
start_scale : None or float.
Starting value of scale.
maxiter : int
Maximum number of iterations.
update_scale : bool
If update_scale is False, then scale is held fixed at start_scale and
only mean and shape are updated.
Returns
-------
results instance with mean, shape, scale, cov and other attributes.
Notes
-----
If start_scale is provided and update_scale is False, then this is
an M-estimator with a predetermined scale as used in the second
stage of an MM-estimator.
"""
converged = False
if start_scale is not None:
scale_old = start_scale
else:
scale_old = 1
# will be reset if start_shape is also None.
if start_mean is not None:
mean_old = start_mean
else:
mean_old = np.median(self.data, axis=0)
if start_shape is not None:
shape_old = start_shape
else:
shape_old = np.cov(self.data.T)
scale = np.linalg.det(shape_old) ** (1 / self.k_vars)
shape_old /= scale
if start_scale is not None:
scale_old = scale
if update_scale is False:
scale = start_scale
for i in range(maxiter):
shape, mean = self._fit_mean_shape(mean_old, shape_old, scale_old)
d = mahalanobis(self.data - mean, shape, sqrt=True)
if update_scale:
scale = self._fit_scale(d, start_scale=scale_old, maxiter=10)
if (np.allclose(scale, scale_old, rtol=1e-5) and
np.allclose(mean, mean_old, rtol=1e-5) and
np.allclose(shape, shape_old, rtol=1e-5)
): # noqa E124
converged = True
break
scale_old = scale
mean_old = mean
shape_old = shape
maha = mahalanobis(self.data - mean, shape / scale, sqrt=True)
res = Holder(
mean=mean,
shape=shape,
scale=scale,
cov=shape * scale**2,
converged=converged,
n_iter=i,
mahalanobis=maha,
)
return res | Estimate mean, shape and scale parameters with MM-estimator.
Parameters
----------
start_mean : None or float
Starting value for mean, center.
If None, then median is used.
start_shape : None or 2-dim ndarray
Starting value of shape matrix, i.e. scatter matrix normalized
to det(scatter) = 1.
If None, then scaled covariance matrix of data is used.
start_scale : None or float.
Starting value of scale.
maxiter : int
Maximum number of iterations.
update_scale : bool
If update_scale is False, then scale is held fixed at start_scale and
only mean and shape are updated.
Returns
-------
results instance with mean, shape, scale, cov and other attributes.
Notes
-----
If start_scale is provided and update_scale is False, then this is
an M-estimator with a predetermined scale as used in the second
stage of an MM-estimator. | fit | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def _cstep(self, x, mean, cov, h, maxiter=2, tol=1e-8):
"""C-step for mcd iteration
x is data, perc is percentile h / nobs, don't need perc when we
use np.argpartition
requires starting mean and cov
"""
converged = False
for _ in range(maxiter):
d = mahalanobis(x - mean, cov)
idx_sel = np.argpartition(d, h)[:h]
x_sel = x[idx_sel]
mean = x_sel.mean(0)
cov_new = np.cov(x_sel.T, ddof=1)
if ((cov - cov_new)**2).mean() < tol:
cov = cov_new
converged = True
break
cov = cov_new
return mean, cov, converged | C-step for mcd iteration
x is data, perc is percentile h / nobs, don't need perc when we
use np.argpartition
requires starting mean and cov | _cstep | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
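A self-contained sketch of the C-step idea used in MCD-type estimation: keep the h observations with smallest Mahalanobis distance under the current estimate and recompute mean and covariance (the determinant of the covariance cannot increase). Names and the convergence check are illustrative, not the library API.
import numpy as np

def c_step(x, mean, cov, h):
    diff = x - mean
    d = np.einsum("ij,jk,ik->i", diff, np.linalg.inv(cov), diff)
    idx = np.argpartition(d, h)[:h]        # indices of the h smallest distances
    x_sel = x[idx]
    return x_sel.mean(0), np.cov(x_sel.T, ddof=1)

def c_steps(x, mean, cov, h, maxiter=100, tol=1e-8):
    # iterate to a local optimum from some starting mean/cov
    for _ in range(maxiter):
        mean, cov_new = c_step(x, mean, cov, h)
        if ((cov_new - cov) ** 2).mean() < tol:
            return mean, cov_new
        cov = cov_new
    return mean, cov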
def _fit_one(self, x, idx, h, maxiter=2, mean=None, cov=None):
"""Compute mcd for one starting set of observations.
Parameters
----------
x : ndarray
Data.
idx : ndarray
Indices or mask of observations in starting set, used as ``x[idx]``
h : int
Number of observations in evaluation set for cov.
maxiter : int
Maximum number of c-steps.
Returns
-------
mean : ndarray
Estimated mean.
cov : ndarray
Estimated covariance.
det : float
Determinant of estimated covariance matrix.
Notes
-----
This does not do any preprocessing of the data and returns the
empirical mean and covariance of evaluation set of the data ``x``.
"""
if idx is not None:
x_sel = x[idx]
else:
x_sel = x
if mean is None:
mean = x_sel.mean(0)
if cov is None:
cov = np.cov(x_sel.T, ddof=1)
# updated with c-step
mean, cov, conv = self._cstep(x, mean, cov, h, maxiter=maxiter)
det = np.linalg.det(cov)
return mean, cov, det, conv | Compute mcd for one starting set of observations.
Parameters
----------
x : ndarray
Data.
idx : ndarray
Indices or mask of observations in starting set, used as ``x[idx]``
h : int
Number of observations in evaluation set for cov.
maxiter : int
Maximum number of c-steps.
Returns
-------
mean : ndarray
Estimated mean.
cov : ndarray
Estimated covariance.
det : float
Determinant of estimated covariance matrix.
Notes
-----
This does not do any preprocessing of the data and returns the
empirical mean and covariance of evaluation set of the data ``x``. | _fit_one | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def fit(self, h, *, h_start=None, mean_func=None, scale_func=None,
maxiter=100, options_start=None, reweight=True,
trim_frac=0.975, maxiter_step=100):
"""
Compute minimum covariance determinant estimate of mean and covariance.
x : array-like
Data with observations in rows and variables in columns.
h : int
Number of observations in evaluation set for minimizing
determinant.
h_start : int
Number of observations used in starting mean and covariance.
mean_func, scale_func : callable or None.
Mean and scale function for initial standardization.
Current defaults, if they are None, are median and mad, but
default scale_func will likely change.
options_start : None or dict
Options for the starting estimators.
currently not used
TODO: which options? e.g. for OGK
reweight : bool
If reweight is true, then a reweighted estimator is returned. The
reweighting is based on a chisquare trimming of Mahalanobis
distances. The raw results are in the ``results_raw`` attribute.
trim_frac : float in (0, 1)
Trim fraction used if reweight is true. Used to compute quantile
of chisquare distribution with tail probability 1 - trim_frac.
maxiter_step : int
Number of iterations in the c-step.
In the current implementation a small maxiter in the c-step does
not find the optimal solution.
Returns
-------
Holder instance with results
"""
x = self.data
nobs, k_vars = x.shape
if h is None:
h = (nobs + k_vars + 1) // 2 # check with literature
if mean_func is None:
mean_func = lambda x: np.median(x, axis=0) # noqa
if scale_func is None:
scale_func = mad
if options_start is None:
options_start = {}
if h_start is None:
nobs, k_vars = x.shape
h_start = max(nobs // 2 + 1, k_vars + 1)
m = mean_func(x)
s = scale_func(x)
z = (x - m) / s
# get initial mean, cov of standardized data, we only need ranking
# of obs
starts = _get_detcov_startidx(z, h_start, options_start)
fac_trunc = coef_normalize_cov_truncated(h / nobs, k_vars)
res = {}
for ii, ini in enumerate(starts):
idx_sel, method = ini
mean, cov, det, _ = self._fit_one(x, idx_sel, h,
maxiter=maxiter_step)
res[ii] = Holder(
mean=mean,
cov=cov * fac_trunc,
det_subset=det,
method=method,
)
det_all = np.array([i.det_subset for i in res.values()])
idx_best = np.argmin(det_all)
best = res[idx_best]
# mean = best.mean
# cov = best.cov
# need to c-step to convergence for best,
# is with best 2 in original DetMCD
if maxiter_step < maxiter:
mean, cov, det, conv = self._fit_one(x, None, h, maxiter=maxiter,
mean=best.mean, cov=best.cov)
best = Holder(
mean=mean,
cov=cov * fac_trunc,
det_subset=det,
method=method,
converged=conv,
)
# include extra info in returned Holder instance
best.det_all = det_all
best.idx_best = idx_best
best.tmean = m
best.tscale = s
if reweight:
cov, mean = _reweight(x, best.mean, best.cov, trim_frac=trim_frac,
ddof=1)
fac_trunc = coef_normalize_cov_truncated(trim_frac, k_vars)
best_w = Holder(
mean=mean,
cov=cov * fac_trunc,
# det_subset=det,
method=method,
results_raw=best,
)
return best_w
else:
return best # is Holder instance already | Compute minimum covariance determinant estimate of mean and covariance.
x : array-like
Data with observations in rows and variables in columns.
h : int
Number of observations in evaluation set for minimizing
determinant.
h_start : int
Number of observations used in starting mean and covariance.
mean_func, scale_func : callable or None.
Mean and scale function for initial standardization.
Current defaults, if they are None, are median and mad, but
default scale_func will likely change.
options_start : None or dict
Options for the starting estimators.
currently not used
TODO: which options? e.g. for OGK
reweight : bool
If reweight is true, then a reweighted estimator is returned. The
reweighting is based on a chisquare trimming of Mahalanobis
distances. The raw results are in the ``results_raw`` attribute.
trim_frac : float in (0, 1)
Trim fraction used if reweight is true. Used to compute quantile
of chisquare distribution with tail probability 1 - trim_frac.
maxiter_step : int
Number of iterations in the c-step.
In the current implementation a small maxiter in the c-step does
not find the optimal solution.
Returns
-------
Holder instance with results | fit | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
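A hedged usage sketch mirroring the call pattern in the unit test further below (``CovDetMCD(data).fit(h)``). The import path follows the source path of this file, and the result exposes ``mean`` and ``cov`` as in the Holder instances above; the simulated data are an assumption.
import numpy as np
from statsmodels.robust.covariance import CovDetMCD

rng = np.random.default_rng(123)
x = np.vstack([rng.normal(size=(95, 4)), rng.normal(8, 1, size=(5, 4))])

mod = CovDetMCD(x)
res = mod.fit(60)            # h = 60 observations in the determinant subset
print(res.mean)
print(res.cov)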
def _get_start_params(self, idx):
"""Starting parameters from a subsample given by index
Parameters
----------
idx : ndarray
Index used to select observations from the data. The index is used
for numpy arrays, so it can be either a boolean mask or integers.
Returns
-------
mean : ndarray
Mean of subsample
shape : ndarray
The shape matrix of the subsample which is the covariance
normalized so that determinant of shape is one.
scale : float
Scale of subsample, computed so that cov = shape * scale**2.
"""
x_sel = self.data[idx]
k = x_sel.shape[1]
mean = x_sel.mean(0)
cov = np.cov(x_sel.T)
scale2 = np.linalg.det(cov) ** (1 / k)
shape = cov / scale2
scale = np.sqrt(scale2)
return mean, shape, scale | Starting parameters from a subsample given by index
Parameters
----------
idx : ndarray
Index used to select observations from the data. The index is used
for numpy arrays, so it can be either a boolean mask or integers.
Returns
-------
mean : ndarray
Mean of subsample
shape : ndarray
The shape matrix of the subsample which is the covariance
normalized so that determinant of shape is one.
scale : float
Scale of subsample, computed so that cov = shape * scale**2. | _get_start_params | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause
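The shape/scale decomposition used here (cov = shape * scale**2 with det(shape) = 1) can be verified with a few lines of numpy; a minimal sketch with an assumed covariance matrix:
import numpy as np

cov = np.array([[2.0, 0.3], [0.3, 0.5]])
k = cov.shape[0]
scale2 = np.linalg.det(cov) ** (1 / k)
shape = cov / scale2
scale = np.sqrt(scale2)

assert np.isclose(np.linalg.det(shape), 1.0)
assert np.allclose(shape * scale**2, cov)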
def _fit_one(self, mean=None, shape=None, scale=None, maxiter=100):
"""Compute local M-estimator for one starting set of observations.
Parameters
----------
mean : ndarray
    Starting value for the mean.
shape : ndarray
    Starting value for the shape matrix, normalized to det(shape) = 1.
scale : float
    Starting value for the scale.
maxiter : int
    Maximum number of iterations.
Returns
-------
Holder instance with mean, shape, scale, cov and other attributes,
as returned by the local M-estimation (``CovM.fit``).
Notes
-----
This uses CovM to solve for the local optimum for given starting
values.
"""
res = self.mod.fit(
start_mean=mean,
start_shape=shape,
start_scale=scale,
maxiter=maxiter,
update_scale=True
)
return res | Compute local M-estimator for one starting set of observations.
Parameters
----------
mean : ndarray
    Starting value for the mean.
shape : ndarray
    Starting value for the shape matrix, normalized to det(shape) = 1.
scale : float
    Starting value for the scale.
maxiter : int
    Maximum number of iterations.
Returns
-------
Holder instance with mean, shape, scale, cov and other attributes,
as returned by the local M-estimation (``CovM.fit``).
Notes
-----
This uses CovM to solve for the local optimum for given starting
values. | _fit_one | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def fit(self, *, h_start=None, mean_func=None, scale_func=None,
maxiter=100, options_start=None, maxiter_step=5):
"""Compute S-estimator of mean and covariance.
Parameters
----------
h_start : int
Number of observations used in starting mean and covariance.
mean_func, scale_func : callable or None.
Mean and scale function for initial standardization.
Current defaults, if they are None, are median and mad, but
default scale_func will likely change.
options_start : None or dict
Options for the starting estimators.
TODO: which options? e.g. for OGK
Returns
-------
Holder instance with results
"""
x = self.data
nobs, k_vars = x.shape
if mean_func is None:
mean_func = lambda x: np.median(x, axis=0) # noqa
if scale_func is None:
scale_func = mad
if options_start is None:
options_start = {}
if h_start is None:
nobs, k_vars = x.shape
h_start = max(nobs // 2 + 1, k_vars + 1)
m = mean_func(x)
s = scale_func(x)
z = (x - m) / s
# get initial mean, cov of standardized data, we only need ranking
# of obs
starts = _get_detcov_startidx(z, h_start, options_start)
res = {}
for ii, ini in enumerate(starts):
idx_sel, method = ini
mean0, shape0, scale0 = self._get_start_params(idx_sel)
res_i = self._fit_one(
mean=mean0,
shape=shape0,
scale=scale0,
maxiter=maxiter_step,
)
res_i.method = method
res[ii] = res_i
scale_all = np.array([i.scale for i in res.values()])
idx_best = np.argmin(scale_all)
best = res[idx_best]
# mean = best.mean
# cov = best.cov
# need to c-step to convergence for best,
# is with best 2 in original DetMCD
if maxiter_step < maxiter:
best = self._fit_one(
mean=best.mean,
shape=best.shape,
scale=best.scale,
maxiter=maxiter,
)
# include extra info in returned Holder instance
best.scale_all = scale_all
best.idx_best = idx_best
best.tmean = m
best.tscale = s
return best # is Holder instance already | Compute S-estimator of mean and covariance.
Parameters
----------
h_start : int
Number of observations used in starting mean and covariance.
mean_func, scale_func : callable or None.
Mean and scale function for initial standardization.
Current defaults, if they are None, are median and mad, but
default scale_func will likely change.
options_start : None or dict
Options for the starting estimators.
TODO: which options? e.g. for OGK
Returns
-------
Holder instance with results | fit | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def fit(self, maxiter=100):
"""Estimate model parameters.
Parameters
----------
maxiter : int
Maximum number of iterations in the second stage M-estimation.
fit args : dict
currently missing
Returns
-------
Instance of a results or holder class.
Notes
-----
This uses CovDetS for the first stage estimation and CovM with fixed
scale in the second stage MM-estimation.
TODO: fit options are missing.
"""
# first stage estimate
mod_s = CovDetS(
self.data,
norm=None,
breakdown_point=self.breakdown_point
)
res_s = mod_s.fit()
res = self.mod.fit(
start_mean=res_s.mean,
start_shape=res_s.shape,
start_scale=res_s.scale,
maxiter=maxiter,
update_scale=False,
)
return res | Estimate model parameters.
Parameters
----------
maxiter : int
Maximum number of iterations in the second stage M-estimation.
fit args : dict
currently missing
Returns
-------
Instance of a results or holder class.
Notes
-----
This uses CovDetS for the first stage estimation and CovM with fixed
scale in the second stage MM-estimation.
TODO: fit options are missing. | fit | python | statsmodels/statsmodels | statsmodels/robust/covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/covariance.py | BSD-3-Clause |
def test_covdetmcd():
# results from rrcov
# > cdet = CovMcd(x = hbk, raw.only = TRUE, nsamp = "deterministic",
# use.correction=FALSE)
cov_dmcd_r = np.array("""
2.2059619213639 0.0223939863695 0.7898958050933 0.4060613360808
0.0223939863695 1.1384166802155 0.4315534571891 -0.2344041030201
0.7898958050933 0.4315534571891 1.8930117467493 -0.3292893001459
0.4060613360808 -0.2344041030201 -0.3292893001459 0.6179686100845
""".split(), float).reshape(4, 4)
mean_dmcd_r = np.array([1.7725, 2.2050, 1.5375, -0.0575])
mod = robcov.CovDetMCD(dta_hbk)
res = mod.fit(40, maxiter_step=100, reweight=False)
assert_allclose(res.mean, mean_dmcd_r, rtol=1e-5)
assert_allclose(res.cov, cov_dmcd_r, rtol=1e-5)
# with reweighting
# covMcd(x = hbk, nsamp = "deterministic", use.correction = FALSE)
# iBest: 5; C-step iterations: 7, 7, 7, 4, 6, 6
# Log(Det.): -2.42931967153
mean_dmcdw_r = np.array([1.5338983050847, 1.8322033898305, 1.6745762711864,
-0.0728813559322])
cov_dmcdw_r = np.array("""
1.5677744869295 0.09285770205078 0.252076010128 0.13873444408300
0.0928577020508 1.56769177397171 0.224929617385 -0.00516128856542
0.2520760101278 0.22492961738467 1.483829106079 -0.20275013775619
0.1387344440830 -0.00516128856542 -0.202750137756 0.43326701543885
""".split(), float).reshape(4, 4)
mod = robcov.CovDetMCD(dta_hbk)
res = mod.fit(40, maxiter_step=100) # default is reweight=True
assert_allclose(res.mean, mean_dmcdw_r, rtol=1e-5)
# R uses different trimming correction
# compare only shape (using trace for simplicity)
shape = res.cov / np.trace(res.cov)
shape_r = cov_dmcdw_r / np.trace(cov_dmcdw_r)
assert_allclose(shape, shape_r, rtol=1e-5) | .split(), float).reshape(4, 4)
mean_dmcd_r = np.array([1.7725, 2.2050, 1.5375, -0.0575])
mod = robcov.CovDetMCD(dta_hbk)
res = mod.fit(40, maxiter_step=100, reweight=False)
assert_allclose(res.mean, mean_dmcd_r, rtol=1e-5)
assert_allclose(res.cov, cov_dmcd_r, rtol=1e-5)
# with reweighting
# covMcd(x = hbk, nsamp = "deterministic", use.correction = FALSE)
# iBest: 5; C-step iterations: 7, 7, 7, 4, 6, 6
# Log(Det.): -2.42931967153
mean_dmcdw_r = np.array([1.5338983050847, 1.8322033898305, 1.6745762711864,
-0.0728813559322])
cov_dmcdw_r = np.array( | test_covdetmcd | python | statsmodels/statsmodels | statsmodels/robust/tests/test_covariance.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tests/test_covariance.py | BSD-3-Clause |
def mean_func(x):
"""mean function for example"""
return x + 0.25 * x**2 | mean function for example | mean_func | python | statsmodels/statsmodels | statsmodels/robust/tests/test_mquantiles.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tests/test_mquantiles.py | BSD-3-Clause |
def std_func(x):
"""standard deviation function for example"""
return 0.1 * np.exp(2.5 + 0.75 * np.abs(x)) | standard deviation function for example | std_func | python | statsmodels/statsmodels | statsmodels/robust/tests/test_mquantiles.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tests/test_mquantiles.py | BSD-3-Clause |
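These two helpers define a heteroscedastic mean/scale setup for the M-quantile tests; a minimal sketch of simulating data from it, using the two functions defined above (sample size and noise distribution are assumptions):
import numpy as np

rng = np.random.default_rng(0)
x = np.sort(rng.uniform(0, 3, size=200))
y = mean_func(x) + std_func(x) * rng.standard_normal(200)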
def _shift_intercept(arr):
"""
A convenience function to make the SAS covariance matrix
compatible with statsmodels.rlm covariance
"""
arr = np.asarray(arr)
side = int(np.sqrt(len(arr)))
return np.roll(np.roll(arr.reshape(side, side), -1, axis=1), -1, axis=0) | A convenience function to make the SAS covariance matrix
compatible with statsmodels.rlm covariance | _shift_intercept | python | statsmodels/statsmodels | statsmodels/robust/tests/results/results_rlm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/robust/tests/results/results_rlm.py | BSD-3-Clause |
def _get_exogs(self):
"""list of exogs, for internal use in post-estimation
"""
return (self.exog, self.exog_infl) | list of exogs, for internal use in post-estimation | _get_exogs | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def loglike(self, params):
"""
Loglikelihood of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \\ln L=\\sum_{y_{i}=0}\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\sum_{y_{i}>0}(\\ln(1-w_{i})+L_{main\\_model})
where P - pdf of main model, L - loglike function of main model.
"""
return np.sum(self.loglikeobs(params)) | Loglikelihood of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \\ln L=\\sum_{y_{i}=0}\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\sum_{y_{i}>0}(\\ln(1-w_{i})+L_{main\\_model})
where P - pdf of main model, L - loglike function of main model. | loglike | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def loglikeobs(self, params):
"""
Loglikelihood for observations of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes for definition.
Notes
-----
.. math:: \\ln L=\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\ln(1-w_{i})+L_{main\\_model}
where P - pdf of main model, L - loglike function of main model.
for observations :math:`i=1,...,n`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
llf_main = self.model_main.loglikeobs(params_main)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
llf = np.zeros_like(y, dtype=np.float64)
llf[zero_idx] = (np.log(w[zero_idx] +
(1 - w[zero_idx]) * np.exp(llf_main[zero_idx])))
llf[nonzero_idx] = np.log(1 - w[nonzero_idx]) + llf_main[nonzero_idx]
return llf | Loglikelihood for observations of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes for definition.
Notes
-----
.. math:: \\ln L=\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\ln(1-w_{i})+L_{main\\_model}
where P - pdf of main model, L - loglike function of main model.
for observations :math:`i=1,...,n` | loglikeobs | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
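The per-observation formula can be written out directly for the Poisson case; a self-contained sketch in which the inflation probabilities ``w`` and Poisson means ``mu`` are assumed given (e.g. from a logit and a log link):
import numpy as np
from scipy import stats

def zip_loglikeobs(y, w, mu):
    # zero-inflated Poisson log-likelihood per observation
    llf_main = stats.poisson.logpmf(y, mu)
    return np.where(
        y == 0,
        np.log(w + (1 - w) * np.exp(llf_main)),
        np.log(1 - w) + llf_main,
    )

y = np.array([0, 0, 1, 3])
w = np.full(4, 0.2)          # P(inflated zero)
mu = np.full(4, 1.5)         # Poisson mean
print(zip_loglikeobs(y, w, mu))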
def score_obs(self, params):
"""
Generic Zero Inflated model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
# Unused, commented out
# mu = self.model_main.predict(params_main)
# TODO: need to allow for complex to use CS numerical derivatives
dldp = np.zeros((self.exog.shape[0], self.k_exog), dtype=np.float64)
dldw = np.zeros_like(self.exog_infl, dtype=np.float64)
dldp[zero_idx,:] = (score_main[zero_idx].T *
(1 - (w[zero_idx]) / np.exp(llf[zero_idx]))).T
dldp[nonzero_idx,:] = score_main[nonzero_idx]
if self.inflation == 'logit':
dldw[zero_idx,:] = (self.exog_infl[zero_idx].T * w[zero_idx] *
(1 - w[zero_idx]) *
(1 - np.exp(llf_main[zero_idx])) /
np.exp(llf[zero_idx])).T
dldw[nonzero_idx,:] = -(self.exog_infl[nonzero_idx].T *
w[nonzero_idx]).T
elif self.inflation == 'probit':
return approx_fprime(params, self.loglikeobs)
return np.hstack((dldw, dldp)) | Generic Zero Inflated model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params` | score_obs | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def hessian(self, params):
"""
Generic Zero Inflated model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
"""
hess_arr_main = self._hessian_main(params)
hess_arr_infl = self._hessian_inflate(params)
if hess_arr_main is None or hess_arr_infl is None:
return approx_hess(params, self.loglike)
dim = self.k_exog + self.k_inflate
hess_arr = np.zeros((dim, dim))
hess_arr[:self.k_inflate,:] = hess_arr_infl
hess_arr[self.k_inflate:,self.k_inflate:] = hess_arr_main
tri_idx = np.triu_indices(self.k_exog + self.k_inflate, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr | Generic Zero Inflated model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
----- | hessian | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def predict(self, params, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean', y_values=None):
"""
Predict expected response or other statistic given exogenous variables.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor with coefficient
equal to 1. If exposure is specified, then it will be logged by
the method. The user does not need to log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
which : str (optional)
Statistic to predict. Default is 'mean'.
- 'mean' : the conditional expectation of endog E(y | x). This
takes inflated zeros into account.
- 'linear' : the linear predictor of the mean function.
- 'var' : returns the estimated variance of endog implied by the
model.
- 'mean-main' : mean of the main count model
- 'prob-main' : probability of selecting the main model.
The probability of zero inflation is ``1 - prob-main``.
- 'mean-nonzero' : expected value conditional on having an observation
larger than zero, E(y | X, y>0)
- 'prob-zero' : probability of observing a zero count. P(y=0 | x)
- 'prob' : probabilities of each count from 0 to max(endog), or
for y_values if those are provided. This is a multivariate
return (2-dim when predicting for several observations).
y_values : array_like
Values of the random variable endog at which pmf is evaluated.
Only used if ``which="prob"``
"""
no_exog = False
if exog is None:
no_exog = True
exog = self.exog
if exog_infl is None:
if no_exog:
exog_infl = self.exog_infl
else:
if self._no_exog_infl:
exog_infl = np.ones((len(exog), 1))
else:
exog_infl = np.asarray(exog_infl)
if exog_infl.ndim == 1 and self.k_inflate == 1:
exog_infl = exog_infl[:, None]
if exposure is None:
if no_exog:
exposure = getattr(self, 'exposure', 0)
else:
exposure = 0
else:
exposure = np.log(exposure)
if offset is None:
if no_exog:
offset = getattr(self, 'offset', 0)
else:
offset = 0
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
prob_main = 1 - self.model_infl.predict(params_infl, exog_infl)
lin_pred = np.dot(exog, params_main[:self.exog.shape[1]]) + exposure + offset
# Refactor: This is pretty hacky,
# there should be an appropriate predict method in model_main
# this is just prob(y=0 | model_main)
tmp_exog = self.model_main.exog
tmp_endog = self.model_main.endog
tmp_offset = getattr(self.model_main, 'offset', False)
tmp_exposure = getattr(self.model_main, 'exposure', False)
self.model_main.exog = exog
self.model_main.endog = np.zeros(exog.shape[0])
self.model_main.offset = offset
self.model_main.exposure = exposure
llf = self.model_main.loglikeobs(params_main)
self.model_main.exog = tmp_exog
self.model_main.endog = tmp_endog
# tmp_offset might be an array with elementwise equality testing
#if np.size(tmp_offset) == 1 and tmp_offset[0] == 'no':
if tmp_offset is False:
del self.model_main.offset
else:
self.model_main.offset = tmp_offset
#if np.size(tmp_exposure) == 1 and tmp_exposure[0] == 'no':
if tmp_exposure is False:
del self.model_main.exposure
else:
self.model_main.exposure = tmp_exposure
# end hack
prob_zero = (1 - prob_main) + prob_main * np.exp(llf)
if which == 'mean':
return prob_main * np.exp(lin_pred)
elif which == 'mean-main':
return np.exp(lin_pred)
elif which == 'linear':
return lin_pred
elif which == 'mean-nonzero':
return prob_main * np.exp(lin_pred) / (1 - prob_zero)
elif which == 'prob-zero':
return prob_zero
elif which == 'prob-main':
return prob_main
elif which == 'var':
mu = np.exp(lin_pred)
return self._predict_var(params, mu, 1 - prob_main)
elif which == 'prob':
return self._predict_prob(params, exog, exog_infl, exposure,
offset, y_values=y_values)
else:
raise ValueError('which = %s is not available' % which) | Predict expected response or other statistic given exogenous variables.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor with coefficient
equal to 1. If exposure is specified, then it will be logged by
the method. The user does not need to log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
which : str (optional)
Statistic to predict. Default is 'mean'.
- 'mean' : the conditional expectation of endog E(y | x). This
takes inflated zeros into account.
- 'linear' : the linear predictor of the mean function.
- 'var' : returns the estimated variance of endog implied by the
model.
- 'mean-main' : mean of the main count model
- 'prob-main' : probability of selecting the main model.
The probability of zero inflation is ``1 - prob-main``.
- 'mean-nonzero' : expected value conditional on having an observation
larger than zero, E(y | X, y>0)
- 'prob-zero' : probability of observing a zero count. P(y=0 | x)
- 'prob' : probabilities of each count from 0 to max(endog), or
for y_values if those are provided. This is a multivariate
return (2-dim when predicting for several observations).
y_values : array_like
Values of the random variable endog at which pmf is evaluated.
Only used if ``which="prob"`` | predict | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
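A hedged end-to-end sketch of these prediction options using the public ``ZeroInflatedPoisson`` model from this module; the simulated data and fit options are assumptions, while the ``which`` strings follow the docstring above.
import numpy as np
import statsmodels.api as sm
from statsmodels.discrete.count_model import ZeroInflatedPoisson

rng = np.random.default_rng(42)
n = 500
exog = sm.add_constant(rng.normal(size=(n, 1)))
mu = np.exp(0.3 + 0.5 * exog[:, 1])
endog = rng.poisson(mu)
endog[rng.uniform(size=n) < 0.2] = 0          # add excess zeros

mod = ZeroInflatedPoisson(endog, exog, inflation="logit")
res = mod.fit(disp=False)

print(res.predict(which="mean")[:5])          # E(y | x), inflation included
print(res.predict(which="prob-zero")[:5])     # P(y = 0 | x)
print(res.predict(which="prob", y_values=np.arange(5))[:2])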
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""NotImplemented
"""
raise NotImplementedError | NotImplemented | _derivative_predict | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
def _deriv_mean_dparams(self, params):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main)
score_infl = self.model_infl._deriv_mean_dparams(params_infl)
score_main = self.model_main._deriv_mean_dparams(params_main)
dmat_infl = - mu[:, None] * score_infl
dmat_main = (1 - w[:, None]) * score_main
dmat = np.column_stack((dmat_infl, dmat_main))
return dmat | Derivative of the expected endog with respect to the parameters.
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector. | _deriv_mean_dparams | python | statsmodels/statsmodels | statsmodels/discrete/count_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/discrete/count_model.py | BSD-3-Clause |
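For reference, a short derivation consistent with the mean used throughout this module, E(y|x) = (1 - w) * mu: the product rule gives
.. math:: \frac{\partial E(y|x)}{\partial \gamma} = -\mu \, \frac{\partial w}{\partial \gamma}, \qquad \frac{\partial E(y|x)}{\partial \beta} = (1 - w) \, \frac{\partial \mu}{\partial \beta}
where gamma are the inflation parameters and beta the main-model parameters; these two blocks correspond to ``dmat_infl`` and ``dmat_main`` stacked above.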