from math import sqrt
import numpy as np
from scipy._lib._util import _validate_int
from scipy.optimize import brentq
from scipy.special import ndtri
from ._discrete_distns import binom
from ._common import ConfidenceInterval
class BinomTestResult:
"""
Result of `scipy.stats.binomtest`.
Attributes
----------
k : int
The number of successes (copied from `binomtest` input).
n : int
The number of trials (copied from `binomtest` input).
alternative : str
Indicates the alternative hypothesis specified in the input
to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
or ``'less'``.
    statistic : float
        The estimate of the proportion of successes.
pvalue : float
The p-value of the hypothesis test.
"""
def __init__(self, k, n, alternative, statistic, pvalue):
self.k = k
self.n = n
self.alternative = alternative
self.statistic = statistic
self.pvalue = pvalue
# add alias for backward compatibility
self.proportion_estimate = statistic
def __repr__(self):
s = ("BinomTestResult("
f"k={self.k}, "
f"n={self.n}, "
f"alternative={self.alternative!r}, "
f"statistic={self.statistic}, "
f"pvalue={self.pvalue})")
return s
def proportion_ci(self, confidence_level=0.95, method='exact'):
"""
Compute the confidence interval for ``statistic``.
Parameters
----------
confidence_level : float, optional
Confidence level for the computed confidence interval
of the estimated proportion. Default is 0.95.
method : {'exact', 'wilson', 'wilsoncc'}, optional
Selects the method used to compute the confidence interval
for the estimate of the proportion:
'exact' :
Use the Clopper-Pearson exact method [1]_.
'wilson' :
Wilson's method, without continuity correction ([2]_, [3]_).
'wilsoncc' :
Wilson's method, with continuity correction ([2]_, [3]_).
Default is ``'exact'``.
Returns
-------
ci : ``ConfidenceInterval`` object
The object has attributes ``low`` and ``high`` that hold the
lower and upper bounds of the confidence interval.
References
----------
.. [1] C. J. Clopper and E. S. Pearson, The use of confidence or
fiducial limits illustrated in the case of the binomial,
Biometrika, Vol. 26, No. 4, pp 404-413 (Dec. 1934).
.. [2] E. B. Wilson, Probable inference, the law of succession, and
statistical inference, J. Amer. Stat. Assoc., 22, pp 209-212
(1927).
.. [3] Robert G. Newcombe, Two-sided confidence intervals for the
single proportion: comparison of seven methods, Statistics
in Medicine, 17, pp 857-872 (1998).
Examples
--------
>>> from scipy.stats import binomtest
>>> result = binomtest(k=7, n=50, p=0.1)
>>> result.statistic
0.14
>>> result.proportion_ci()
ConfidenceInterval(low=0.05819170033997342, high=0.26739600249700846)
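        For illustration, a non-default method is selected with the
        ``method`` argument.  The exact endpoints are not shown here (they
        depend on floating point details); as a quick check, the Wilson
        interval for this data contains the point estimate:
        >>> ci = result.proportion_ci(confidence_level=0.95, method='wilson')
        >>> bool(ci.low < result.statistic < ci.high)
        True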
"""
if method not in ('exact', 'wilson', 'wilsoncc'):
raise ValueError(f"method ('{method}') must be one of 'exact', "
"'wilson' or 'wilsoncc'.")
if not (0 <= confidence_level <= 1):
raise ValueError(f'confidence_level ({confidence_level}) must be in '
'the interval [0, 1].')
if method == 'exact':
low, high = _binom_exact_conf_int(self.k, self.n,
confidence_level,
self.alternative)
else:
# method is 'wilson' or 'wilsoncc'
low, high = _binom_wilson_conf_int(self.k, self.n,
confidence_level,
self.alternative,
correction=method == 'wilsoncc')
return ConfidenceInterval(low=low, high=high)
def _findp(func):
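    # The callers pass functions that are monotonic in p (a binomial tail
    # probability minus alpha), so the root located by brentq on [0, 1],
    # when it exists, is the unique confidence limit.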
try:
p = brentq(func, 0, 1)
except RuntimeError:
raise RuntimeError('numerical solver failed to converge when '
'computing the confidence limits') from None
except ValueError as exc:
raise ValueError('brentq raised a ValueError; report this to the '
'SciPy developers') from exc
return p
def _binom_exact_conf_int(k, n, confidence_level, alternative):
"""
Compute the estimate and confidence interval for the binomial test.
Returns proportion, prop_low, prop_high
"""
if alternative == 'two-sided':
alpha = (1 - confidence_level) / 2
if k == 0:
plow = 0.0
else:
plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
if k == n:
phigh = 1.0
else:
phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
elif alternative == 'less':
alpha = 1 - confidence_level
plow = 0.0
if k == n:
phigh = 1.0
else:
phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
elif alternative == 'greater':
alpha = 1 - confidence_level
if k == 0:
plow = 0.0
else:
plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
phigh = 1.0
return plow, phigh
def _binom_wilson_conf_int(k, n, confidence_level, alternative, correction):
# This function assumes that the arguments have already been validated.
# In particular, `alternative` must be one of 'two-sided', 'less' or
# 'greater'.
p = k / n
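    # For a two-sided interval at confidence level c, each tail holds
    # (1 - c)/2, so the critical value is z = ndtri(1 - (1 - c)/2)
    # = ndtri(0.5 + 0.5*c).  A one-sided interval puts all of 1 - c in a
    # single tail, so z = ndtri(c).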
if alternative == 'two-sided':
z = ndtri(0.5 + 0.5*confidence_level)
else:
z = ndtri(confidence_level)
# For reference, the formulas implemented here are from
# Newcombe (1998) (ref. [3] in the proportion_ci docstring).
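    # Without continuity correction, the two-sided Wilson limits are
    #     (p + z**2/(2*n) +/- (z/(2*n))*sqrt(4*n*p*(1 - p) + z**2)) / (1 + z**2/n)
    # Multiplying numerator and denominator by 2*n gives the form used
    # below: center +/- delta over the common denominator
    # denom = 2*(n + z**2).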
denom = 2*(n + z**2)
center = (2*n*p + z**2)/denom
q = 1 - p
if correction:
if alternative == 'less' or k == 0:
lo = 0.0
else:
dlo = (1 + z*sqrt(z**2 - 2 - 1/n + 4*p*(n*q + 1))) / denom
lo = center - dlo
if alternative == 'greater' or k == n:
hi = 1.0
else:
dhi = (1 + z*sqrt(z**2 + 2 - 1/n + 4*p*(n*q - 1))) / denom
hi = center + dhi
else:
delta = z/denom * sqrt(4*n*p*q + z**2)
if alternative == 'less' or k == 0:
lo = 0.0
else:
lo = center - delta
if alternative == 'greater' or k == n:
hi = 1.0
else:
hi = center + delta
return lo, hi
def binomtest(k, n, p=0.5, alternative='two-sided'):
"""
Perform a test that the probability of success is p.
The binomial test [1]_ is a test of the null hypothesis that the
probability of success in a Bernoulli experiment is `p`.
Details of the test can be found in many texts on statistics, such
as section 24.5 of [2]_.
Parameters
----------
k : int
The number of successes.
n : int
The number of trials.
p : float, optional
The hypothesized probability of success, i.e. the expected
proportion of successes. The value must be in the interval
``0 <= p <= 1``. The default value is ``p = 0.5``.
alternative : {'two-sided', 'greater', 'less'}, optional
Indicates the alternative hypothesis. The default value is
'two-sided'.
Returns
-------
result : `~scipy.stats._result_classes.BinomTestResult` instance
The return value is an object with the following attributes:
k : int
The number of successes (copied from `binomtest` input).
n : int
The number of trials (copied from `binomtest` input).
alternative : str
Indicates the alternative hypothesis specified in the input
to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
or ``'less'``.
statistic : float
The estimate of the proportion of successes.
pvalue : float
The p-value of the hypothesis test.
The object has the following methods:
proportion_ci(confidence_level=0.95, method='exact') :
Compute the confidence interval for ``statistic``.
Notes
-----
.. versionadded:: 1.7.0
References
----------
.. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test
.. [2] Jerrold H. Zar, Biostatistical Analysis (fifth edition),
Prentice Hall, Upper Saddle River, New Jersey USA (2010)
Examples
--------
>>> from scipy.stats import binomtest
    A car manufacturer claims that no more than 10% of their cars are unsafe.
    Fifteen cars are inspected for safety, and three are found to be unsafe.
    Test the manufacturer's claim:
>>> result = binomtest(3, n=15, p=0.1, alternative='greater')
>>> result.pvalue
0.18406106910639114
    The null hypothesis cannot be rejected at the 5% level of significance
    because the returned p-value is greater than the significance level of 5%.
The test statistic is equal to the estimated proportion, which is simply
``3/15``:
>>> result.statistic
0.2
We can use the `proportion_ci()` method of the result to compute the
confidence interval of the estimate:
>>> result.proportion_ci(confidence_level=0.95)
ConfidenceInterval(low=0.05684686759024681, high=1.0)
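    For ``alternative='greater'``, the p-value is the binomial survival
    function evaluated at ``k - 1``.  As a quick numerical sanity check
    (not part of the documented API):
    >>> import numpy as np
    >>> from scipy.stats import binom
    >>> bool(np.isclose(result.pvalue, binom.sf(2, n=15, p=0.1)))
    True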
"""
k = _validate_int(k, 'k', minimum=0)
n = _validate_int(n, 'n', minimum=1)
if k > n:
raise ValueError(f'k ({k}) must not be greater than n ({n}).')
if not (0 <= p <= 1):
raise ValueError(f"p ({p}) must be in range [0,1]")
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError(f"alternative ('{alternative}') not recognized; \n"
"must be 'two-sided', 'less' or 'greater'")
if alternative == 'less':
pval = binom.cdf(k, n, p)
elif alternative == 'greater':
pval = binom.sf(k-1, n, p)
else:
# alternative is 'two-sided'
d = binom.pmf(k, n, p)
rerr = 1 + 1e-7
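        # The two-sided p-value sums binom.pmf(x, n, p) over every outcome x
        # whose probability does not exceed that of the observed k (up to the
        # relative tolerance rerr, which absorbs floating point error).  The
        # boundary of that set on the far side of the mode is located with a
        # binary search rather than by summing term by term.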
if k == p * n:
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif k < p * n:
ix = _binary_search_for_binom_tst(lambda x1: -binom.pmf(x1, n, p),
-d*rerr, np.ceil(p * n), n)
            # y is the number of terms in [mode, n] whose pmf is <= d*rerr.
            # The binary search (over -pmf, which is increasing there)
            # returned ix, the last index with binom.pmf(ix, n, p) >= d*rerr,
            # so y = n - ix.  If the pmf at ix equals d*rerr exactly, ix must
            # be counted as well; due to rerr, that equality holds only in
            # very rare situations.
y = n - ix + int(d*rerr == binom.pmf(ix, n, p))
pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p)
else:
ix = _binary_search_for_binom_tst(lambda x1: binom.pmf(x1, n, p),
d*rerr, 0, np.floor(p * n))
            # y is the number of terms in [0, mode] whose pmf is <= d*rerr;
            # the + 1 accounts for the 0 index.
            # For a comparison with the old behavior, see the
            # tst_binary_srch_for_binom_tst method in test_morestats.
y = ix + 1
pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p)
pval = min(1.0, pval)
result = BinomTestResult(k=k, n=n, alternative=alternative,
statistic=k/n, pvalue=pval)
return result
def _binary_search_for_binom_tst(a, d, lo, hi):
"""
Conducts an implicit binary search on a function specified by `a`.
Meant to be used on the binomial PMF for the case of two-sided tests
to obtain the value on the other side of the mode where the tail
probability should be computed. The values on either side of
the mode are always in order, meaning binary search is applicable.
Parameters
----------
a : callable
The function over which to perform binary search. Its values
for inputs lo and hi should be in ascending order.
    d : float
        The value to search for.
    lo : int
        The lower end of the range to search.
    hi : int
        The higher end of the range to search.
    Returns
    -------
    int
        The index i, between lo and hi, such that a(i) <= d < a(i+1).
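    Examples
    --------
    A minimal illustration with the identity function (not how the helper
    is used by `binomtest`, where `a` is the binomial PMF): the largest
    index in ``[0, 5]`` whose value does not exceed 3.5 is 3.
    >>> _binary_search_for_binom_tst(lambda x: x, 3.5, 0, 5)
    3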
"""
while lo < hi:
mid = lo + (hi-lo)//2
midval = a(mid)
if midval < d:
lo = mid+1
elif midval > d:
hi = mid-1
else:
return mid
if a(lo) <= d:
return lo
else:
return lo-1