# Functions extracted from statsmodels (BSD-3-Clause):
#   statsmodels/sandbox/stats/contrast_tools.py
#   statsmodels/sandbox/stats/stats_dhuard.py
#   statsmodels/sandbox/stats/multicomp.py
# https://github.com/statsmodels/statsmodels
def r_nointer(self):
'''contrast/restriction matrix for no interaction
'''
nia = self.n_interaction
R_nointer = np.hstack((np.zeros((nia, self.nvars-nia)), np.eye(nia)))
#inter_direct = resols_full_dropf.tval[-nia:]
R_nointer_transf = self.transform.inv_dot_right(R_nointer)
self.R_nointer_transf = R_nointer_transf
return R_nointer_transf
def ttest_interaction(self):
'''t-tests that the no-interaction terms are zero
'''
#use self.r_nointer instead
nia = self.n_interaction
R_nointer = np.hstack((np.zeros((nia, self.nvars-nia)), np.eye(nia)))
#inter_direct = resols_full_dropf.tval[-nia:]
R_nointer_transf = self.transform.inv_dot_right(R_nointer)
self.R_nointer_transf = R_nointer_transf
t_res = self.resols.t_test(R_nointer_transf)
return t_res
def ftest_interaction(self):
'''F-test that the no-interaction terms are zero
'''
R_nointer_transf = self.r_nointer()
return self.resols.f_test(R_nointer_transf)
def scoreatpercentile(data, percentile):
"""Return the score at the given percentile of the data.
Example:
>>> data = randn(100)
>>> scoreatpercentile(data, 50)
will return the median of sample `data`.
"""
per = np.array(percentile)
cdf = empiricalcdf(data)
interpolator = interpolate.interp1d(np.sort(cdf), np.sort(data))
return interpolator(per/100.)
def percentileofscore(data, score):
"""Return the percentile-position of score relative to data.
score: Array of scores at which the percentile is computed.
Return percentiles (0-100).
Example
r = randn(50)
x = linspace(-2,2,100)
percentileofscore(r,x)
Raise an error if the score is outside the range of data.
"""
cdf = empiricalcdf(data)
interpolator = interpolate.interp1d(np.sort(data), np.sort(cdf))
return interpolator(score)*100.
def empiricalcdf(data, method='Hazen'):
"""Return the empirical cdf.
Methods available:
Hazen: (i-0.5)/N
Weibull: i/(N+1)
Chegodayev: (i-.3)/(N+.4)
Cunnane: (i-.4)/(N+.2)
Gringorten: (i-.44)/(N+.12)
California: (i-1)/N
Where i goes from 1 to N.
"""
i = np.argsort(np.argsort(data)) + 1.
N = len(data)
method = method.lower()
if method == 'hazen':
cdf = (i-0.5)/N
elif method == 'weibull':
cdf = i/(N+1.)
elif method == 'california':
cdf = (i-1.)/N
elif method == 'chegodayev':
cdf = (i-.3)/(N+.4)
elif method == 'cunnane':
cdf = (i-.4)/(N+.2)
elif method == 'gringorten':
cdf = (i-.44)/(N+.12)
else:
raise ValueError('Unknown method. Choose among Weibull, Hazen,'
'Chegodayev, Cunnane, Gringorten and California.')
return cdf
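
# A small illustration (hypothetical, not part of statsmodels): plotting
# positions for a toy sample under two of the supported methods.
def _example_empiricalcdf():
    import numpy as np
    x = np.array([3.0, 1.0, 4.0, 1.5, 5.0])
    hazen = empiricalcdf(x)                      # (i - 0.5) / N
    weibull = empiricalcdf(x, method='Weibull')  # i / (N + 1)
    return hazen, weibull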
def empiricalcdf(self, data=None, method='Hazen'):
"""Return the empirical cdf.
Methods available:
Hazen: (i-0.5)/N
Weibull: i/(N+1)
Chegodayev: (i-.3)/(N+.4)
Cunnane: (i-.4)/(N+.2)
Gringorten: (i-.44)/(N+.12)
California: (i-1)/N
Where i goes from 1 to N.
"""
if data is None:
data = self.data
i = self.ranking
else:
i = np.argsort(np.argsort(data)) + 1.
N = len(data)
method = method.lower()
if method == 'hazen':
cdf = (i-0.5)/N
elif method == 'weibull':
cdf = i/(N+1.)
elif method == 'california':
cdf = (i-1.)/N
elif method == 'chegodayev':
cdf = (i-.3)/(N+.4)
elif method == 'cunnane':
cdf = (i-.4)/(N+.2)
elif method == 'gringorten':
cdf = (i-.44)/(N+.12)
else:
raise ValueError('Unknown method. Choose among Weibull, Hazen,'
'Chegodayev, Cunnane, Gringorten and California.')
return cdf
def cdf_emp(self, score):
'''
empirical cdf interpolated at the given score values
'''
return self.cdfintp(score)
def ppf_emp(self, quantile):
'''
empirical quantile (inverse cdf) interpolated at the given probability
'''
return self.ppfintp(quantile)
def optimize_binning(self, method='Freedman'):
"""Find the optimal number of bins and update the bin countaccordingly.
Available methods : Freedman
Scott
"""
nobs = len(self.data)
if method=='Freedman':
IQR = self.ppf_emp(0.75) - self.ppf_emp(0.25) # Interquantile range(75% -25%)
width = 2* IQR* nobs**(-1./3)
elif method=='Scott':
width = 3.49 * np.std(self.data) * nobs**(-1./3)
else:
raise ValueError("method must be 'Freedman' or 'Scott'")
self.nbin = (np.ptp(self.binlimit)/width)
return self.nbin
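
# Standalone sketch (hypothetical) of the Freedman-Diaconis rule used by
# ``optimize_binning`` above, without requiring the surrounding class:
def _example_freedman_width():
    import numpy as np
    np.random.seed(0)
    x = np.random.randn(500)
    iqr = np.percentile(x, 75) - np.percentile(x, 25)  # interquartile range
    width = 2 * iqr * len(x) ** (-1.0 / 3)             # optimal bin width
    return np.ptp(x) / width                           # implied number of bins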
def get_tukeyQcrit(k, df, alpha=0.05):
"""
return critical values for Tukey's HSD (Q)
Parameters
----------
k : int in {2, ..., 10}
number of tests
df : int
degrees of freedom of error term
alpha : {0.05, 0.01}
type 1 error, 1-confidence level
not enough error checking for limitations
"""
if alpha == 0.05:
intp = interpolate.interp1d(crows, cv005[:, k - 2])
elif alpha == 0.01:
intp = interpolate.interp1d(crows, cv001[:, k - 2])
else:
raise ValueError("only implemented for alpha equal to 0.01 and 0.05")
return intp(df)
def get_tukeyQcrit2(k, df, alpha=0.05):
"""
return critical values for Tukey's HSD (Q)
Parameters
----------
k : int
number of tests
df : int
degrees of freedom of error term
alpha : float
type 1 error, 1 - confidence level
This version uses ``scipy.stats.studentized_range`` and is not
restricted to the tabulated values of k, df and alpha used by
``get_tukeyQcrit``.
"""
return studentized_range.ppf(1 - alpha, k, df)
def get_tukey_pvalue(k, df, q):
"""
return adjusted p-values for Tukey's HSD
Parameters
----------
k : int in {2, ..., 10}
number of tests
df : int
degrees of freedom of error term
q : scalar, array_like; q >= 0
quantile value of Studentized Range
"""
return studentized_range.sf(q, k, df)
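
# Round-trip sketch (hypothetical): the critical value from
# ``get_tukeyQcrit2`` should map back to alpha via ``get_tukey_pvalue``,
# since both wrap ``scipy.stats.studentized_range``.
def _example_tukey_crit_pvalue():
    q = get_tukeyQcrit2(3, 20, alpha=0.05)  # k=3 groups, df=20
    p = get_tukey_pvalue(3, 20, q)          # recovers approximately 0.05
    return q, p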
def Tukeythreegene2(genes): # Performing the Tukey HSD post-hoc test for three genes
"""gend is a list, ie [first, second, third]"""
# qwb = xlrd.open_workbook('F:/Lab/bioinformatics/qcrittable.xls')
# opening the workbook containing the q crit table
# qwb.sheet_names()
# qcrittable = qwb.sheet_by_name(u'Sheet1')
means = []
stds = []
for gene in genes:
means.append(np.mean(gene))
stds.append(np.std(gene))  # fixed: was ``std.append``, a NameError; see GH#5756
# firstmean = np.mean(first) #means of the three arrays
# secondmean = np.mean(second)
# thirdmean = np.mean(third)
# firststd = np.std(first) #standard deviations of the three arrays
# secondstd = np.std(second)
# thirdstd = np.std(third)
stds2 = []
for std in stds:
stds2.append(math.pow(std, 2))
def maxzero(x):
"""find all up zero crossings and return the index of the highest
Not used anymore
>>> np.random.seed(12345)
>>> x = np.random.randn(8)
>>> x
array([-0.20470766, 0.47894334, -0.51943872, -0.5557303 , 1.96578057,
1.39340583, 0.09290788, 0.28174615])
>>> maxzero(x)
(4, array([1, 4]))
no up-zero-crossing at end
>>> np.random.seed(0)
>>> x = np.random.randn(8)
>>> x
array([ 1.76405235, 0.40015721, 0.97873798, 2.2408932 , 1.86755799,
-0.97727788, 0.95008842, -0.15135721])
>>> maxzero(x)
(None, array([6]))
"""
x = np.asarray(x)
cond1 = x[:-1] < 0
cond2 = x[1:] > 0
# allzeros = np.nonzero(np.sign(x[:-1])*np.sign(x[1:]) <= 0)[0] + 1
allzeros = np.nonzero((cond1 & cond2) | (x[1:] == 0))[0] + 1
if x[-1] >= 0:
maxz = max(allzeros)
else:
maxz = None
return maxz, allzeros
def maxzerodown(x):
"""find all up zero crossings and return the index of the highest
Not used anymore
>>> np.random.seed(12345)
>>> x = np.random.randn(8)
>>> x
array([-0.20470766, 0.47894334, -0.51943872, -0.5557303 , 1.96578057,
1.39340583, 0.09290788, 0.28174615])
>>> maxzero(x)
(4, array([1, 4]))
no up-zero-crossing at end
>>> np.random.seed(0)
>>> x = np.random.randn(8)
>>> x
array([ 1.76405235, 0.40015721, 0.97873798, 2.2408932 , 1.86755799,
-0.97727788, 0.95008842, -0.15135721])
>>> maxzero(x)
(None, array([6]))
"""
x = np.asarray(x)
cond1 = x[:-1] > 0
cond2 = x[1:] < 0
# allzeros = np.nonzero(np.sign(x[:-1])*np.sign(x[1:]) <= 0)[0] + 1
allzeros = np.nonzero((cond1 & cond2) | (x[1:] == 0))[0] + 1
if x[-1] <= 0:
maxz = max(allzeros)
else:
maxz = None
return maxz, allzeros
def rejectionline(n, alpha=0.5):
"""reference line for rejection in multiple tests
Not used anymore
from: section 3.2, page 60
"""
t = np.arange(n) / float(n)
frej = t / (t * (1 - alpha) + alpha)
return frej
def fdrcorrection_bak(pvals, alpha=0.05, method="indep"):
"""Reject False discovery rate correction for pvalues
Old version, to be deleted
missing: methods that estimate fraction of true hypotheses
"""
from statsmodels.stats.multitest import _ecdf as ecdf
pvals = np.asarray(pvals)
pvals_sortind = np.argsort(pvals)
pvals_sorted = pvals[pvals_sortind]
pecdf = ecdf(pvals_sorted)
if method in ["i", "indep", "p", "poscorr"]:
rline = pvals_sorted / alpha
elif method in ["n", "negcorr"]:
cm = np.sum(1.0 / np.arange(1, len(pvals)))
rline = pvals_sorted / alpha * cm
elif method in ["g", "onegcorr"]: # what's this ? german diss
rline = pvals_sorted / (pvals_sorted * (1 - alpha) + alpha)
elif method in ["oth", "o2negcorr"]: # other invalid, cut-paste
cm = np.sum(np.arange(len(pvals)))
rline = pvals_sorted / alpha / cm
else:
raise ValueError("method not available")
reject = pecdf >= rline
if reject.any():
rejectmax = max(np.nonzero(reject)[0])
else:
rejectmax = 0
reject[:rejectmax] = True
return reject[pvals_sortind.argsort()]
def mcfdr(nrepl=100, nobs=50, ntests=10, ntrue=6, mu=0.5, alpha=0.05, rho=0.0):
"""MonteCarlo to test fdrcorrection"""
from statsmodels.stats.multitest import fdrcorrection as fdrcorrection0
# Unused result, commented out
# ntests - ntrue
locs = np.array([0.0] * ntrue + [mu] * (ntests - ntrue))
results = []
for i in range(nrepl):
# rvs = locs + stats.norm.rvs(size=(nobs, ntests))
rvs = locs + randmvn(rho, size=(nobs, ntests))
tt, tpval = stats.ttest_1samp(rvs, 0)
res = fdrcorrection_bak(np.abs(tpval), alpha=alpha, method="i")
res0 = fdrcorrection0(np.abs(tpval), alpha=alpha)
# res and res0 give the same results
results.append(
[np.sum(res[:ntrue]), np.sum(res[ntrue:])]
+ [np.sum(res0[:ntrue]), np.sum(res0[ntrue:])]
+ res.tolist()
+ np.sort(tpval).tolist()
+ [np.sum(tpval[:ntrue] < alpha), np.sum(tpval[ntrue:] < alpha)]
+ [
np.sum(tpval[:ntrue] < alpha / ntests),
np.sum(tpval[ntrue:] < alpha / ntests),
]
)
return np.array(results)
def randmvn(rho, size=(1, 2), standardize=False):
"""create random draws from equi-correlated multivariate normal distribution
Parameters
----------
rho : float
correlation coefficient
size : tuple of int
size is interpreted as (nobs, nvars); each row is one draw
Returns
-------
rvs : ndarray
nobs by nvars where each row is an independent random draw of nvars-
dimensional correlated rvs
"""
nobs, nvars = size
if 0 < rho and rho < 1:
rvs = np.random.randn(nobs, nvars + 1)
rvs2 = rvs[:, :-1] * np.sqrt(1 - rho) + rvs[:, -1:] * np.sqrt(rho)
elif rho == 0:
rvs2 = np.random.randn(nobs, nvars)
elif rho < 0:
if rho < -1.0 / (nvars - 1):
raise ValueError("rho has to be larger than -1./(nvars-1)")
elif rho == -1.0 / (nvars - 1):
rho = -1.0 / (nvars - 1 + 1e-10) # barely positive definite
# use Cholesky
A = rho * np.ones((nvars, nvars)) + (1 - rho) * np.eye(nvars)
rvs2 = np.dot(np.random.randn(nobs, nvars), np.linalg.cholesky(A).T)
if standardize:
rvs2 = stats.zscore(rvs2)
return rvs2
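
# Sanity-check sketch (hypothetical): the off-diagonal entries of the
# empirical correlation matrix of ``randmvn`` draws should be near rho.
def _example_randmvn():
    import numpy as np
    np.random.seed(0)
    rvs = randmvn(0.5, size=(5000, 3))
    return np.corrcoef(rvs.T)  # off-diagonals approximately 0.5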
def tiecorrect(xranks):
"""
should be equivalent of scipy.stats.tiecorrect
"""
# casting to int rounds down, but not relevant for this case
rankbincount = np.bincount(np.asarray(xranks, dtype=int))
nties = rankbincount[rankbincount > 1]
ntot = float(len(xranks))
tiecorrection = 1 - (nties**3 - nties).sum() / (ntot**3 - ntot)
return tiecorrection
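
# Equivalence sketch (hypothetical data): compare with
# ``scipy.stats.tiecorrect`` on ranks that contain ties of size 2 and 3.
def _example_tiecorrect():
    from scipy import stats
    ranks = stats.rankdata([1, 1, 2, 3, 3, 3])
    return tiecorrect(ranks), stats.tiecorrect(ranks)  # both 1 - 30/210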
def __init__(self, x, useranks=False, uni=None, intlab=None):
"""descriptive statistics by groups
Parameters
----------
x : ndarray, 2d
first column data, second column group labels
useranks : bool
if true, then use ranks as data corresponding to the
scipy.stats.rankdata definition (start at 1, ties get mean)
uni, intlab : arrays (optional)
to avoid call to unique, these can be given as inputs
"""
self.x = np.asarray(x)
if intlab is None:
uni, intlab = np.unique(x[:, 1], return_inverse=True)
elif uni is None:
uni = np.unique(x[:, 1])
self.useranks = useranks
self.uni = uni
self.intlab = intlab
self.groupnobs = np.bincount(intlab)
# temporary until separated and made all lazy
self.runbasic(useranks=useranks)
def runbasic_old(self, useranks=False):
"""runbasic_old"""
# check: refactoring screwed up case useranks=True
# groupxsum = np.bincount(intlab, weights=X[:,0])
# groupxmean = groupxsum * 1.0 / groupnobs
x = self.x
if useranks:
self.xx = x[:, 0].argsort().argsort() + 1  # rankraw (was x[:, 1], which ranked the label column)
else:
self.xx = x[:, 0]
self.groupsum = groupranksum = np.bincount(self.intlab, weights=self.xx)
# print('groupranksum', groupranksum, groupranksum.shape, self.groupnobs.shape
# start at 1 for stats.rankdata :
self.groupmean = grouprankmean = groupranksum * 1.0 / self.groupnobs # + 1
self.groupmeanfilter = grouprankmean[self.intlab]
def runbasic(self, useranks=False):
"""runbasic"""
# check: refactoring screwed up case useranks=True
# groupxsum = np.bincount(intlab, weights=X[:,0])
# groupxmean = groupxsum * 1.0 / groupnobs
x = self.x
if useranks:
xuni, xintlab = np.unique(x[:, 0], return_inverse=True)
ranksraw = x[:, 0].argsort().argsort() + 1 # rankraw
self.xx = GroupsStats(
np.column_stack([ranksraw, xintlab]), useranks=False
).groupmeanfilter
else:
self.xx = x[:, 0]
self.groupsum = groupranksum = np.bincount(self.intlab, weights=self.xx)
# print('groupranksum', groupranksum, groupranksum.shape, self.groupnobs.shape
# start at 1 for stats.rankdata :
self.groupmean = grouprankmean = groupranksum * 1.0 / self.groupnobs # + 1
self.groupmeanfilter = grouprankmean[self.intlab]
def groupdemean(self):
"""groupdemean"""
return self.xx - self.groupmeanfilter
def groupsswithin(self):
"""groupsswithin"""
xtmp = self.groupdemean()
return np.bincount(self.intlab, weights=xtmp**2)
def groupvarwithin(self):
"""groupvarwithin"""
return self.groupsswithin() / (self.groupnobs - 1)  # .sum()
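
# Usage sketch (hypothetical data): group means and within-group variances
# from a 2d array whose second column holds the group labels.
def _example_groupsstats():
    import numpy as np
    data = np.array([[1.0, 0], [2.0, 0], [3.0, 1], [5.0, 1]])
    gs = GroupsStats(data)
    return gs.groupmean, gs.groupvarwithin()  # [1.5, 4.0], [0.5, 2.0]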
def summary(self):
"""Summary table that can be printed"""
return self._results_table
def summary_frame(self):
"""Summary DataFrame
The group columns are labeled as "group_t" and "group_c" with mean
difference defined as treatment minus control.
This should be less confusing than numeric labels group1 and group2.
Returns
-------
pandas.DataFrame
Notes
-----
The number of columns will likely increase in a future version of
statsmodels. Do not use numeric indices for the DataFrame in order
to be robust to the addition of columns.
"""
frame = pd.DataFrame({
"group_t": self.group_t,
"group_c": self.group_c,
"meandiff": self.meandiffs,
"p-adj": self.pvalues,
"lower": self.confint[:, 0],
"upper": self.confint[:, 1],
"reject": self.reject,
})
return frame
def _simultaneous_ci(self):
"""Compute simultaneous confidence intervals for comparison of means."""
q_crit_hsd = self._get_q_crit(hsd=True)
self.halfwidths = simultaneous_ci(
q_crit_hsd,
self.variance,
self._multicomp.groupstats.groupnobs,
self._multicomp.pairindices,
)
def plot_simultaneous(
self, comparison_name=None, ax=None, figsize=(10, 6), xlabel=None, ylabel=None
):
"""Plot a universal confidence interval of each group mean
Visualize significant differences in a plot with one confidence
interval per group instead of all pairwise confidence intervals.
Parameters
----------
comparison_name : str, optional
if provided, plot_intervals will color code all groups that are
significantly different from the comparison_name red, and will
color code insignificant groups gray. Otherwise, all intervals will
just be plotted in black.
ax : matplotlib axis, optional
An axis handle on which to attach the plot.
figsize : tuple, optional
tuple for the size of the figure generated
xlabel : str, optional
Name to be displayed on x axis
ylabel : str, optional
Name to be displayed on y axis
Returns
-------
Figure
handle to figure object containing interval plots
Notes
-----
Multiple comparison tests are nice, but lack a good way to be
visualized. If you have, say, 6 groups, showing a graph of the means
between each group will require 15 confidence intervals.
Instead, we can visualize inter-group differences with a single
interval for each group mean. Hochberg et al. [1] first proposed this
idea and used Tukey's Q critical value to compute the interval widths.
Unlike plotting the differences in the means and their respective
confidence intervals, any two pairs can be compared for significance
by looking for overlap.
The derivation in Hochberg and Tamhane is under the equal variance
assumption. We use the same computation in the case of unequal
variances, however, with replacement of the common pooled variance
by the unequal estimates of the within group variances.
This provides a plot that looks more informative and plausible in the
case where there are large differences in variances. In the equal
sample size and equal variance case, the confidence intervals computed
by the two methods, equal and unequal variance, are very close to
each other in larger samples.
References
----------
.. [*] Hochberg, Y., and A. C. Tamhane. Multiple Comparison Procedures.
Hoboken, NJ: John Wiley & Sons, 1987.
Examples
--------
>>> from statsmodels.examples.try_tukey_hsd import cylinders, cyl_labels
>>> from statsmodels.stats.multicomp import MultiComparison
>>> cardata = MultiComparison(cylinders, cyl_labels)
>>> results = cardata.tukeyhsd()
>>> results.plot_simultaneous()
<matplotlib.figure.Figure at 0x...>
This example shows an example plot comparing significant differences
in group means. Significant differences at the alpha=0.05 level can be
identified by intervals that do not overlap (i.e. USA vs Japan,
USA vs Germany).
>>> results.plot_simultaneous(comparison_name="USA")
<matplotlib.figure.Figure at 0x...>
Optionally provide one of the group names to color code the plot to
highlight group means different from comparison_name.
"""
fig, ax1 = utils.create_mpl_ax(ax)
if figsize is not None:
fig.set_size_inches(figsize)
if getattr(self, "halfwidths", None) is None:
self._simultaneous_ci()
means = self._multicomp.groupstats.groupmean
sigidx = []
nsigidx = []
minrange = [means[i] - self.halfwidths[i] for i in range(len(means))]
maxrange = [means[i] + self.halfwidths[i] for i in range(len(means))]
if comparison_name is None:
ax1.errorbar(
means,
lrange(len(means)),
xerr=self.halfwidths,
marker="o",
linestyle="None",
color="k",
ecolor="k",
)
else:
if comparison_name not in self.groupsunique:
raise ValueError("comparison_name not found in group names.")
midx = np.where(self.groupsunique == comparison_name)[0][0]
for i in range(len(means)):
if self.groupsunique[i] == comparison_name:
continue
if (
min(maxrange[i], maxrange[midx]) - max(minrange[i], minrange[midx])
< 0
):
sigidx.append(i)
else:
nsigidx.append(i)
# Plot the main comparison
ax1.errorbar(
means[midx],
midx,
xerr=self.halfwidths[midx],
marker="o",
linestyle="None",
color="b",
ecolor="b",
)
ax1.plot(
[minrange[midx]] * 2,
[-1, self._multicomp.ngroups],
linestyle="--",
color="0.7",
)
ax1.plot(
[maxrange[midx]] * 2,
[-1, self._multicomp.ngroups],
linestyle="--",
color="0.7",
)
# Plot those that are significantly different
if len(sigidx) > 0:
ax1.errorbar(
means[sigidx],
sigidx,
xerr=self.halfwidths[sigidx],
marker="o",
linestyle="None",
color="r",
ecolor="r",
)
# Plot those that are not significantly different
if len(nsigidx) > 0:
ax1.errorbar(
means[nsigidx],
nsigidx,
xerr=self.halfwidths[nsigidx],
marker="o",
linestyle="None",
color="0.5",
ecolor="0.5",
)
ax1.set_title("Multiple Comparisons Between All Pairs (Tukey)")
r = np.max(maxrange) - np.min(minrange)
ax1.set_ylim([-1, self._multicomp.ngroups])
ax1.set_xlim([np.min(minrange) - r / 10.0, np.max(maxrange) + r / 10.0])
ylbls = [""] + self.groupsunique.astype(str).tolist() + [""]
ax1.set_yticks(np.arange(-1, len(means) + 1))
ax1.set_yticklabels(ylbls)
ax1.set_xlabel(xlabel if xlabel is not None else "")
ax1.set_ylabel(ylabel if ylabel is not None else "")
return fig
def getranks(self):
"""convert data to rankdata and attach
This creates rankdata as it is used for non-parametric tests, where
in the case of ties the average rank is assigned.
"""
# bug: the next should use self.groupintlab instead of self.groups
# update: looks fixed
# self.ranks = GroupsStats(np.column_stack([self.data, self.groups]),
self.ranks = GroupsStats(
np.column_stack([self.data, self.groupintlab]), useranks=True
)
self.rankdata = self.ranks.groupmeanfilter
def kruskal(self, pairs=None, multimethod="T"):
"""
pairwise comparison for kruskal-wallis test
This is just a reimplementation of scipy.stats.kruskal and does
not yet use a multiple comparison correction.
"""
self.getranks()
tot = self.nobs
meanranks = self.ranks.groupmean
groupnobs = self.ranks.groupnobs
# simultaneous/separate treatment of multiple tests
f = (tot * (tot + 1.0) / 12.0) / stats.tiecorrect(self.rankdata) # (xranks)
print("MultiComparison.kruskal")
for i, j in zip(*self.pairindices):
# pdiff = np.abs(mrs[i] - mrs[j])
pdiff = np.abs(meanranks[i] - meanranks[j])
se = np.sqrt(
f * np.sum(1.0 / groupnobs[[i, j]])
) # np.array([8,8]))) #Fixme groupnobs[[i,j]] ))
Q = pdiff / se
# TODO: fix print statements
print(i, j, pdiff, se, pdiff / se, pdiff / se > 2.6310)
print(stats.norm.sf(Q) * 2)
return stats.norm.sf(Q) * 2
def allpairtest(self, testfunc, alpha=0.05, method="bonf", pvalidx=1):
"""run a pairwise test on all pairs with multiple test correction
The statistical test given in testfunc is calculated for all pairs
and the p-values are adjusted by methods in multipletests. The p-value
correction is generic and based only on the p-values, and does not
take any special structure of the hypotheses into account.
Parameters
----------
testfunc : function
A test function for two (independent) samples. It is assumed that
the return value on position pvalidx is the p-value.
alpha : float
familywise error rate
method : str
This specifies the method for the p-value correction. Any method
of multipletests is possible.
pvalidx : int (default: 1)
position of the p-value in the return of testfunc
Returns
-------
sumtab : SimpleTable instance
summary table for printing
errors: TODO: check if this is still wrong, I think it's fixed.
results from multipletests are in different order
pval_corrected can be larger than 1 ???
"""
from statsmodels.stats.multitest import multipletests
res = []
for i, j in zip(*self.pairindices):
res.append(testfunc(self.datali[i], self.datali[j]))
res = np.array(res)
reject, pvals_corrected, alphacSidak, alphacBonf = multipletests(
res[:, pvalidx], alpha=alpha, method=method
)
# print(np.column_stack([res[:,0],res[:,1], reject, pvals_corrected])
i1, i2 = self.pairindices
if pvals_corrected is None:
resarr = np.array(
lzip(
self.groupsunique[i1],
self.groupsunique[i2],
np.round(res[:, 0], 4),
np.round(res[:, 1], 4),
reject,
),
dtype=[
("group1", object),
("group2", object),
("stat", float),
("pval", float),
("reject", np.bool_),
],
)
else:
resarr = np.array(
lzip(
self.groupsunique[i1],
self.groupsunique[i2],
np.round(res[:, 0], 4),
np.round(res[:, 1], 4),
np.round(pvals_corrected, 4),
reject,
),
dtype=[
("group1", object),
("group2", object),
("stat", float),
("pval", float),
("pval_corr", float),
("reject", np.bool_),
],
)
results_table = SimpleTable(resarr, headers=resarr.dtype.names)
results_table.title = "Test Multiple Comparison %s \n%s%4.2f method=%s" % (
testfunc.__name__,
"FWER=",
alpha,
method,
) + "\nalphacSidak=%4.2f, alphacBonf=%5.3f" % (alphacSidak, alphacBonf)
return (
results_table,
(res, reject, pvals_corrected, alphacSidak, alphacBonf),
resarr,
)
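
# Usage sketch (hypothetical data; the ``MultiComparison`` import path
# follows the docstring examples above): pairwise t-tests with Holm
# correction via ``allpairtest``.
def _example_allpairtest():
    import numpy as np
    from scipy import stats
    from statsmodels.stats.multicomp import MultiComparison
    np.random.seed(0)
    data = np.random.randn(30)
    groups = np.repeat([0, 1, 2], 10)
    mc = MultiComparison(data, groups)
    table, _, _ = mc.allpairtest(stats.ttest_ind, method='holm')
    return table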
def tukeyhsd(self, alpha=0.05, use_var='equal'):
"""
Tukey's range test to compare means of all pairs of groups
Parameters
----------
alpha : float, optional
Value of FWER at which to calculate HSD.
use_var : {"unequal", "equal"}
If ``use_var`` is "equal", then the Tukey-hsd pvalues are returned.
Tukey-hsd assumes that (within) variances are the same across groups.
If ``use_var`` is "unequal", then the Games-Howell pvalues are
returned. This uses Welch's t-test for unequal variances with
Satterthwait's corrected degrees of freedom for each pairwise
comparison.
Returns
-------
results : TukeyHSDResults instance
A results class containing relevant data and some post-hoc
calculations
Notes
-----
.. versionadded:: 0.15
The ``use_var`` keyword and option for Games-Howell test.
"""
self.groupstats = GroupsStats(
np.column_stack([self.data, self.groupintlab]), useranks=False
)
gmeans = self.groupstats.groupmean
gnobs = self.groupstats.groupnobs
if use_var == 'unequal':
var_ = self.groupstats.groupvarwithin()
elif use_var == 'equal':
var_ = np.var(self.groupstats.groupdemean(), ddof=len(gmeans))
else:
raise ValueError('use_var should be "unequal" or "equal"')
# res contains: 0:(idx1, idx2), 1:reject, 2:meandiffs, 3: std_pairs,
# 4:confint, 5:q_crit, 6:df_total, 7:reject2, 8: pvals
res = tukeyhsd(gmeans, gnobs, var_, df=None, alpha=alpha, q_crit=None)
resarr = np.array(
lzip(
self.groupsunique[res[0][0]],
self.groupsunique[res[0][1]],
np.round(res[2], 4),
np.round(res[8], 4),
np.round(res[4][:, 0], 4),
np.round(res[4][:, 1], 4),
res[1],
),
dtype=[
("group1", object),
("group2", object),
("meandiff", float),
("p-adj", float),
("lower", float),
("upper", float),
("reject", np.bool_),
],
)
results_table = SimpleTable(resarr, headers=resarr.dtype.names)
results_table.title = (
"Multiple Comparison of Means - Tukey HSD, " + "FWER=%4.2f" % alpha
)
return TukeyHSDResults(
self, # mc_object, attached as _multicomp
results_table,
res[5], # q_crit, positional
reject=res[1],
meandiffs=res[2],
std_pairs=res[3],
confint=res[4],
df_total=res[6],
reject2=res[7],
variance=var_,
pvalues=res[8],
alpha=alpha,
group_t=self.groupsunique[res[0][1]],
group_c=self.groupsunique[res[0][0]],
)
def rankdata(x):
"""rankdata, equivalent to scipy.stats.rankdata
just a different implementation, I have not yet compared speed
"""
uni, intlab = np.unique(x[:, 0], return_inverse=True)
groupnobs = np.bincount(intlab)
# Unused result, commented out
# groupxsum = np.bincount(intlab, weights=X[:, 0])
# groupxsum * 1.0 / groupnobs
rankraw = x[:, 0].argsort().argsort()
groupranksum = np.bincount(intlab, weights=rankraw)
# start at 1 for stats.rankdata :
grouprankmean = groupranksum * 1.0 / groupnobs + 1
return grouprankmean[intlab]
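
# Equivalence sketch (hypothetical data): this local ``rankdata`` expects a
# 2d array and ranks its first column; ties get the average rank, as in
# ``scipy.stats.rankdata``.
def _example_rankdata():
    import numpy as np
    from scipy import stats
    x = np.array([[0.5], [1.5], [1.5], [3.0]])
    return rankdata(x), stats.rankdata(x[:, 0])  # both [1. , 2.5, 2.5, 4. ]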
def compare_ordered(vals, alpha):
"""simple ordered sequential comparison of means
vals : array_like
means or rankmeans for independent groups
incomplete, no return, not used yet
"""
vals = np.asarray(vals)
sortind = np.argsort(vals)
sortind.argsort()
ntests = len(vals)
# alphacSidak = 1 - np.power((1. - alphaf), 1./ntests)
# alphacBonf = alphaf / float(ntests)
v1, v2 = np.triu_indices(ntests, 1)
# v1,v2 have wrong sequence
for i in range(4):
for j in range(4, i, -1):
print(i, j)
def varcorrection_unbalanced(nobs_all, srange=False):
"""correction factor for variance with unequal sample sizes
this is based on the reciprocal of the harmonic mean of the sample sizes
Parameters
----------
nobs_all : array_like
The number of observations for each sample
srange : bool
if true, then the correction is divided by the number of samples
for the variance of the studentized range statistic
Returns
-------
correction : float
Correction factor for variance.
Notes
-----
variance correction factor is
1/k * sum_i 1/n_i
where k is the number of samples and summation is over i=0,...,k-1.
If all n_i are the same, then the correction factor is 1.
This needs to be multiplied by the joint variance estimate, means square
error, MSE. To obtain the correction factor for the standard deviation,
square root needs to be taken.
"""
nobs_all = np.asarray(nobs_all)
if not srange:
return (1.0 / nobs_all).sum()
else:
return (1.0 / nobs_all).sum() / len(nobs_all)
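
# Numeric sketch (hypothetical sizes): for groups of 10, 20 and 40
# observations the correction is 1/10 + 1/20 + 1/40 = 0.175, and one third
# of that for the studentized range statistic.
def _example_varcorrection_unbalanced():
    c = varcorrection_unbalanced([10, 20, 40])
    c_srange = varcorrection_unbalanced([10, 20, 40], srange=True)
    return c, c_srange  # 0.175, 0.175 / 3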
def varcorrection_pairs_unbalanced(nobs_all, srange=False):
"""correction factor for variance with unequal sample sizes for all pairs
this is just a harmonic mean
Parameters
----------
nobs_all : array_like
The number of observations for each sample
srange : bool
if true, then the correction is divided by 2 for the variance of
the studentized range statistic
Returns
-------
correction : ndarray
Correction factor for variance.
Notes
-----
the variance correction factor for each pair (i, j) is
1/n_i + 1/n_j
If all n_i are equal to n, then the factor is 2/n for each pair.
This needs to be multiplied by the joint variance estimate, mean square
error, MSE. To obtain the correction factor for the standard deviation,
square root needs to be taken.
For the studentized range statistic, the resulting factor has to be
divided by 2.
"""
# TODO: test and replace with broadcasting
n1, n2 = np.meshgrid(nobs_all, nobs_all)
if not srange:
return 1.0 / n1 + 1.0 / n2
else:
return (1.0 / n1 + 1.0 / n2) / 2.0
def varcorrection_unequal(var_all, nobs_all, df_all):
"""return joint variance from samples with unequal variances and unequal
sample sizes
something is wrong
Parameters
----------
var_all : array_like
The variance for each sample
nobs_all : array_like
The number of observations for each sample
df_all : array_like
degrees of freedom for each sample
Returns
-------
varjoint : float
joint variance.
dfjoint : float
joint Satterthwait's degrees of freedom
Notes
-----
(copy, paste not correct)
variance is
1/k * sum_i 1/n_i
where k is the number of samples and summation is over i=0,...,k-1.
If all n_i are the same, then the correction factor is 1/n.
This needs to be multiplied by the joint variance estimate, mean square
error, MSE. To obtain the correction factor for the standard deviation,
square root needs to be taken.
This is for variance of mean difference not of studentized range.
"""
var_all = np.asarray(var_all)
var_over_n = var_all * 1.0 / nobs_all # avoid integer division
varjoint = var_over_n.sum()
dfjoint = varjoint**2 / (var_over_n**2 * df_all).sum()
return varjoint, dfjoint
def varcorrection_pairs_unequal(var_all, nobs_all, df_all):
"""return joint variance from samples with unequal variances and unequal
sample sizes for all pairs
something is wrong
Parameters
----------
var_all : array_like
The variance for each sample
nobs_all : array_like
The number of observations for each sample
df_all : array_like
degrees of freedom for each sample
Returns
-------
varjoint : ndarray
joint variance.
dfjoint : ndarray
joint Satterthwait's degrees of freedom
Notes
-----
(copy, paste not correct)
variance is
1/k * sum_i 1/n_i
where k is the number of samples and summation is over i=0,...,k-1.
If all n_i are the same, then the correction factor is 1.
This needs to be multiplied by the joint variance estimate, mean square
error, MSE. To obtain the correction factor for the standard deviation,
square root needs to be taken.
"""
# TODO: test and replace with broadcasting
v1, v2 = np.meshgrid(var_all, var_all)
n1, n2 = np.meshgrid(nobs_all, nobs_all)
df1, df2 = np.meshgrid(df_all, df_all)
varjoint = v1 / n1 + v2 / n2
dfjoint = varjoint**2 / ((v1 / n1) ** 2 / df1 + (v2 / n2) ** 2 / df2)
return varjoint, dfjoint
def tukeyhsd(mean_all, nobs_all, var_all, df=None, alpha=0.05, q_crit=None):
"""simultaneous Tukey HSD
check: instead of sorting, I use absolute value of pairwise differences
in means. That's irrelevant for the test, but maybe reporting actual
differences would be better.
CHANGED: meandiffs are with sign, studentized range uses abs
q_crit added for testing
TODO: error in variance calculation when nobs_all is scalar, missing 1/n
"""
mean_all = np.asarray(mean_all)
# check if or when other ones need to be arrays
n_means = len(mean_all)
if df is None:
df = nobs_all - 1
if np.size(df) == 1: # assumes balanced samples with df = n - 1, n_i = n
df_total = n_means * df
df = np.ones(n_means) * df
else:
df_total = np.sum(df)
df_pairs_ = None
if (np.size(nobs_all) == 1) and (np.size(var_all) == 1):
# balanced sample sizes and homogenous variance
var_pairs = 1.0 * var_all / nobs_all * np.ones((n_means, n_means))
elif np.size(var_all) == 1:
# unequal sample sizes and homogenous variance
var_pairs = var_all * varcorrection_pairs_unbalanced(nobs_all, srange=True)
elif np.size(var_all) > 1:
var_pairs, df_pairs_ = varcorrection_pairs_unequal(var_all, nobs_all, df)
var_pairs /= 2.
# check division by two for studentized range
else:
raise ValueError("not supposed to be here")
# meandiffs_ = mean_all[:,None] - mean_all
meandiffs_ = mean_all - mean_all[:, None] # reverse sign, check with R example
std_pairs_ = np.sqrt(var_pairs)
# select all pairs from upper triangle of matrix
idx1, idx2 = np.triu_indices(n_means, 1)
meandiffs = meandiffs_[idx1, idx2]
std_pairs = std_pairs_[idx1, idx2]
if df_pairs_ is not None:
df_total = df_pairs_[idx1, idx2]
st_range = np.abs(meandiffs) / std_pairs # studentized range statistic
# df_total_ = np.maximum(df_total, 5) # TODO: smallest df in table
if q_crit is None:
q_crit = get_tukeyQcrit2(n_means, df_total, alpha=alpha)
pvalues = get_tukey_pvalue(n_means, df_total, st_range)
# we need pvalues to be atleast_1d for iteration. see #6132
pvalues = np.atleast_1d(pvalues)
reject = st_range > q_crit
crit_int = std_pairs * q_crit
reject2 = np.abs(meandiffs) > crit_int
confint = np.column_stack((meandiffs - crit_int, meandiffs + crit_int))
return (
(idx1, idx2),
reject,
meandiffs,
std_pairs,
confint,
q_crit,
df_total,
reject2,
pvalues,
)
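
# Usage sketch (hypothetical summary statistics): balanced one-way layout
# with 3 group means, 20 observations per group and pooled variance 4.
# The positional layout of the returned tuple is documented in the comment
# inside ``MultiComparison.tukeyhsd`` above (0: indices, 1: reject,
# 2: meandiffs, ..., 8: pvalues).
def _example_tukeyhsd():
    import numpy as np
    means = np.array([10.0, 12.0, 15.0])
    res = tukeyhsd(means, 20, 4.0, alpha=0.05)
    (idx1, idx2), reject, meandiffs = res[0], res[1], res[2]
    return list(zip(idx1, idx2, meandiffs, res[8])), reject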
def simultaneous_ci(q_crit, var, groupnobs, pairindices=None):
"""Compute simultaneous confidence intervals for comparison of means.
q_crit value is generated from tukey hsd test. Variance is considered
across all groups. Returned halfwidths can be thought of as uncertainty
intervals around each group mean. They allow for simultaneous
comparison of pairwise significance among any pairs (by checking for
overlap)
Parameters
----------
q_crit : float
The Q critical value studentized range statistic from Tukey's HSD
var : float
The group variance
groupnobs : array_like object
Number of observations contained in each group.
pairindices : tuple of lists, optional
Indices corresponding to the upper triangle of matrix. Computed
here if not supplied
Returns
-------
halfwidths : ndarray
Half the width of each confidence interval for each group given in
groupnobs
See Also
--------
MultiComparison : statistics class providing significance tests
tukeyhsd : among other things, computes q_crit value
References
----------
.. [*] Hochberg, Y., and A. C. Tamhane. Multiple Comparison Procedures.
Hoboken, NJ: John Wiley & Sons, 1987.
"""
# Set initial variables
ng = len(groupnobs)
if pairindices is None:
pairindices = np.triu_indices(ng, 1)
# Compute dij for all pairwise comparisons ala hochberg p. 95
gvar = var / groupnobs
d12 = np.sqrt(gvar[pairindices[0]] + gvar[pairindices[1]])
# Create the full d matrix given all known dij vals
d = np.zeros((ng, ng))
d[pairindices] = d12
d = d + d.conj().T
# Compute the two global sums from hochberg eq 3.32
sum1 = np.sum(d12)
sum2 = np.sum(d, axis=0)
if ng > 2:
w = ((ng - 1.0) * sum2 - sum1) / ((ng - 1.0) * (ng - 2.0))
else:
w = sum1 * np.ones((2, 1)) / 2.0
return (q_crit / np.sqrt(2)) * w | Compute simultaneous confidence intervals for comparison of means.
q_crit value is generated from tukey hsd test. Variance is considered
across all groups. Returned halfwidths can be thought of as uncertainty
intervals around each group mean. They allow for simultaneous
comparison of pairwise significance among any pairs (by checking for
overlap)
Parameters
----------
q_crit : float
The Q critical value studentized range statistic from Tukey's HSD
var : float
The group variance
groupnobs : array_like object
Number of observations contained in each group.
pairindices : tuple of lists, optional
Indices corresponding to the upper triangle of matrix. Computed
here if not supplied
Returns
-------
halfwidths : ndarray
Half the width of each confidence interval for each group given in
groupnobs
See Also
--------
MultiComparison : statistics class providing significance tests
tukeyhsd : among other things, computes q_crit value
References
----------
.. [*] Hochberg, Y., and A. C. Tamhane. Multiple Comparison Procedures.
Hoboken, NJ: John Wiley & Sons, 1987. | simultaneous_ci | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause
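A small worked call of simultaneous_ci; the q_crit value below is made up for illustration and would normally come from the Tukey HSD computation above.

import numpy as np
from statsmodels.sandbox.stats.multicomp import simultaneous_ci

q_crit = 3.5                        # assumed studentized-range critical value
var = 2.0                           # pooled within-group variance
groupnobs = np.array([10, 12, 8])
halfwidths = simultaneous_ci(q_crit, var, groupnobs)
print(halfwidths)                   # one half-width per group mean; interval is mean_i +/- halfwidth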
def distance_st_range(mean_all, nobs_all, var_all, df=None, triu=False):
"""pairwise distance matrix, outsourced from tukeyhsd
CHANGED: meandiffs are with sign, studentized range uses abs
q_crit added for testing
TODO: error in variance calculation when nobs_all is scalar, missing 1/n
"""
mean_all = np.asarray(mean_all)
# check if or when other ones need to be arrays
n_means = len(mean_all)
if df is None:
df = nobs_all - 1
if (np.size(nobs_all) == 1) and (np.size(var_all) == 1):
# balanced sample sizes and homogeneous variance
var_pairs = 1.0 * var_all / nobs_all * np.ones((n_means, n_means))
elif np.size(var_all) == 1:
# unequal sample sizes and homogeneous variance
var_pairs = var_all * varcorrection_pairs_unbalanced(nobs_all, srange=True)
elif np.size(var_all) > 1:
var_pairs, df_sum = varcorrection_pairs_unequal(var_all, nobs_all, df)
var_pairs /= 2.
# check division by two for studentized range
else:
raise ValueError("not supposed to be here")
# meandiffs_ = mean_all[:,None] - mean_all
meandiffs = mean_all - mean_all[:, None] # reverse sign, check with R example
std_pairs = np.sqrt(var_pairs)
idx1, idx2 = np.triu_indices(n_means, 1)
if triu:
# select all pairs from upper triangle of matrix
meandiffs = meandiffs[idx1, idx2]  # select upper-triangle pairs; fixes undefined name, see GH#5756
std_pairs = std_pairs[idx1, idx2]
st_range = np.abs(meandiffs) / std_pairs # studentized range statistic
return st_range, meandiffs, std_pairs, (idx1, idx2) # return square arrays | pairwise distance matrix, outsourced from tukeyhsd
CHANGED: meandiffs are with sign, studentized range uses abs
q_crit added for testing
TODO: error in variance calculation when nobs_all is scalar, missing 1/n | distance_st_range | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
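A quick check of distance_st_range in the balanced, homogeneous-variance branch: scalar nobs and variance, three means, full square matrices returned (triu=False).

import numpy as np
from statsmodels.sandbox.stats.multicomp import distance_st_range

st_range, meandiffs, std_pairs, (idx1, idx2) = distance_st_range(
    [1.0, 1.5, 3.0], 10, 4.0)
print(st_range)     # (3, 3) studentized-range statistics, zeros on the diagonal
print(meandiffs)    # signed pairwise differences, meandiffs[i, j] = mean[j] - mean[i]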
def contrast_allpairs(nm):
"""contrast or restriction matrix for all pairs of nm variables
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm*(nm-1)/2, nm)
contrast matrix for all pairwise comparisons
"""
contr = []
for i in range(nm):
for j in range(i + 1, nm):
contr_row = np.zeros(nm)
contr_row[i] = 1
contr_row[j] = -1
contr.append(contr_row)
return np.array(contr) | contrast or restriction matrix for all pairs of nm variables
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm*(nm-1)/2, nm)
contrast matrix for all pairwise comparisons | contrast_allpairs | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
def contrast_all_one(nm):
"""contrast or restriction matrix for all against first comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against first comparisons
"""
contr = np.column_stack((np.ones(nm - 1), -np.eye(nm - 1)))
return contr | contrast or restriction matrix for all against first comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against first comparisons | contrast_all_one | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
def contrast_diff_mean(nm):
"""contrast or restriction matrix for all against mean comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against mean comparisons
"""
return np.eye(nm) - np.ones((nm, nm)) / nm | contrast or restriction matrix for all against mean comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against mean comparisons | contrast_diff_mean | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
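The three contrast helpers above, evaluated for nm = 3; each row of a contrast matrix encodes one linear hypothesis C @ beta = 0.

import numpy as np
from statsmodels.sandbox.stats.multicomp import (
    contrast_allpairs, contrast_all_one, contrast_diff_mean)

print(contrast_allpairs(3))
# [[ 1. -1.  0.]
#  [ 1.  0. -1.]
#  [ 0.  1. -1.]]
print(contrast_all_one(3))
# [[ 1. -1.  0.]
#  [ 1.  0. -1.]]
print(contrast_diff_mean(3))   # eye(3) - 1/3, each row sums to zero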
def tukey_pvalues(std_range, nm, df):
"""compute tukey p-values by numerical integration of multivariate-t distribution
"""
# corrected but very slow with warnings about integration
# need to increase maxiter or similar
# nm = len(std_range)
contr = contrast_allpairs(nm)
corr = np.dot(contr, contr.T) / 2.0
tstat = std_range / np.sqrt(2) * np.ones(corr.shape[0]) # need len of all pairs
return multicontrast_pvalues(tstat, corr, df=df) | compute tukey p-values by numerical integration of multivariate-t distribution | tukey_pvalues | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
def multicontrast_pvalues(tstat, tcorr, df=None, dist="t", alternative="two-sided"):
"""pvalues for simultaneous tests
currently only for t distribution, normal distribution not added yet
alternative is ignored
"""
from statsmodels.sandbox.distributions.multivariate import mvstdtprob
if (df is None) and (dist == "t"):
raise ValueError("df has to be specified for the t-distribution")
tstat = np.asarray(tstat)
ntests = len(tstat)
cc = np.abs(tstat)
pval_global = 1 - mvstdtprob(-cc, cc, tcorr, df)
pvals = []
for ti in cc:
limits = ti * np.ones(ntests)  # per-test limits; previously computed but discarded
pvals.append(1 - mvstdtprob(-limits, limits, tcorr, df))
return pval_global, np.asarray(pvals) | pvalues for simultaneous tests
currently only for t distribution, normal distribution not added yet
alternative is ignored | multicontrast_pvalues | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
def get_crit(self, alpha):
"""
get_tukeyQcrit
currently tukey Q, add others
"""
q_crit = get_tukeyQcrit(self.n_vals, self.df, alpha=alpha)
return q_crit * np.ones(self.n_vals) | get_tukeyQcrit
currently tukey Q, add others | get_crit | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
def get_distance_matrix(self):
"""studentized range statistic"""
# make into property, decorate
dres = distance_st_range(self.vals, self.nobs_all, self.var_all, df=self.df)
self.distance_matrix = dres[0] | studentized range statistic | get_distance_matrix | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
def iter_subsets(self, indices):
"""Iterate substeps"""
for ii in range(len(indices)):
idxsub = copy.copy(indices)
idxsub.pop(ii)
yield idxsub | Iterate substeps | iter_subsets | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
def check_set(self, indices):
"""check whether pairwise distances of indices satisfy condition"""
indtup = tuple(indices)
if indtup in self.cache_result:
return self.cache_result[indtup]
else:
set_distance_matrix = self.distance_matrix[
np.asarray(indices)[:, None], indices
]
n_elements = len(indices)
if np.any(set_distance_matrix > self.crit[n_elements - 1]):
res = True
else:
res = False
self.cache_result[indtup] = res
return res | check whether pairwise distances of indices satisfy condition | check_set | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
def stepdown(self, indices):
"""stepdown"""
print(indices)
if self.check_set(indices): # larger than critical distance
if len(indices) > 2: # step down into subsets if more than 2 elements
for subs in self.iter_subsets(indices):
self.stepdown(subs)
else:
self.rejected.append(tuple(indices))
else:
self.accepted.append(tuple(indices))
return indices | stepdown | stepdown | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
def run(self, alpha):
"""main function to run the test,
could be done in __call__ instead
this could have all the initialization code
"""
self.cache_result = {}
self.crit = self.get_crit(alpha) # decide where to set alpha, moved to run
self.accepted = [] # store accepted sets, not unique
self.rejected = []
self.get_distance_matrix()
self.stepdown(lrange(self.n_vals))
return list(set(self.accepted)), list(set(self.rejected))  # was sd.rejected, an undefined name
could be done in __call__ instead
this could have all the initialization code | run | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
def subsets(vals, indices_):
"""recursive function for constructing homogeneous subset
registers rejected and subsetli in outer scope
"""
i, j = (indices_[0], indices_[-1])
if vals[-1] - vals[0] > dcrit[i, j]:
rejected.append((indices_[0], indices_[-1]))
return [
subsets(vals[:-1], indices_[:-1]),
subsets(vals[1:], indices_[1:]),
(indices_[0], indices_[-1]),
]
else:
subsetsli.append(tuple(indices_))
return indices_ | recursive function for constructing homogeneous subset
registers rejected and subsetli in outer scope | homogeneous_subsets.subsets | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
def homogeneous_subsets(vals, dcrit):
"""recursively check all pairs of vals for minimum distance
step down method as in Newman-Keuls and Ryan procedures. This is not a
closed procedure since not all partitions are checked.
Parameters
----------
vals : array_like
values that are pairwise compared
dcrit : array_like or float
critical distance for rejecting, either float, or 2-dimensional array
with distances on the upper triangle.
Returns
-------
rejs : list of pairs
list of pair-indices with (strictly) larger than critical difference
nrejs : list of pairs
list of pair-indices with smaller than critical difference
lli : list of tuples
list of subsets with smaller than critical difference
res : tree
result of all comparisons (for checking)
this follows description in SPSS notes on Post-Hoc Tests
Because of the recursive structure, some comparisons are made several
times, but only unique pairs or sets are returned.
Examples
--------
>>> m = [0, 2, 2.5, 3, 6, 8, 9, 9.5,10 ]
>>> rej, nrej, ssli, res = homogeneous_subsets(m, 2)
>>> set_partition(ssli)
([(5, 6, 7, 8), (1, 2, 3), (4,)], [0])
>>> [np.array(m)[list(pp)] for pp in set_partition(ssli)[0]]
[array([ 8. , 9. , 9.5, 10. ]), array([ 2. , 2.5, 3. ]), array([ 6.])]
"""
nvals = len(vals)
indices_ = lrange(nvals)
rejected = []
subsetsli = []
if np.size(dcrit) == 1:
dcrit = dcrit * np.ones((nvals, nvals)) # example numbers for experimenting
def subsets(vals, indices_):
"""recursive function for constructing homogeneous subset
registers rejected and subsetli in outer scope
"""
i, j = (indices_[0], indices_[-1])
if vals[-1] - vals[0] > dcrit[i, j]:
rejected.append((indices_[0], indices_[-1]))
return [
subsets(vals[:-1], indices_[:-1]),
subsets(vals[1:], indices_[1:]),
(indices_[0], indices_[-1]),
]
else:
subsetsli.append(tuple(indices_))
return indices_
res = subsets(vals, indices_)
all_pairs = [(i, j) for i in range(nvals) for j in range(nvals - 1, i, -1)]
rejs = set(rejected)
not_rejected = list(set(all_pairs) - rejs)
return list(rejs), not_rejected, list(set(subsetsli)), res | recursively check all pairs of vals for minimum distance
step down method as in Newman-Keuls and Ryan procedures. This is not a
closed procedure since not all partitions are checked.
Parameters
----------
vals : array_like
values that are pairwise compared
dcrit : array_like or float
critical distance for rejecting, either float, or 2-dimensional array
with distances on the upper triangle.
Returns
-------
rejs : list of pairs
list of pair-indices with (strictly) larger than critical difference
nrejs : list of pairs
list of pair-indices with smaller than critical difference
lli : list of tuples
list of subsets with smaller than critical difference
res : tree
result of all comparisons (for checking)
this follows description in SPSS notes on Post-Hoc Tests
Because of the recursive structure, some comparisons are made several
times, but only unique pairs or sets are returned.
Examples
--------
>>> m = [0, 2, 2.5, 3, 6, 8, 9, 9.5,10 ]
>>> rej, nrej, ssli, res = homogeneous_subsets(m, 2)
>>> set_partition(ssli)
([(5, 6, 7, 8), (1, 2, 3), (4,)], [0])
>>> [np.array(m)[list(pp)] for pp in set_partition(ssli)[0]]
[array([ 8. , 9. , 9.5, 10. ]), array([ 2. , 2.5, 3. ]), array([ 6.])] | homogeneous_subsets | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
def set_partition(ssli):
"""extract a partition from a list of tuples
this should be correctly called select largest disjoint sets.
Begun and Gabriel 1981 do not seem to be bothered by sets of accepted
hypothesis with joint elements,
e.g. maximal_accepted_sets = { {1,2,3}, {2,3,4} }
This creates a set partition from a list of sets given as tuples.
It tries to find the partition with the largest sets. That is, sets are
included after being sorted by length.
If the list does not include the singletons, then it will be only a
partial partition. Missing items are singletons (I think).
Examples
--------
>>> li
[(5, 6, 7, 8), (1, 2, 3), (4, 5), (0, 1)]
>>> set_partition(li)
([(5, 6, 7, 8), (1, 2, 3)], [0, 4])
"""
part = []
for s in sorted(list(set(ssli)), key=len)[::-1]:
# print(s,
s_ = set(s).copy()
if not any(set(s_).intersection(set(t)) for t in part):
# print('inside:', s
part.append(s)
# else: print(part
missing = list({i for ll in ssli for i in ll} - {i for ll in part for i in ll})
return part, missing | extract a partition from a list of tuples
this should be correctly called select largest disjoint sets.
Begun and Gabriel 1981 do not seem to be bothered by sets of accepted
hypothesis with joint elements,
e.g. maximal_accepted_sets = { {1,2,3}, {2,3,4} }
This creates a set partition from a list of sets given as tuples.
It tries to find the partition with the largest sets. That is, sets are
included after being sorted by length.
If the list does not include the singletons, then it will be only a
partial partition. Missing items are singletons (I think).
Examples
--------
>>> li
[(5, 6, 7, 8), (1, 2, 3), (4, 5), (0, 1)]
>>> set_partition(li)
([(5, 6, 7, 8), (1, 2, 3)], [0, 4]) | set_partition | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
def set_remove_subs(ssli):
"""remove sets that are subsets of another set from a list of tuples
Parameters
----------
ssli : list of tuples
each tuple is considered as a set
Returns
-------
part : list of tuples
new list with subset tuples removed, it is sorted by set-length of tuples. The
list contains original tuples, duplicate elements are not removed.
Examples
--------
>>> set_remove_subs([(0, 1), (1, 2), (1, 2, 3), (0,)])
[(1, 2, 3), (0, 1)]
>>> set_remove_subs([(0, 1), (1, 2), (1,1, 1, 2, 3), (0,)])
[(1, 1, 1, 2, 3), (0, 1)]
"""
# TODO: maybe convert all tuples to sets immediately, but I do not need the extra efficiency
part = []
for s in sorted(list(set(ssli)), key=lambda x: len(set(x)))[::-1]:
# print(s,
# s_ = set(s).copy()
if not any(set(s).issubset(set(t)) for t in part):
# print('inside:', s
part.append(s)
# else: print(part
## missing = list(set(i for ll in ssli for i in ll)
## - set(i for ll in part for i in ll))
return part | remove sets that are subsets of another set from a list of tuples
Parameters
----------
ssli : list of tuples
each tuple is considered as a set
Returns
-------
part : list of tuples
new list with subset tuples removed, it is sorted by set-length of tuples. The
list contains original tuples, duplicate elements are not removed.
Examples
--------
>>> set_remove_subs([(0, 1), (1, 2), (1, 2, 3), (0,)])
[(1, 2, 3), (0, 1)]
>>> set_remove_subs([(0, 1), (1, 2), (1,1, 1, 2, 3), (0,)])
[(1, 1, 1, 2, 3), (0, 1)] | set_remove_subs | python | statsmodels/statsmodels | statsmodels/sandbox/stats/multicomp.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py | BSD-3-Clause |
def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4, axis=0, masknan=None):
"""Calculate the score at the given 'per' percentile of the
sequence a. For example, the score at per=50 is the median.
This function is a shortcut to mquantile
"""
per = np.asarray(per, float)
if (per < 0).any() or (per > 100.).any():
raise ValueError("The percentile should be between 0. and 100. !"\
" (got %s)" % per)
return quantiles(data, prob=[per/100.], alphap=alphap, betap=betap,
limit=limit, axis=axis, masknan=masknan).squeeze() | Calculate the score at the given 'per' percentile of the
sequence a. For example, the score at per=50 is the median.
This function is a shortcut to mquantile | scoreatpercentile | python | statsmodels/statsmodels | statsmodels/sandbox/stats/stats_mstats_short.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/stats_mstats_short.py | BSD-3-Clause |
def plotting_positions(data, alpha=0.4, beta=0.4, axis=0, masknan=False):
"""Returns the plotting positions (or empirical percentile points) for the
data.
Plotting positions are defined as (i-alpha)/(n+1-alpha-beta), where:
- i is the rank order statistics (starting at 1)
- n is the number of unmasked values along the given axis
- alpha and beta are two parameters.
Typical values for alpha and beta are:
- (0,1) : *p(k) = k/n* : linear interpolation of cdf (R, type 4)
- (.5,.5) : *p(k) = (k-1/2.)/n* : piecewise linear function (R, type 5)
(Bliss 1967: "Rankit")
- (0,0) : *p(k) = k/(n+1)* : Weibull (R type 6), (Van der Waerden 1952)
- (1,1) : *p(k) = (k-1)/(n-1)*. In this case, p(k) = mode[F(x[k])].
That's R default (R type 7)
- (1/3,1/3): *p(k) = (k-1/3)/(n+1/3)*. Then p(k) ~ median[F(x[k])].
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x. (R type 8), (Tukey 1962)
- (3/8,3/8): *p(k) = (k-3/8)/(n+1/4)*.
The resulting quantile estimates are approximately unbiased
if x is normally distributed (R type 9) (Blom 1958)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
Parameters
----------
x : sequence
Input data, as a sequence or array of dimension at most 2.
prob : sequence
List of quantiles to compute.
alpha : {0.4, float} optional
Plotting positions parameter.
beta : {0.4, float} optional
Plotting positions parameter.
Notes
-----
I think the adjustments assume that there are no ties in order to be a reasonable
approximation to a continuous density function. TODO: check this
References
----------
unknown,
dates to original papers from Beasley, Erickson, Allison 2009 Behav Genet
"""
if isinstance(data, np.ma.MaskedArray):
if axis is None or data.ndim == 1:
return stats.mstats.plotting_positions(data, alpha=alpha, beta=beta)
else:
return ma.apply_along_axis(stats.mstats.plotting_positions, axis, data, alpha=alpha, beta=beta)
if masknan:
nanmask = np.isnan(data)
if nanmask.any():
marr = ma.array(data, mask=nanmask)
#code duplication:
if axis is None or data.ndim == 1:
marr = stats.mstats.plotting_positions(marr, alpha=alpha, beta=beta)
else:
marr = ma.apply_along_axis(stats.mstats.plotting_positions, axis, marr, alpha=alpha, beta=beta)
return ma.filled(marr, fill_value=np.nan)
data = np.asarray(data)
if data.size == 1: # use helper function instead
data = np.atleast_1d(data)
axis = 0
if axis is None:
data = data.ravel()
axis = 0
n = data.shape[axis]
if data.ndim == 1:
plpos = np.empty(data.shape, dtype=float)
plpos[data.argsort()] = (np.arange(1,n+1) - alpha)/(n+1.-alpha-beta)
else:
#nd assignment instead of second argsort does not look easy
plpos = (data.argsort(axis).argsort(axis) + 1. - alpha)/(n+1.-alpha-beta)
return plpos | Returns the plotting positions (or empirical percentile points) for the
data.
Plotting positions are defined as (i-alpha)/(n+1-alpha-beta), where:
- i is the rank order statistics (starting at 1)
- n is the number of unmasked values along the given axis
- alpha and beta are two parameters.
Typical values for alpha and beta are:
- (0,1) : *p(k) = k/n* : linear interpolation of cdf (R, type 4)
- (.5,.5) : *p(k) = (k-1/2.)/n* : piecewise linear function (R, type 5)
(Bliss 1967: "Rankit")
- (0,0) : *p(k) = k/(n+1)* : Weibull (R type 6), (Van der Waerden 1952)
- (1,1) : *p(k) = (k-1)/(n-1)*. In this case, p(k) = mode[F(x[k])].
That's R default (R type 7)
- (1/3,1/3): *p(k) = (k-1/3)/(n+1/3)*. Then p(k) ~ median[F(x[k])].
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x. (R type 8), (Tukey 1962)
- (3/8,3/8): *p(k) = (k-3/8)/(n+1/4)*.
The resulting quantile estimates are approximately unbiased
if x is normally distributed (R type 9) (Blom 1958)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
Parameters
----------
x : sequence
Input data, as a sequence or array of dimension at most 2.
prob : sequence
List of quantiles to compute.
alpha : {0.4, float} optional
Plotting positions parameter.
beta : {0.4, float} optional
Plotting positions parameter.
Notes
-----
I think the adjustments assume that there are no ties in order to be a reasonable
approximation to a continuous density function. TODO: check this
References
----------
unknown,
dates to original papers from Beasley, Erickson, Allison 2009 Behav Genet | plotting_positions | python | statsmodels/statsmodels | statsmodels/sandbox/stats/stats_mstats_short.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/stats_mstats_short.py | BSD-3-Clause |
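A direct numpy evaluation of the plotting-position formula (i - alpha) / (n + 1 - alpha - beta) for a small 1d sample, mirroring the 1d branch of plotting_positions above.

import numpy as np

x = np.array([3.0, 1.0, 4.0, 1.5, 5.0])
n = len(x)
alpha = beta = 0.4                         # Cunnane positions, the default above
ranks = np.empty(n)
ranks[x.argsort()] = np.arange(1, n + 1)   # 1-based rank-order statistics
plpos = (ranks - alpha) / (n + 1.0 - alpha - beta)
print(plpos)   # the smallest value gets (1 - 0.4) / 5.2, about 0.115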
def plotting_positions_w1d(data, weights=None, alpha=0.4, beta=0.4,
method='notnormed'):
'''Weighted plotting positions (or empirical percentile points) for the data.
observations are weighted and the plotting positions are defined as
(ws-alpha)/(n-alpha-beta), where:
- ws is the weighted rank order statistics or cumulative weighted sum,
normalized to n if method is "normed"
- n is the number of values along the given axis if method is "normed"
and total weight otherwise
- alpha and beta are two parameters.
wtd.quantile in R package Hmisc seems to use the "notnormed" version.
notnormed coincides with unweighted segment in example, drop "normed" version ?
See Also
--------
plotting_positions : unweighted version that works also with more than one
dimension and has other options
'''
x = np.atleast_1d(data)
if x.ndim > 1:
raise ValueError('currently implemented only for 1d')
if weights is None:
weights = np.ones(x.shape)
else:
weights = np.array(weights, float, copy=False, ndmin=1) #atleast_1d(weights)
if weights.shape != x.shape:
raise ValueError('if weights is given, it needs to be the same'
'shape as data')
n = len(x)
xargsort = x.argsort()
ws = weights[xargsort].cumsum()
res = np.empty(x.shape)
if method == 'normed':
res[xargsort] = (1.*ws/ws[-1]*n-alpha)/(n+1.-alpha-beta)
else:
res[xargsort] = (1.*ws-alpha)/(ws[-1]+1.-alpha-beta)
return res | Weighted plotting positions (or empirical percentile points) for the data.
observations are weighted and the plotting positions are defined as
(ws-alpha)/(n-alpha-beta), where:
- ws is the weighted rank order statistics or cumulative weighted sum,
normalized to n if method is "normed"
- n is the number of values along the given axis if method is "normed"
and total weight otherwise
- alpha and beta are two parameters.
wtd.quantile in R package Hmisc seems to use the "notnormed" version.
notnormed coincides with unweighted segment in example, drop "normed" version ?
See Also
--------
plotting_positions : unweighted version that works also with more than one
dimension and has other options | plotting_positions_w1d | python | statsmodels/statsmodels | statsmodels/sandbox/stats/stats_mstats_short.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/stats_mstats_short.py | BSD-3-Clause |
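With unit weights and the default method='notnormed', ws = 1..n and ws[-1] = n, so the weighted formula (ws - alpha) / (ws[-1] + 1 - alpha - beta) reduces to the unweighted (k - alpha) / (n + 1 - alpha - beta); a quick check:

import numpy as np
from statsmodels.sandbox.stats.stats_mstats_short import (
    plotting_positions, plotting_positions_w1d)

x = np.array([3.0, 1.0, 4.0, 1.5, 5.0])
pp_w = plotting_positions_w1d(x, weights=np.ones(len(x)))
pp_u = plotting_positions(x)
print(np.allclose(pp_w, pp_u))   # True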
def edf_normal_inverse_transformed(x, alpha=3./8, beta=3./8, axis=0):
'''rank based normal inverse transformed cdf
'''
from scipy import stats
ranks = plotting_positions(x, alpha=alpha, beta=beta, axis=axis, masknan=False)  # was beta=alpha with a hard-coded axis=0
ranks_transf = stats.norm.ppf(ranks)
return ranks_transf | rank based normal inverse transformed cdf | edf_normal_inverse_transformed | python | statsmodels/statsmodels | statsmodels/sandbox/stats/stats_mstats_short.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/stats_mstats_short.py | BSD-3-Clause |
def runs_test(self, correction=True):
'''basic version of runs test
Parameters
----------
correction : bool
Following the SAS manual, for sample sizes below 50, the test
statistic is corrected by 0.5. This can be turned off with
correction=False, and was included to match R, tseries, which
does not use any correction.
pvalue based on normal distribution, with integer correction
'''
self.npo = npo = (self.runs_pos).sum()
self.nne = nne = (self.runs_neg).sum()
#n_r = self.n_runs
n = npo + nne
npn = npo * nne
rmean = 2. * npn / n + 1
rvar = 2. * npn * (2.*npn - n) / n**2. / (n-1.)
rstd = np.sqrt(rvar)
rdemean = self.n_runs - rmean
if n >= 50 or not correction:
z = rdemean
else:
if rdemean > 0.5:
z = rdemean - 0.5
elif rdemean < -0.5:  # was `rdemean < 0.5`, which made the else branch unreachable
z = rdemean + 0.5
else:
z = 0.
z /= rstd
pval = 2 * stats.norm.sf(np.abs(z))
return z, pval | basic version of runs test
Parameters
----------
correction : bool
Following the SAS manual, for sample sizes below 50, the test
statistic is corrected by 0.5. This can be turned off with
correction=False, and was included to match R, tseries, which
does not use any correction.
pvalue based on normal distribution, with integer correction | runs_test | python | statsmodels/statsmodels | statsmodels/sandbox/stats/runs.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py | BSD-3-Clause |
def runstest_1samp(x, cutoff='mean', correction=True):
'''use runs test on binary discretized data above/below cutoff
Parameters
----------
x : array_like
data, numeric
cutoff : {'mean', 'median'} or number
This specifies the cutoff to split the data into large and small
values.
correction : bool
Following the SAS manual, for sample sizes below 50, the test
statistic is corrected by 0.5. This can be turned off with
correction=False, and was included to match R, tseries, which
does not use any correction.
Returns
-------
z_stat : float
test statistic, asymptotically normally distributed
p-value : float
p-value, reject the null hypothesis if it is below a type 1 error
level, alpha.
'''
x = array_like(x, "x")
if cutoff == 'mean':
cutoff = np.mean(x)
elif cutoff == 'median':
cutoff = np.median(x)
else:
cutoff = float(cutoff)
xindicator = (x >= cutoff).astype(int)
return Runs(xindicator).runs_test(correction=correction) | use runs test on binary discretized data above/below cutoff
Parameters
----------
x : array_like
data, numeric
cutoff : {'mean', 'median'} or number
This specifies the cutoff to split the data into large and small
values.
correction : bool
Following the SAS manual, for sample sizes below 50, the test
statistic is corrected by 0.5. This can be turned off with
correction=False, and was included to match R, tseries, which
does not use any correction.
Returns
-------
z_stat : float
test statistic, asymptotically normally distributed
p-value : float
p-value, reject the null hypothesis if it is below a type 1 error
level, alpha. | runstest_1samp | python | statsmodels/statsmodels | statsmodels/sandbox/stats/runs.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py | BSD-3-Clause
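A short run of runstest_1samp; with cutoff='mean' the series is dichotomized around its sample mean before counting runs.

import numpy as np
from statsmodels.sandbox.stats.runs import runstest_1samp

x = np.array([1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0])
z, pval = runstest_1samp(x, cutoff='mean', correction=True)
print(f"z = {z:.3f}, p-value = {pval:.3f}")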
def runstest_2samp(x, y=None, groups=None, correction=True):
'''Wald-Wolfowitz runstest for two samples
This tests whether two samples come from the same distribution.
Parameters
----------
x : array_like
data, numeric, contains either one group, if y is also given, or
both groups, if additionally a group indicator is provided
y : array_like (optional)
data, numeric
groups : array_like
group labels or indicator; the data for both groups is given in a
single 1-dimensional array, x. If group labels are not [0, 1], then
correction : bool
Following the SAS manual, for sample sizes below 50, the test
statistic is corrected by 0.5. This can be turned off with
correction=False, and was included to match R, tseries, which
does not use any correction.
Returns
-------
z_stat : float
test statistic, asymptotically normally distributed
p-value : float
p-value, reject the null hypothesis if it is below a type 1 error
level, alpha.
Notes
-----
Wald-Wolfowitz runs test.
If there are ties, then the reported test statistic and p-value are
based on the higher of the two p-values obtained by breaking the ties
in favor of one group and then in favor of the other.
This test is intended for continuous distributions.
SAS has treatment for ties, but not clear, and sounds more complicated
(minimum and maximum possible runs prevent use of argsort)
(maybe it's not so difficult, idea: add small positive noise to first
one, run test, then to the other, run test, take max(?) p-value - DONE
This gives not the minimum and maximum of the number of runs, but should
be close. Not true, this is close to minimum but far away from maximum.
maximum number of runs would use alternating groups in the ties.)
Maybe adding random noise would be the better approach.
SAS has exact distribution for sample size <=30, does not look standard
but should be easy to add.
currently two-sided test only
This has not been verified against a reference implementation. In a short
Monte Carlo simulation where both samples are normally distributed, the test
seems to be correctly sized for larger number of observations (30 or
larger), but conservative (i.e. reject less often than nominal) with a
sample size of 10 in each group.
See Also
--------
runs_test_1samp
Runs
RunsProb
'''
x = np.asarray(x)
if y is not None:
y = np.asarray(y)
groups = np.concatenate((np.zeros(len(x)), np.ones(len(y))))
# note reassigning x
x = np.concatenate((x, y))
gruni = np.arange(2)
elif groups is not None:
gruni = np.unique(groups)
if gruni.size != 2: # pylint: disable=E1103
raise ValueError('not exactly two groups specified')
#require groups to be numeric ???
else:
raise ValueError('either y or groups is necessary')
xargsort = np.argsort(x)
#check for ties
x_sorted = x[xargsort]
x_diff = np.diff(x_sorted) # used for detecting and handling ties
if x_diff.min() == 0:
warnings.warn('ties detected')  # was a bare print with a TODO to use a warning
x_mindiff = x_diff[x_diff > 0].min()
eps = x_mindiff/2.
xx = x.copy() #do not change original, just in case
xx[groups==gruni[0]] += eps
xargsort = np.argsort(xx)
xindicator = groups[xargsort]
z0, p0 = Runs(xindicator).runs_test(correction=correction)
xx[groups==gruni[0]] -= eps #restore xx = x
xx[groups==gruni[1]] += eps
xargsort = np.argsort(xx)
xindicator = groups[xargsort]
z1, p1 = Runs(xindicator).runs_test(correction=correction)
idx = np.argmax([p0,p1])
return [z0, z1][idx], [p0, p1][idx]
else:
xindicator = groups[xargsort]
return Runs(xindicator).runs_test(correction=correction) | Wald-Wolfowitz runstest for two samples
This tests whether two samples come from the same distribution.
Parameters
----------
x : array_like
data, numeric, contains either one group, if y is also given, or
both groups, if additionally a group indicator is provided
y : array_like (optional)
data, numeric
groups : array_like
group labels or indicator; the data for both groups is given in a
single 1-dimensional array, x. If group labels are not [0, 1], then
correction : bool
Following the SAS manual, for sample sizes below 50, the test
statistic is corrected by 0.5. This can be turned off with
correction=False, and was included to match R, tseries, which
does not use any correction.
Returns
-------
z_stat : float
test statistic, asymptotically normally distributed
p-value : float
p-value, reject the null hypothesis if it is below a type 1 error
level, alpha.
Notes
-----
Wald-Wolfowitz runs test.
If there are ties, then the reported test statistic and p-value are
based on the higher of the two p-values obtained by breaking the ties
in favor of one group and then in favor of the other.
This test is intended for continuous distributions.
SAS has treatment for ties, but not clear, and sounds more complicated
(minimum and maximum possible runs prevent use of argsort)
(maybe it's not so difficult, idea: add small positive noise to first
one, run test, then to the other, run test, take max(?) p-value - DONE
This gives not the minimum and maximum of the number of runs, but should
be close. Not true, this is close to minimum but far away from maximum.
maximum number of runs would use alternating groups in the ties.)
Maybe adding random noise would be the better approach.
SAS has exact distribution for sample size <=30, does not look standard
but should be easy to add.
currently two-sided test only
This has not been verified against a reference implementation. In a short
Monte Carlo simulation where both samples are normally distributed, the test
seems to be correctly sized for larger number of observations (30 or
larger), but conservative (i.e. reject less often than nominal) with a
sample size of 10 in each group.
See Also
--------
runs_test_1samp
Runs
RunsProb | runstest_2samp | python | statsmodels/statsmodels | statsmodels/sandbox/stats/runs.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py | BSD-3-Clause |
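Two-sample example: continuous draws avoid the tie-handling branch, and a location shift between the samples should tend to lower the p-value.

import numpy as np
from statsmodels.sandbox.stats.runs import runstest_2samp

rng = np.random.default_rng(0)
x = rng.normal(0.0, 1.0, size=40)
y = rng.normal(0.5, 1.0, size=40)
z, pval = runstest_2samp(x, y=y, correction=True)
print(f"z = {z:.3f}, p-value = {pval:.3f}")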
def pdf(self, x, k, n, p):
'''distribution of success runs of length k or more
Parameters
----------
x : float
count of runs of length n
k : int
length of runs
n : int
total number of observations or trials
p : float
probability of success in each Bernoulli trial
Returns
-------
pdf : float
probability that x runs of length of k are observed
Notes
-----
not yet vectorized
References
----------
Muselli 1996, theorem 3
'''
q = 1-p
m = np.arange(x, (n+1)//(k+1)+1)[:,None]
terms = (-1)**(m-x) * comb(m, x) * p**(m*k) * q**(m-1) \
* (comb(n - m*k, m - 1) + q * comb(n - m*k, m))
return terms.sum(0) | distribution of success runs of length k or more
Parameters
----------
x : float
count of runs of length n
k : int
length of runs
n : int
total number of observations or trials
p : float
probability of success in each Bernoulli trial
Returns
-------
pdf : float
probability that x runs of length of k are observed
Notes
-----
not yet vectorized
References
----------
Muselli 1996, theorem 3 | pdf | python | statsmodels/statsmodels | statsmodels/sandbox/stats/runs.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py | BSD-3-Clause |
def median_test_ksample(x, groups):
'''chisquare test for equality of median/location
This tests whether all groups have the same fraction of observations
above the median.
Parameters
----------
x : array_like
data values stacked for all groups
groups : array_like
group labels or indicator
Returns
-------
stat : float
test statistic
pvalue : float
pvalue from the chisquare distribution
table : ndarray
    observed counts below/above the overall median for each group
expected : ndarray
    expected counts under the null, returned for checking
'''
x = np.asarray(x)
gruni = np.unique(groups)
xli = [x[groups==group] for group in gruni]
xmedian = np.median(x)
counts_larger = np.array([(xg > xmedian).sum() for xg in xli])
counts = np.array([len(xg) for xg in xli])
counts_smaller = counts - counts_larger
nobs = counts.sum()
n_larger = (x > xmedian).sum()
n_smaller = nobs - n_larger
table = np.vstack((counts_smaller, counts_larger))
#the following should be replaced by chisquare_contingency table
expected = np.vstack((counts * 1. / nobs * n_smaller,
counts * 1. / nobs * n_larger))
if (expected < 5).any():
print('Warning: There are cells with less than 5 expected '
      'observations. The chisquare distribution might not be a good '
      'approximation for the true distribution.')
#check ddof
return stats.chisquare(table.ravel(), expected.ravel(), ddof=1), table, expected | chisquare test for equality of median/location
This tests whether all groups have the same fraction of observations
above the median.
Parameters
----------
x : array_like
data values stacked for all groups
groups : array_like
group labels or indicator
Returns
-------
stat : float
test statistic
pvalue : float
pvalue from the chisquare distribution
table : ndarray
    observed counts below/above the overall median for each group
expected : ndarray
    expected counts under the null, returned for checking | median_test_ksample | python | statsmodels/statsmodels | statsmodels/sandbox/stats/runs.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py | BSD-3-Clause
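Example with three groups of 30 observations each; expected cell counts are around 15, so the small-expected-count warning does not trigger.

import numpy as np
from statsmodels.sandbox.stats.runs import median_test_ksample

rng = np.random.default_rng(1)
x = np.concatenate([rng.normal(0.0, 1, 30),
                    rng.normal(0.3, 1, 30),
                    rng.normal(0.8, 1, 30)])
groups = np.repeat([0, 1, 2], 30)
(stat, pval), table, expected = median_test_ksample(x, groups)
print(f"chi2 = {stat:.3f}, p-value = {pval:.3f}")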
def cochrans_q(x):
'''Cochran's Q test for identical effect of k treatments
Cochran's Q is a k-sample extension of the McNemar test. If there are only
two treatments, then Cochran's Q test and McNemar test are equivalent.
Test that the probability of success is the same for each treatment.
The alternative is that at least two treatments have a different
probability of success.
Parameters
----------
x : array_like, 2d (N,k)
data with N cases and k variables
Returns
-------
q_stat : float
test statistic
pvalue : float
pvalue from the chisquare distribution
Notes
-----
In Wikipedia terminology, rows are blocks and columns are treatments.
The number of rows, N, should be large for the chisquare distribution to be
a good approximation.
The Null hypothesis of the test is that all treatments have the
same effect.
References
----------
https://en.wikipedia.org/wiki/Cochran_test
SAS Manual for NPAR TESTS
'''
warnings.warn("Deprecated, use stats.cochrans_q instead", FutureWarning)
x = np.asarray(x)
gruni = np.unique(x)
N, k = x.shape
count_row_success = (x==gruni[-1]).sum(1, float)
count_col_success = (x==gruni[-1]).sum(0, float)
count_row_ss = count_row_success.sum()
count_col_ss = count_col_success.sum()
assert count_row_ss == count_col_ss #just a calculation check
#this is SAS manual
q_stat = (k-1) * (k * np.sum(count_col_success**2) - count_col_ss**2) \
/ (k * count_row_ss - np.sum(count_row_success**2))
#Note: the denominator looks just like k times the variance of the
#columns
#Wikipedia uses a different, but equivalent expression
## q_stat = (k-1) * (k * np.sum(count_row_success**2) - count_row_ss**2) \
## / (k * count_col_ss - np.sum(count_col_success**2))
return q_stat, stats.chi2.sf(q_stat, k-1) | Cochran's Q test for identical effect of k treatments
Cochran's Q is a k-sample extension of the McNemar test. If there are only
two treatments, then Cochran's Q test and McNemar test are equivalent.
Test that the probability of success is the same for each treatment.
The alternative is that at least two treatments have a different
probability of success.
Parameters
----------
x : array_like, 2d (N,k)
data with N cases and k variables
Returns
-------
q_stat : float
test statistic
pvalue : float
pvalue from the chisquare distribution
Notes
-----
In Wikipedia terminology, rows are blocks and columns are treatments.
The number of rows, N, should be large for the chisquare distribution to be
a good approximation.
The Null hypothesis of the test is that all treatments have the
same effect.
References
----------
https://en.wikipedia.org/wiki/Cochran_test
SAS Manual for NPAR TESTS | cochrans_q | python | statsmodels/statsmodels | statsmodels/sandbox/stats/runs.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py | BSD-3-Clause |
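Cochran's Q on a small 0/1 outcome table: 8 subjects (rows) under 3 treatments (columns). This sandbox version emits a FutureWarning pointing to the maintained implementation in statsmodels.stats.

import numpy as np
from statsmodels.sandbox.stats.runs import cochrans_q

x = np.array([[1, 1, 0],
              [1, 1, 0],
              [1, 0, 0],
              [0, 1, 0],
              [1, 1, 1],
              [1, 0, 0],
              [0, 1, 0],
              [1, 1, 0]])
q_stat, pval = cochrans_q(x)
print(f"Q = {q_stat:.3f}, p-value = {pval:.3f}")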
def mcnemar(x, y=None, exact=True, correction=True):
'''McNemar test
Parameters
----------
x, y : array_like
two paired data samples. If y is None, then x can be a 2 by 2
contingency table. x and y can have more than one dimension, then
the results are calculated under the assumption that axis zero
contains the observation for the samples.
exact : bool
If exact is true, then the binomial distribution will be used.
If exact is false, then the chisquare distribution will be used, which
is the approximation to the distribution of the test statistic for
large sample sizes.
correction : bool
If true, then a continuity correction is used for the chisquare
distribution (if exact is false.)
Returns
-------
stat : float or int, array
The test statistic is the chisquare statistic if exact is false. If the
exact binomial distribution is used, then this contains the min(n1, n2),
where n1, n2 are cases that are zero in one sample but one in the other
sample.
pvalue : float or array
p-value of the null hypothesis of equal effects.
Notes
-----
This is a special case of Cochran's Q test. The results when the chisquare
distribution is used are identical, except for continuity correction.
'''
warnings.warn("Deprecated, use stats.TableSymmetry instead", FutureWarning)
x = np.asarray(x)
if y is None and x.shape[0] == x.shape[1]:
if x.shape[0] != 2:
raise ValueError('table needs to be 2 by 2')
n1, n2 = x[1, 0], x[0, 1]
else:
# I'm not checking here whether x and y are binary;
# isn't this also a paired sign test?
n1 = np.sum(x < y, 0)
n2 = np.sum(x > y, 0)
if exact:
stat = np.minimum(n1, n2)
# binom is symmetric with p=0.5
pval = stats.binom.cdf(stat, n1 + n2, 0.5) * 2
pval = np.minimum(pval, 1) # limit to 1 if n1==n2
else:
corr = int(correction) # convert bool to 0 or 1
stat = (np.abs(n1 - n2) - corr)**2 / (1. * (n1 + n2))
df = 1
pval = stats.chi2.sf(stat, df)
return stat, pval | McNemar test
Parameters
----------
x, y : array_like
two paired data samples. If y is None, then x can be a 2 by 2
contingency table. x and y can have more than one dimension, then
the results are calculated under the assumption that axis zero
contains the observation for the samples.
exact : bool
If exact is true, then the binomial distribution will be used.
If exact is false, then the chisquare distribution will be used, which
is the approximation to the distribution of the test statistic for
large sample sizes.
correction : bool
If true, then a continuity correction is used for the chisquare
distribution (if exact is false.)
Returns
-------
stat : float or int, array
The test statistic is the chisquare statistic if exact is false. If the
exact binomial distribution is used, then this contains the min(n1, n2),
where n1, n2 are cases that are zero in one sample but one in the other
sample.
pvalue : float or array
p-value of the null hypothesis of equal effects.
Notes
-----
This is a special case of Cochran's Q test. The results when the chisquare
distribution is used are identical, except for continuity correction. | mcnemar | python | statsmodels/statsmodels | statsmodels/sandbox/stats/runs.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py | BSD-3-Clause |
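McNemar on a 2x2 paired table; n1 = table[1, 0] and n2 = table[0, 1] are the discordant counts, and with exact=True the statistic is min(n1, n2). (Deprecated here; statsmodels.stats.contingency_tables.mcnemar is the maintained version.)

import numpy as np
from statsmodels.sandbox.stats.runs import mcnemar

table = np.array([[59, 6],
                  [16, 80]])
stat, pval = mcnemar(table, exact=True)
print(f"stat = {stat}, p-value = {pval:.4f}")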
def symmetry_bowker(table):
'''Test for symmetry of a (k, k) square contingency table
This is an extension of the McNemar test to test the Null hypothesis
that the contingency table is symmetric around the main diagonal, that is
n_{i, j} = n_{j, i} for all i, j
Parameters
----------
table : array_like, 2d, (k, k)
a square contingency table that contains the count for k categories
in rows and columns.
Returns
-------
statistic : float
chisquare test statistic
p-value : float
p-value of the test statistic based on chisquare distribution
df : int
degrees of freedom of the chisquare distribution
Notes
-----
Implementation is based on the SAS documentation, R includes it in
`mcnemar.test` if the table is not 2 by 2.
The pvalue is based on the chisquare distribution which requires that the
sample size is not very small to be a good approximation of the true
distribution. For 2x2 contingency tables exact distribution can be
obtained with `mcnemar`
See Also
--------
mcnemar
'''
warnings.warn("Deprecated, use stats.TableSymmetry instead", FutureWarning)
table = np.asarray(table)
k, k2 = table.shape
if k != k2:
raise ValueError('table needs to be square')
#low_idx = np.tril_indices(k, -1) # this does not have Fortran order
upp_idx = np.triu_indices(k, 1)
tril = table.T[upp_idx] # lower triangle in column order
triu = table[upp_idx] # upper triangle in row order
stat = ((tril - triu)**2 / (tril + triu + 1e-20)).sum()
df = k * (k-1) / 2.
pval = stats.chi2.sf(stat, df)
return stat, pval, df | Test for symmetry of a (k, k) square contingency table
This is an extension of the McNemar test to test the Null hypothesis
that the contingency table is symmetric around the main diagonal, that is
n_{i, j} = n_{j, i} for all i, j
Parameters
----------
table : array_like, 2d, (k, k)
a square contingency table that contains the count for k categories
in rows and columns.
Returns
-------
statistic : float
chisquare test statistic
p-value : float
p-value of the test statistic based on chisquare distribution
df : int
degrees of freedom of the chisquare distribution
Notes
-----
Implementation is based on the SAS documentation, R includes it in
`mcnemar.test` if the table is not 2 by 2.
The pvalue is based on the chisquare distribution which requires that the
sample size is not very small to be a good approximation of the true
distribution. For 2x2 contingency tables exact distribution can be
obtained with `mcnemar`
See Also
--------
mcnemar | symmetry_bowker | python | statsmodels/statsmodels | statsmodels/sandbox/stats/runs.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py | BSD-3-Clause |
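Bowker symmetry test on a 3x3 table; the null is n_ij = n_ji for all off-diagonal cells, with df = k*(k-1)/2 = 3 here.

import numpy as np
from statsmodels.sandbox.stats.runs import symmetry_bowker

table = np.array([[20, 5, 3],
                  [8, 30, 6],
                  [2, 9, 25]])
stat, pval, df = symmetry_bowker(table)
print(f"chi2 = {stat:.3f}, df = {df:.0f}, p-value = {pval:.3f}")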
def corr_equi(k_vars, rho):
'''create equicorrelated correlation matrix with rho on off diagonal
Parameters
----------
k_vars : int
number of variables, correlation matrix will be (k_vars, k_vars)
rho : float
correlation between any two random variables
Returns
-------
corr : ndarray (k_vars, k_vars)
correlation matrix
'''
corr = np.empty((k_vars, k_vars))
corr.fill(rho)
corr[np.diag_indices_from(corr)] = 1
return corr | create equicorrelated correlation matrix with rho on off diagonal
Parameters
----------
k_vars : int
number of variables, correlation matrix will be (k_vars, k_vars)
rho : float
correlation between any two random variables
Returns
-------
corr : ndarray (k_vars, k_vars)
correlation matrix | corr_equi | python | statsmodels/statsmodels | statsmodels/sandbox/panel/correlation_structures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/correlation_structures.py | BSD-3-Clause |
def corr_ar(k_vars, ar):
'''create autoregressive correlation matrix
This might be MA, not AR, process if used for residual process - check
Parameters
----------
ar : array_like, 1d
AR lag-polynomial including 1 for lag 0
'''
from scipy.linalg import toeplitz
if len(ar) < k_vars:
ar_ = np.zeros(k_vars)
ar_[:len(ar)] = ar
ar = ar_
return toeplitz(ar) | create autoregressive correlation matrix
This might be MA, not AR, process if used for residual process - check
Parameters
----------
ar : array_like, 1d
AR lag-polynomial including 1 for lag 0 | corr_ar | python | statsmodels/statsmodels | statsmodels/sandbox/panel/correlation_structures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/correlation_structures.py | BSD-3-Clause |
def corr_arma(k_vars, ar, ma):
'''create arma correlation matrix
converts arma to autoregressive lag-polynomial with k_var lags
ar and arma might need to be switched for generating residual process
Parameters
----------
ar : array_like, 1d
AR lag-polynomial including 1 for lag 0
ma : array_like, 1d
MA lag-polynomial
'''
from scipy.linalg import toeplitz
from statsmodels.tsa.arima_process import arma2ar
# TODO: flesh out the comment below about a bug in arma2ar
ar = arma2ar(ar, ma, lags=k_vars)[:k_vars] # bug in arma2ar
return toeplitz(ar) | create arma correlation matrix
converts arma to autoregressive lag-polynomial with k_var lags
ar and arma might need to be switched for generating residual process
Parameters
----------
ar : array_like, 1d
AR lag-polynomial including 1 for lag 0
ma : array_like, 1d
MA lag-polynomial | corr_arma | python | statsmodels/statsmodels | statsmodels/sandbox/panel/correlation_structures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/correlation_structures.py | BSD-3-Clause |
def corr2cov(corr, std):
'''convert correlation matrix to covariance matrix
Parameters
----------
corr : ndarray, (k_vars, k_vars)
correlation matrix
std : ndarray, (k_vars,) or scalar
standard deviation for the vector of random variables. If scalar, then
it is assumed that all variables have the same scale given by std.
'''
if np.size(std) == 1:
std = std*np.ones(corr.shape[0])
cov = corr * std[:, None] * std[None, :] # same as outer product
return cov | convert correlation matrix to covariance matrix
Parameters
----------
corr : ndarray, (k_vars, k_vars)
correlation matrix
std : ndarray, (k_vars,) or scalar
standard deviation for the vector of random variables. If scalar, then
it is assumed that all variables have the same scale given by std. | corr2cov | python | statsmodels/statsmodels | statsmodels/sandbox/panel/correlation_structures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/correlation_structures.py | BSD-3-Clause |
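Combining the helpers above: an equicorrelated correlation matrix is scaled to a covariance via corr2cov. Note that corr_ar toeplitzes the given lag polynomial directly, so [1, 0.5] puts correlation 0.5 only at lag 1 (MA(1)-like, as its docstring cautions).

import numpy as np
from statsmodels.sandbox.panel.correlation_structures import (
    corr_equi, corr_ar, corr2cov)

corr = corr_equi(4, 0.3)                  # ones on the diagonal, 0.3 elsewhere
std = np.array([1.0, 2.0, 0.5, 1.5])
cov = corr2cov(corr, std)
print(np.allclose(np.diag(cov), std**2))  # True
print(corr_ar(4, [1, 0.5]))               # banded Toeplitz matrix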
def whiten_ar(x, ar_coefs, order):
"""
Whiten a series of columns according to an AR(p) covariance structure.
This drops the initial conditions (Cochrane-Orcutt?)
Uses loop, so for short ar polynomials only, use lfilter otherwise
This needs to improve, option on method, full additional to conditional
Parameters
----------
x : array_like, (nobs,) or (nobs, k_vars)
The data to be whitened along axis 0
ar_coefs : ndarray
coefficients of AR lag-polynomial, TODO: ar or ar_coefs?
order : int
Returns
-------
x_new : ndarray
transformed array
"""
rho = ar_coefs
x = np.array(x, np.float64)
_x = x.copy()
# TODO: dimension handling is not DRY
# I think previous code worked for 2d because of single index rows in np
if x.ndim == 2:
rho = rho[:, None]
for i in range(order):
_x[(i+1):] = _x[(i+1):] - rho[i] * x[0:-(i+1)]
return _x[order:] | Whiten a series of columns according to an AR(p) covariance structure.
This drops the initial conditions (Cochrane-Orcutt?)
Uses loop, so for short ar polynomials only, use lfilter otherwise
This needs to improve, option on method, full additional to conditional
Parameters
----------
x : array_like, (nobs,) or (nobs, k_vars)
The data to be whitened along axis 0
ar_coefs : ndarray
coefficients of AR lag-polynomial, TODO: ar or ar_coefs?
order : int
Returns
-------
x_new : ndarray
transformed array | whiten_ar | python | statsmodels/statsmodels | statsmodels/sandbox/panel/correlation_structures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/correlation_structures.py | BSD-3-Clause |
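Whitening a simulated AR(1) series: with ar_coefs = [0.8] and order = 1 the transform is x[t] - 0.8 * x[t-1], and the first `order` observations are dropped.

import numpy as np
from statsmodels.sandbox.panel.correlation_structures import whiten_ar

rng = np.random.default_rng(2)
e = rng.normal(size=200)
x = np.zeros(200)
for t in range(1, 200):
    x[t] = 0.8 * x[t - 1] + e[t]          # simulate an AR(1) process

xw = whiten_ar(x, np.array([0.8]), order=1)
print(np.allclose(xw, x[1:] - 0.8 * x[:-1]))   # True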
def yule_walker_acov(acov, order=1, method="unbiased", df=None, inv=False):
"""
Estimate AR(p) parameters from acovf using Yule-Walker equation.
Parameters
----------
acov : array_like, 1d
auto-covariance
order : int, optional
The order of the autoregressive process. Default is 1.
inv : bool
If inv is True the inverse of R is also returned. Default is False.
Returns
-------
rho : ndarray
The estimated autoregressive coefficients
sigma : float
    estimated innovation standard deviation returned by yule_walker
Rinv : ndarray
inverse of the Toepliz matrix
"""
return yule_walker(acov, order=order, method=method, df=df, inv=inv,
demean=False) | Estimate AR(p) parameters from acovf using Yule-Walker equation.
Parameters
----------
acov : array_like, 1d
auto-covariance
order : int, optional
The order of the autoregressive process. Default is 1.
inv : bool
If inv is True the inverse of R is also returned. Default is False.
Returns
-------
rho : ndarray
The estimated autoregressive coefficients
sigma : float
    estimated innovation standard deviation returned by yule_walker
Rinv : ndarray
inverse of the Toepliz matrix | yule_walker_acov | python | statsmodels/statsmodels | statsmodels/sandbox/panel/correlation_structures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/correlation_structures.py | BSD-3-Clause |
def generate_panel(self):
'''
generate endog for a random panel dataset with within correlation
'''
if self.y_true is None:
self.get_y_true()
nobs_i = self.nobs_i
n_groups = self.n_groups
use_balanced = True
if use_balanced: #much faster for balanced case
noise = self.random_state.multivariate_normal(np.zeros(nobs_i),
self.cov,
size=n_groups).ravel()
#need to add self.group_means
noise += np.repeat(self.group_means, nobs_i)
else:
noise = np.empty(self.nobs, np.float64)
noise.fill(np.nan)
for ii in range(self.n_groups):
#print ii,
idx, idxupp = self.group_indices[ii:ii+2]
#print idx, idxupp
mean_i = self.group_means[ii]
noise[idx:idxupp] = self.random_state.multivariate_normal(
mean_i * np.ones(self.nobs_i), self.cov)
endog = self.y_true + noise
return endog | generate endog for a random panel dataset with within correlation | generate_panel | python | statsmodels/statsmodels | statsmodels/sandbox/panel/random_panel.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/random_panel.py | BSD-3-Clause |
def sum_outer_product_loop(x, group_iter):
'''sum of outer products dot(x_i, x_i.T) over individuals
loop version
'''
mom = 0
for g in group_iter():
x_g = x[g]
#print 'x_g.shape', x_g.shape
mom += np.outer(x_g, x_g)
return mom | sum of outer products dot(x_i, x_i.T) over individuals
loop version | sum_outer_product_loop | python | statsmodels/statsmodels | statsmodels/sandbox/panel/panel_short.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panel_short.py | BSD-3-Clause |
def sum_outer_product_balanced(x, n_groups):
'''sum of outer products dot(x_i, x_i.T) over individuals
where x_i is (nobs_i, 1), and result is (nobs_i, nobs_i)
reshape-dot version, for x.ndim=1 only
'''
xrs = x.reshape(-1, n_groups, order='F')
return np.dot(xrs, xrs.T) #should be (nobs_i, nobs_i) | sum of outer products dot(x_i, x_i.T) over individuals
where x_i is (nobs_i, 1), and result is (nobs_i, nobs_i)
reshape-dot version, for x.ndim=1 only | sum_outer_product_balanced | python | statsmodels/statsmodels | statsmodels/sandbox/panel/panel_short.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panel_short.py | BSD-3-Clause |
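The loop and reshape versions agree when groups are contiguous, equal-sized blocks, which is the balanced layout the 'F'-order reshape assumes.

import numpy as np
from statsmodels.sandbox.panel.panel_short import (
    sum_outer_product_loop, sum_outer_product_balanced)

x = np.arange(6.0)                         # 2 groups of 3 observations each
group_iter = lambda: [np.arange(0, 3), np.arange(3, 6)]
m_loop = sum_outer_product_loop(x, group_iter)
m_fast = sum_outer_product_balanced(x, n_groups=2)
print(np.allclose(m_loop, m_fast))         # True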
def whiten_individuals_loop(x, transform, group_iter):
'''apply linear transform for each individual
loop version
'''
#Note: figure out dimension of transformed variable
#so we can pre-allocate
x_new = []
for g in group_iter():
x_g = x[g]
x_new.append(np.dot(transform, x_g))
return np.concatenate(x_new) #np.vstack(x_new) #or np.array(x_new) #check shape | apply linear transform for each individual
loop version | whiten_individuals_loop | python | statsmodels/statsmodels | statsmodels/sandbox/panel/panel_short.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panel_short.py | BSD-3-Clause |
def fit_iterative(self, maxiter=3):
"""
Perform an iterative two-step procedure to estimate the GLS model.
Parameters
----------
maxiter : int, optional
the number of iterations
Notes
-----
maxiter=1: returns the estimate based on given weights
maxiter=2: performs a second estimation with the updated weights,
this is 2-step estimation
maxiter>2: iteratively estimate and update the weights
TODO: possible extension stop iteration if change in parameter
estimates is smaller than x_tol
Repeated calls to fit_iterative, will do one redundant pinv_wexog
calculation. Calling fit_iterative(maxiter) once does not do any
redundant recalculations (whitening or calculating pinv_wexog).
"""
#Note: in contrast to GLSHet, we do not have an auxiliary regression here
# might be needed if there is more structure in cov_i
#because we only have the loop we are not attaching the ols_pooled
#initial estimate anymore compared to original version
if maxiter < 1:
raise ValueError('maxiter needs to be at least 1')
import collections
self.history = collections.defaultdict(list) #not really necessary
for i in range(maxiter):
#pinv_wexog is cached, delete it to force recalculation
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
#fit with current cov, GLS, i.e. OLS on whitened endog, exog
results = self.fit()
self.history['self_params'].append(results.params)
if i < maxiter - 1:  #skip for last iteration, could break instead
    self.results_old = results  #store previous results for debugging
#get cov from residuals of previous regression
sigma_i = self.get_within_cov(results.resid)
self.cholsigmainv_i = np.linalg.cholesky(np.linalg.pinv(sigma_i)).T
#calculate new whitened endog and exog
self.initialize()
#note results is the wrapper, results._results is the results instance
#results._results.results_residual_regression = res_resid
return results | Perform an iterative two-step procedure to estimate the GLS model.
Parameters
----------
maxiter : int, optional
the number of iterations
Notes
-----
maxiter=1: returns the estimate based on the given weights
maxiter=2: performs a second estimation with the updated weights,
this is 2-step estimation
maxiter>2: iteratively estimate and update the weights
TODO: possible extension: stop iteration if the change in parameter
estimates is smaller than x_tol
Repeated calls to fit_iterative will do one redundant pinv_wexog
calculation. Calling fit_iterative(maxiter) once does not do any
redundant recalculations (whitening or calculating pinv_wexog). | fit_iterative | python | statsmodels/statsmodels | statsmodels/sandbox/panel/panel_short.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panel_short.py | BSD-3-Clause |
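For reference, the GLS step performed in each pass is the standard feasible GLS estimator (a restatement, assuming the same within-group covariance for every group, as this module does):

\hat{\beta}^{(k)} = \big(X' \Omega_{(k)}^{-1} X\big)^{-1} X' \Omega_{(k)}^{-1} y,
\qquad \Omega_{(k)} = I_m \otimes \hat{\Sigma}_i^{(k)}

where \hat{\Sigma}_i^{(k)} is the within-group covariance estimated (via get_within_cov) from the residuals of the previous iteration.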
def _compute_S(self, D, sigma):
"""covariance of observations (nobs_i, nobs_i) (JP check)
Display (3.3) from Laird, Lange, Stram (see help(Unit))
"""
self.S = (np.identity(self.n) * sigma**2 +
np.dot(self.Z, np.dot(D, self.Z.T))) | covariance of observations (nobs_i, nobs_i) (JP check)
Display (3.3) from Laird, Lange, Stram (see help(Unit)) | _compute_S | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
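In matrix notation the assignment above is exactly the display cited in the docstring:

S_i = \sigma^2 I_{n_i} + Z_i D Z_i'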
def _compute_W(self):
"""inverse covariance of observations (nobs_i, nobs_i) (JP check)
Display (3.2) from Laird, Lange, Stram (see help(Unit))
"""
self.W = L.inv(self.S) | inverse covariance of observations (nobs_i, nobs_i) (JP check)
Display (3.2) from Laird, Lange, Stram (see help(Unit)) | _compute_W | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def compute_P(self, Sinv):
"""projection matrix (nobs_i, nobs_i) (M in regression ?) (JP check, guessing)
Display (3.10) from Laird, Lange, Stram (see help(Unit))
W - W X Sinv X' W'
"""
t = np.dot(self.W, self.X)
self.P = self.W - np.dot(np.dot(t, Sinv), t.T) | projection matrix (nobs_i, nobs_i) (M in regression ?) (JP check, guessing)
Display (3.10) from Laird, Lange, Stram (see help(Unit))
W - W X Sinv X' W' | compute_P | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
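Written out, with Sinv = (\sum_j X_j' W_j X_j)^{-1} as computed in _compute_a below, the projection is

P_i = W_i - W_i X_i \Big(\sum_j X_j' W_j X_j\Big)^{-1} X_i' W_i'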
def _compute_r(self, alpha):
"""residual after removing fixed effects
Display (3.5) from Laird, Lange, Stram (see help(Unit))
"""
self.r = self.Y - np.dot(self.X, alpha) | residual after removing fixed effects
Display (3.5) from Laird, Lange, Stram (see help(Unit)) | _compute_r | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def _compute_b(self, D):
"""coefficients for random effects/coefficients
Display (3.4) from Laird, Lange, Stram (see help(Unit))
D Z' W r
"""
self.b = np.dot(D, np.dot(np.dot(self.Z.T, self.W), self.r)) | coefficients for random effects/coefficients
Display (3.4) from Laird, Lange, Stram (see help(Unit))
D Z' W r | _compute_b | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
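Combined with _compute_r above, the random-effects coefficients are

b_i = D Z_i' W_i r_i = D Z_i' W_i (y_i - X_i a)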
def fit(self, a, D, sigma):
"""
Compute unit specific parameters in
Laird, Lange, Stram (see help(Unit)).
Displays (3.2)-(3.5).
"""
self._compute_S(D, sigma) #random effect plus error covariance
self._compute_W() #inv(S)
self._compute_r(a) #residual after removing fixed effects/exogs
self._compute_b(D) #? coefficients on random exog, Z ? | Compute unit specific parameters in
Laird, Lange, Stram (see help(Unit)).
Displays (3.2)-(3.5). | fit | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def compute_xtwy(self):
"""
Utility function to compute X^tWY for Unit instance.
"""
return np.dot(np.dot(self.W, self.Y), self.X) #np.dot(v, X) returns X^t v, so this is X^t W Y | Utility function to compute X^tWY for Unit instance. | compute_xtwy | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def compute_xtwx(self):
"""
Utility function to compute X^tWX for Unit instance.
"""
return np.dot(np.dot(self.X.T, self.W), self.X) | Utility function to compute X^tWX for Unit instance. | compute_xtwx | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def cov_random(self, D, Sinv=None):
"""
Approximate covariance of estimates of random effects. Just after
Display (3.10) in Laird, Lange, Stram (see help(Unit)).
D - D' Z' P Z D
Notes
-----
In an example where the mean of the random coefficient is not zero, this
is not a covariance but a non-centered moment (proof by example).
"""
if Sinv is not None:
self.compute_P(Sinv)
t = np.dot(self.Z, D)
return D - np.dot(np.dot(t.T, self.P), t) | Approximate covariance of estimates of random effects. Just after
Display (3.10) in Laird, Lange, Stram (see help(Unit)).
D - D' Z' P Z D
Notes
-----
In an example where the mean of the random coefficient is not zero, this
is not a covariance but a non-centered moment (proof by example). | cov_random | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def logL(self, a, ML=False):
"""
Individual contributions to the log-likelihood; returns the REML
contribution by default, which requires the estimated
fixed effect a to be passed as an argument.
no constant with pi included
a is not used if ML=True (should be a=None in signature)
If ML is False, then the residuals are calculated for the given fixed
effects parameters a.
"""
if ML:
return (np.log(L.det(self.W)) - (self.r * np.dot(self.W, self.r)).sum()) / 2.
else:
if a is None:
raise ValueError('need fixed effect a for REML contribution to log-likelihood')
r = self.Y - np.dot(self.X, a)
return (np.log(L.det(self.W)) - (r * np.dot(self.W, r)).sum()) / 2. | Individual contributions to the log-likelihood; returns the REML
contribution by default, which requires the estimated
fixed effect a to be passed as an argument.
no constant with pi included
a is not used if ML=True (should be a=None in signature)
If ML is False, then the residuals are calculated for the given fixed
effects parameters a. | logL | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
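Both branches of logL evaluate the same quadratic form, differing only in which residual is plugged in:

l_i = \tfrac{1}{2}\big(\log\lvert W_i\rvert - r_i' W_i r_i\big), \qquad r_i = y_i - X_i a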
def deviance(self, ML=False):
'''deviance defined as 2 times the negative loglikelihood
'''
return - 2 * self.logL(ML=ML) | deviance defined as 2 times the negative loglikelihood | deviance | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def _compute_a(self):
"""fixed effects parameters
Display (3.1) of
Laird, Lange, Stram (see help(Mixed)).
"""
for unit in self.units:
unit.fit(self.a, self.D, self.sigma)
S = sum([unit.compute_xtwx() for unit in self.units])
Y = sum([unit.compute_xtwy() for unit in self.units])
self.Sinv = L.pinv(S)
self.a = np.dot(self.Sinv, Y) | fixed effects parameters
Display (3.1) of
Laird, Lange, Stram (see help(Mixed)). | _compute_a | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
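Stacked over units, this is the GLS estimator of the fixed effects (Display (3.1)):

\hat{a} = \Big(\sum_i X_i' W_i X_i\Big)^{-1} \sum_i X_i' W_i y_i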
def _compute_sigma(self, ML=False):
"""
Estimate sigma. If ML is True, return the ML estimate of sigma,
else return the REML estimate.
If ML, this is (3.6) in Laird, Lange, Stram (see help(Mixed)),
otherwise it corresponds to (3.8).
sigma is the standard deviation of the noise (residual)
"""
sigmasq = 0.
for unit in self.units:
if ML:
W = unit.W
else:
unit.compute_P(self.Sinv)
W = unit.P
t = unit.r - np.dot(unit.Z, unit.b)
sigmasq += np.power(t, 2).sum()
sigmasq += self.sigma**2 * np.trace(np.identity(unit.n) -
self.sigma**2 * W)
self.sigma = np.sqrt(sigmasq / self.N) | Estimate sigma. If ML is True, return the ML estimate of sigma,
else return the REML estimate.
If ML, this is (3.6) in Laird, Lange, Stram (see help(Mixed)),
otherwise it corresponds to (3.8).
sigma is the standard deviation of the noise (residual) | _compute_sigma | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
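In formula form the loop accumulates, with W_i^* = W_i for ML (Display (3.6)) and W_i^* = P_i for REML (Display (3.8)):

\hat{\sigma}^2 = \frac{1}{N} \sum_i \Big[ \lVert r_i - Z_i b_i \rVert^2 + \sigma^2 \operatorname{tr}\big(I_{n_i} - \sigma^2 W_i^*\big) \Big]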
def _compute_D(self, ML=False):
"""
Estimate random effects covariance D.
If ML is True, return the ML estimate of sigma,
else return the REML estimate.
If ML, this is (3.7) in Laird, Lange, Stram (see help(Mixed)),
otherwise it corresponds to (3.9).
"""
D = 0.
for unit in self.units:
if ML:
W = unit.W
else:
unit.compute_P(self.Sinv)
W = unit.P
D += np.multiply.outer(unit.b, unit.b)
t = np.dot(unit.Z, self.D)
D += self.D - np.dot(np.dot(t.T, W), t)
self.D = D / self.m | Estimate random effects covariance D.
If ML is True, return the ML estimate of sigma,
else return the REML estimate.
If ML, this is (3.7) in Laird, Lange, Stram (see help(Mixed)),
otherwise it corresponds to (3.9). | _compute_D | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
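The corresponding update, again with W_i^* = W_i for ML (Display (3.7)) and W_i^* = P_i for REML (Display (3.9)), is

D_{\text{new}} = \frac{1}{m} \sum_i \Big[ b_i b_i' + D - D' Z_i' W_i^* Z_i D \Big]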
def cov_fixed(self):
"""
Approximate covariance of estimates of fixed effects.
Just after Display (3.10) in Laird, Lange, Stram (see help(Mixed)).
"""
return self.Sinv | Approximate covariance of estimates of fixed effects.
Just after Display (3.10) in Laird, Lange, Stram (see help(Mixed)). | cov_fixed | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def cov_random(self):
"""
Estimated random effects covariance D.
Whether this is the ML or the REML estimate depends on how _compute_D was run.
see _compute_D, alias for self.D
"""
return self.D | Estimated random effects covariance D.
Whether this is the ML or the REML estimate depends on how _compute_D was run.
see _compute_D, alias for self.D | cov_random | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def params(self):
'''
estimated coefficients for exogenous variables or fixed effects
see _compute_a, alias for self.a
'''
return self.a | estimated coefficients for exogenous variables or fixed effects
see _compute_a, alias for self.a | params | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def params_random_units(self):
'''random coefficients for each unit
'''
return np.array([unit.b for unit in self.units]) | random coefficients for each unit | params_random_units | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |