Dataset schema: code (string, 26 to 870k chars), docstring (string, 1 to 65.6k chars), func_name (string, 1 to 194 chars), language (1 class), repo (string, 8 to 68 chars), path (string, 5 to 182 chars), url (string, 46 to 251 chars), license (4 classes).
def get_distance_matrix(self):
    """studentized range statistic"""
    # make into property, decorate
    dres = distance_st_range(self.vals, self.nobs_all, self.var_all,
                             df=self.df)
    self.distance_matrix = dres[0]
studentized range statistic
get_distance_matrix
python
statsmodels/statsmodels
statsmodels/sandbox/stats/multicomp.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py
BSD-3-Clause
def iter_subsets(self, indices):
    """Iterate over leave-one-out subsets of indices"""
    for ii in range(len(indices)):
        idxsub = copy.copy(indices)
        idxsub.pop(ii)
        yield idxsub
Iterate over leave-one-out subsets of indices
iter_subsets
python
statsmodels/statsmodels
statsmodels/sandbox/stats/multicomp.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py
BSD-3-Clause
def check_set(self, indices):
    """check whether pairwise distances of indices satisfy condition"""
    indtup = tuple(indices)
    if indtup in self.cache_result:
        return self.cache_result[indtup]
    set_distance_matrix = self.distance_matrix[np.asarray(indices)[:, None],
                                               indices]
    n_elements = len(indices)
    res = bool(np.any(set_distance_matrix > self.crit[n_elements - 1]))
    self.cache_result[indtup] = res
    return res
check whether pairwise distances of indices satisfy condition
check_set
python
statsmodels/statsmodels
statsmodels/sandbox/stats/multicomp.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py
BSD-3-Clause
def stepdown(self, indices):
    """stepdown"""
    print(indices)  # debug output
    if self.check_set(indices):  # larger than critical distance
        if len(indices) > 2:  # step down into subsets if more than 2 elements
            for subs in self.iter_subsets(indices):
                self.stepdown(subs)
        else:
            self.rejected.append(tuple(indices))
    else:
        self.accepted.append(tuple(indices))
    return indices
stepdown
stepdown
python
statsmodels/statsmodels
statsmodels/sandbox/stats/multicomp.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py
BSD-3-Clause
def run(self, alpha):
    """main function to run the test;

    could be done in __call__ instead;
    this could have all the initialization code
    """
    self.cache_result = {}
    self.crit = self.get_crit(alpha)  # decide where to set alpha, moved to run
    self.accepted = []  # store accepted sets, not unique
    self.rejected = []
    self.get_distance_matrix()
    self.stepdown(lrange(self.n_vals))
    # bug fix: the original returned `sd.rejected`, which refers to a
    # module-level instance instead of self
    return list(set(self.accepted)), list(set(self.rejected))
main function to run the test; could be done in __call__ instead; this could have all the initialization code
run
python
statsmodels/statsmodels
statsmodels/sandbox/stats/multicomp.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py
BSD-3-Clause
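The four methods above fit together as follows: run builds the distance matrix and critical values, and stepdown recursively splits any index set whose internal distance exceeds the critical value. A self-contained toy version of that recursion, with a hypothetical fixed critical value crit in place of the studentized-range critical values; stepdown_demo and its inputs are illustrative, not the statsmodels API:

import numpy as np

def stepdown_demo(vals, crit):
    """Recursively split index sets whose value range exceeds crit."""
    vals = np.sort(np.asarray(vals, dtype=float))
    accepted, rejected = [], []

    def step(indices):
        sub = vals[list(indices)]
        if sub.max() - sub.min() > crit:          # larger than critical distance
            if len(indices) > 2:                  # step down into subsets
                for drop in range(len(indices)):
                    step(indices[:drop] + indices[drop + 1:])
            else:
                rejected.append(indices)
        else:
            accepted.append(indices)

    step(tuple(range(len(vals))))
    return sorted(set(accepted)), sorted(set(rejected))

# groups 0-2 are homogeneous, group 3 is clearly separated
print(stepdown_demo([0.0, 0.3, 0.5, 4.0], crit=1.0))
# -> ([(0, 1), (0, 1, 2), (0, 2), (1, 2)], [(0, 3), (1, 3), (2, 3)])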
def subsets(vals, indices_):
    """recursive function for constructing homogeneous subsets

    registers rejected pairs and subsets in `rejected` and `subsetsli`
    in the outer scope
    """
    i, j = indices_[0], indices_[-1]
    if vals[-1] - vals[0] > dcrit[i, j]:
        rejected.append((indices_[0], indices_[-1]))
        return [subsets(vals[:-1], indices_[:-1]),
                subsets(vals[1:], indices_[1:]),
                (indices_[0], indices_[-1])]
    else:
        subsetsli.append(tuple(indices_))
        return indices_
recursive function for constructing homogeneous subsets; registers rejected pairs and subsets in `rejected` and `subsetsli` in the outer scope
homogeneous_subsets.subsets
python
statsmodels/statsmodels
statsmodels/sandbox/stats/multicomp.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py
BSD-3-Clause
def homogeneous_subsets(vals, dcrit):
    '''recursively check all pairs of vals for minimum distance

    step down method as in Newman-Keuls and Ryan procedures. This is not a
    closed procedure since not all partitions are checked.

    Parameters
    ----------
    vals : array_like
        values that are pairwise compared
    dcrit : array_like or float
        critical distance for rejecting, either float, or 2-dimensional array
        with distances on the upper triangle.

    Returns
    -------
    rejs : list of pairs
        list of pair-indices with (strictly) larger than critical difference
    nrejs : list of pairs
        list of pair-indices with smaller than critical difference
    lli : list of tuples
        list of subsets with smaller than critical difference
    res : tree
        result of all comparisons (for checking)

    this follows the description in the SPSS notes on Post-Hoc Tests

    Because of the recursive structure, some comparisons are made several
    times, but only unique pairs or sets are returned.

    Examples
    --------
    >>> m = [0, 2, 2.5, 3, 6, 8, 9, 9.5, 10]
    >>> rej, nrej, ssli, res = homogeneous_subsets(m, 2)
    >>> set_partition(ssli)
    ([(5, 6, 7, 8), (1, 2, 3), (4,)], [0])
    >>> [np.array(m)[list(pp)] for pp in set_partition(ssli)[0]]
    [array([ 8. ,  9. ,  9.5, 10. ]), array([ 2. ,  2.5,  3. ]), array([ 6.])]

    '''
    nvals = len(vals)
    indices_ = lrange(nvals)
    rejected = []
    subsetsli = []
    if np.size(dcrit) == 1:
        dcrit = dcrit * np.ones((nvals, nvals))  # example numbers for experimenting

    def subsets(vals, indices_):
        '''recursive function for constructing homogeneous subsets

        registers rejected pairs and subsets in `rejected` and `subsetsli`
        in the outer scope
        '''
        i, j = indices_[0], indices_[-1]
        if vals[-1] - vals[0] > dcrit[i, j]:
            rejected.append((indices_[0], indices_[-1]))
            return [subsets(vals[:-1], indices_[:-1]),
                    subsets(vals[1:], indices_[1:]),
                    (indices_[0], indices_[-1])]
        else:
            subsetsli.append(tuple(indices_))
            return indices_

    res = subsets(vals, indices_)

    all_pairs = [(i, j) for i in range(nvals) for j in range(nvals - 1, i, -1)]
    rejs = set(rejected)
    not_rejected = list(set(all_pairs) - rejs)

    return list(rejs), not_rejected, list(set(subsetsli)), res
recursively check all pairs of vals for minimum distance step down method as in Newman-Keuls and Ryan procedures. This is not a closed procedure since not all partitions are checked. Parameters ---------- vals : array_like values that are pairwise compared dcrit : array_like or float critical distance for rejecting, either float, or 2-dimensional array with distances on the upper triangle. Returns ------- rejs : list of pairs list of pair-indices with (strictly) larger than critical difference nrejs : list of pairs list of pair-indices with smaller than critical difference lli : list of tuples list of subsets with smaller than critical difference res : tree result of all comparisons (for checking) this follows description in SPSS notes on Post-Hoc Tests Because of the recursive structure, some comparisons are made several times, but only unique pairs or sets are returned. Examples -------- >>> m = [0, 2, 2.5, 3, 6, 8, 9, 9.5,10 ] >>> rej, nrej, ssli, res = homogeneous_subsets(m, 2) >>> set_partition(ssli) ([(5, 6, 7, 8), (1, 2, 3), (4,)], [0]) >>> [np.array(m)[list(pp)] for pp in set_partition(ssli)[0]] [array([ 8. , 9. , 9.5, 10. ]), array([ 2. , 2.5, 3. ]), array([ 6.])]
homogeneous_subsets
python
statsmodels/statsmodels
statsmodels/sandbox/stats/multicomp.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py
BSD-3-Clause
def set_partition(ssli):
    '''extract a partition from a list of tuples

    this should be correctly called select largest disjoint sets.
    Begun and Gabriel 1981 do not seem to be bothered by sets of accepted
    hypothesis with joint elements, e.g. maximal_accepted_sets =
    { {1,2,3}, {2,3,4} }

    This creates a set partition from a list of sets given as tuples.
    It tries to find the partition with the largest sets. That is, sets are
    included after being sorted by length.

    If the list does not include the singletons, then it will be only a
    partial partition. Missing items are singletons (I think).

    Examples
    --------
    >>> li
    [(5, 6, 7, 8), (1, 2, 3), (4, 5), (0, 1)]
    >>> set_partition(li)
    ([(5, 6, 7, 8), (1, 2, 3)], [0, 4])

    '''
    part = []
    for s in sorted(list(set(ssli)), key=len)[::-1]:
        s_ = set(s).copy()
        if not any(s_.intersection(set(t)) for t in part):
            part.append(s)

    missing = list({i for ll in ssli for i in ll}
                   - {i for ll in part for i in ll})
    return part, missing
extract a partition from a list of tuples this should be correctly called select largest disjoint sets. Begun and Gabriel 1981 do not seem to be bothered by sets of accepted hypothesis with joint elements, e.g. maximal_accepted_sets = { {1,2,3}, {2,3,4} } This creates a set partition from a list of sets given as tuples. It tries to find the partition with the largest sets. That is, sets are included after being sorted by length. If the list does not include the singletons, then it will be only a partial partition. Missing items are singletons (I think). Examples -------- >>> li [(5, 6, 7, 8), (1, 2, 3), (4, 5), (0, 1)] >>> set_partition(li) ([(5, 6, 7, 8), (1, 2, 3)], [0, 4])
set_partition
python
statsmodels/statsmodels
statsmodels/sandbox/stats/multicomp.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py
BSD-3-Clause
def set_remove_subs(ssli):
    '''remove sets that are subsets of another set from a list of tuples

    Parameters
    ----------
    ssli : list of tuples
        each tuple is considered as a set

    Returns
    -------
    part : list of tuples
        new list with subset tuples removed, it is sorted by set-length of
        tuples. The list contains original tuples, duplicate elements are
        not removed.

    Examples
    --------
    >>> set_remove_subs([(0, 1), (1, 2), (1, 2, 3), (0,)])
    [(1, 2, 3), (0, 1)]
    >>> set_remove_subs([(0, 1), (1, 2), (1, 1, 1, 2, 3), (0,)])
    [(1, 1, 1, 2, 3), (0, 1)]

    '''
    # TODO: maybe convert all tuples to sets immediately, but I do not need
    #       the extra efficiency
    part = []
    for s in sorted(list(set(ssli)), key=lambda x: len(set(x)))[::-1]:
        if not any(set(s).issubset(set(t)) for t in part):
            part.append(s)

    return part
remove sets that are subsets of another set from a list of tuples Parameters ---------- ssli : list of tuples each tuple is considered as a set Returns ------- part : list of tuples new list with subset tuples removed, it is sorted by set-length of tuples. The list contains original tuples, duplicate elements are not removed. Examples -------- >>> set_remove_subs([(0, 1), (1, 2), (1, 2, 3), (0,)]) [(1, 2, 3), (0, 1)] >>> set_remove_subs([(0, 1), (1, 2), (1,1, 1, 2, 3), (0,)]) [(1, 1, 1, 2, 3), (0, 1)]
set_remove_subs
python
statsmodels/statsmodels
statsmodels/sandbox/stats/multicomp.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/multicomp.py
BSD-3-Clause
def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4, axis=0,
                      masknan=None):
    """Calculate the score at the given 'per' percentile of the
    sequence `data`. For example, the score at per=50 is the median.

    This function is a shortcut to mquantiles
    """
    per = np.asarray(per, float)
    if (per < 0).any() or (per > 100.).any():
        raise ValueError("The percentile should be between 0. and 100. !"
                         " (got %s)" % per)
    return quantiles(data, prob=[per/100.], alphap=alphap, betap=betap,
                     limit=limit, axis=axis, masknan=masknan).squeeze()
Calculate the score at the given 'per' percentile of the sequence `data`. For example, the score at per=50 is the median. This function is a shortcut to mquantiles
scoreatpercentile
python
statsmodels/statsmodels
statsmodels/sandbox/stats/stats_mstats_short.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/stats_mstats_short.py
BSD-3-Clause
def plotting_positions(data, alpha=0.4, beta=0.4, axis=0, masknan=False):
    """Returns the plotting positions (or empirical percentile points)
    for the data.

    Plotting positions are defined as (i-alpha)/(n+1-alpha-beta), where:
        - i is the rank order statistics (starting at 1)
        - n is the number of unmasked values along the given axis
        - alpha and beta are two parameters.

    Typical values for alpha and beta are:
        - (0,1)    : p(k) = k/n, linear interpolation of cdf (R, type 4)
        - (.5,.5)  : p(k) = (k-1/2.)/n, piecewise linear function
          (R, type 5) (Bliss 1967: "Rankit")
        - (0,0)    : p(k) = k/(n+1), Weibull (R type 6), (Van der Waerden 1952)
        - (1,1)    : p(k) = (k-1)/(n-1), in this case p(k) = mode[F(x[k])].
          That's R default (R type 7)
        - (1/3,1/3): p(k) = (k-1/3)/(n+1/3), then p(k) ~ median[F(x[k])].
          The resulting quantile estimates are approximately median-unbiased
          regardless of the distribution of x. (R type 8), (Tukey 1962)
        - (3/8,3/8): p(k) = (k-3/8)/(n+1/4), the resulting quantile estimates
          are approximately unbiased if x is normally distributed (R type 9)
          (Blom 1958)
        - (.4,.4)  : approximately quantile unbiased (Cunnane)
        - (.35,.35): APL, used with PWM

    Parameters
    ----------
    data : sequence
        Input data, as a sequence or array of dimension at most 2.
    alpha : {0.4, float} optional
        Plotting positions parameter.
    beta : {0.4, float} optional
        Plotting positions parameter.
    axis : int or None
        axis along which the positions are computed; None ravels the data
    masknan : bool
        if True, then nans are treated as missing values

    Notes
    -----
    I think the adjustments assume that there are no ties in order to be a
    reasonable approximation to a continuous density function. TODO: check
    this

    References
    ----------
    unknown,
    dates to original papers from Beasley, Erickson, Allison 2009 Behav Genet
    """
    if isinstance(data, np.ma.MaskedArray):
        if axis is None or data.ndim == 1:
            return stats.mstats.plotting_positions(data, alpha=alpha, beta=beta)
        else:
            return ma.apply_along_axis(stats.mstats.plotting_positions, axis,
                                       data, alpha=alpha, beta=beta)
    if masknan:
        nanmask = np.isnan(data)
        if nanmask.any():
            marr = ma.array(data, mask=nanmask)
            # code duplication:
            if axis is None or data.ndim == 1:
                marr = stats.mstats.plotting_positions(marr, alpha=alpha,
                                                       beta=beta)
            else:
                marr = ma.apply_along_axis(stats.mstats.plotting_positions,
                                           axis, marr, alpha=alpha, beta=beta)
            return ma.filled(marr, fill_value=np.nan)

    data = np.asarray(data)
    if data.size == 1:  # use helper function instead
        data = np.atleast_1d(data)
        axis = 0
    if axis is None:
        data = data.ravel()
        axis = 0
    n = data.shape[axis]
    if data.ndim == 1:
        plpos = np.empty(data.shape, dtype=float)
        plpos[data.argsort()] = (np.arange(1, n + 1) - alpha) \
                                / (n + 1. - alpha - beta)
    else:
        # nd assignment instead of second argsort does not look easy
        plpos = (data.argsort(axis).argsort(axis) + 1. - alpha) \
                / (n + 1. - alpha - beta)
    return plpos
Returns the plotting positions (or empirical percentile points) for the data. Plotting positions are defined as (i-alpha)/(n+1-alpha-beta), where: - i is the rank order statistics (starting at 1) - n is the number of unmasked values along the given axis - alpha and beta are two parameters. Typical values for alpha and beta are: - (0,1) : *p(k) = k/n* : linear interpolation of cdf (R, type 4) - (.5,.5) : *p(k) = (k-1/2.)/n* : piecewise linear function (R, type 5) (Bliss 1967: "Rankit") - (0,0) : *p(k) = k/(n+1)* : Weibull (R type 6), (Van der Waerden 1952) - (1,1) : *p(k) = (k-1)/(n-1)*. In this case, p(k) = mode[F(x[k])]. That's R default (R type 7) - (1/3,1/3): *p(k) = (k-1/3)/(n+1/3)*. Then p(k) ~ median[F(x[k])]. The resulting quantile estimates are approximately median-unbiased regardless of the distribution of x. (R type 8), (Tukey 1962) - (3/8,3/8): *p(k) = (k-3/8)/(n+1/4)*. The resulting quantile estimates are approximately unbiased if x is normally distributed (R type 9) (Blom 1958) - (.4,.4) : approximately quantile unbiased (Cunnane) - (.35,.35): APL, used with PWM Parameters ---------- x : sequence Input data, as a sequence or array of dimension at most 2. prob : sequence List of quantiles to compute. alpha : {0.4, float} optional Plotting positions parameter. beta : {0.4, float} optional Plotting positions parameter. Notes ----- I think the adjustments assume that there are no ties in order to be a reasonable approximation to a continuous density function. TODO: check this References ---------- unknown, dates to original papers from Beasley, Erickson, Allison 2009 Behav Genet
plotting_positions
python
statsmodels/statsmodels
statsmodels/sandbox/stats/stats_mstats_short.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/stats_mstats_short.py
BSD-3-Clause
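A quick numeric illustration of the 1d branch above: each observation gets (i - alpha)/(n + 1 - alpha - beta) according to its rank. This reimplements the formula inline rather than calling the function, so the snippet runs standalone:

import numpy as np

data = np.array([3.0, 1.0, 2.0, 5.0, 4.0])
alpha = beta = 0.4
n = len(data)
plpos = np.empty(n)
plpos[data.argsort()] = (np.arange(1, n + 1) - alpha) / (n + 1. - alpha - beta)
print(plpos)   # position of each observation follows its rank:
# [0.5, 0.115..., 0.307..., 0.884..., 0.692...]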
def plotting_positions_w1d(data, weights=None, alpha=0.4, beta=0.4,
                           method='notnormed'):
    '''Weighted plotting positions (or empirical percentile points)
    for the data.

    observations are weighted and the plotting positions are defined as
    (ws-alpha)/(n-alpha-beta), where:
        - ws is the weighted rank order statistics or cumulative weighted sum,
          normalized to n if method is "normed"
        - n is the number of values along the given axis if method is
          "normed" and total weight otherwise
        - alpha and beta are two parameters.

    wtd.quantile in R package Hmisc seems to use the "notnormed" version.
    notnormed coincides with unweighted segment in example, drop "normed"
    version?

    See Also
    --------
    plotting_positions : unweighted version that works also with more than one
        dimension and has other options
    '''
    x = np.atleast_1d(data)
    if x.ndim > 1:
        raise ValueError('currently implemented only for 1d')
    if weights is None:
        weights = np.ones(x.shape)
    else:
        weights = np.array(weights, float, copy=False, ndmin=1)  # atleast_1d(weights)
        if weights.shape != x.shape:
            raise ValueError('if weights is given, it needs to be the same '
                             'shape as data')
    n = len(x)
    xargsort = x.argsort()
    ws = weights[xargsort].cumsum()
    res = np.empty(x.shape)
    if method == 'normed':
        res[xargsort] = (1. * ws / ws[-1] * n - alpha) / (n + 1. - alpha - beta)
    else:
        res[xargsort] = (1. * ws - alpha) / (ws[-1] + 1. - alpha - beta)
    return res
Weighted plotting positions (or empirical percentile points) for the data. observations are weighted and the plotting positions are defined as (ws-alpha)/(n-alpha-beta), where: - ws is the weighted rank order statistics or cumulative weighted sum, normalized to n if method is "normed" - n is the number of values along the given axis if method is "normed" and total weight otherwise - alpha and beta are two parameters. wtd.quantile in R package Hmisc seems to use the "notnormed" version. notnormed coincides with unweighted segment in example, drop "normed" version ? See Also -------- plotting_positions : unweighted version that works also with more than one dimension and has other options
plotting_positions_w1d
python
statsmodels/statsmodels
statsmodels/sandbox/stats/stats_mstats_short.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/stats_mstats_short.py
BSD-3-Clause
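The "notnormed" branch with unit weights reduces to the unweighted formula, since the cumulative weight of the sorted sample is just the rank. A small inline check of that claim (not a library call):

import numpy as np

x = np.array([2.0, 0.5, 1.0])
alpha = beta = 0.4
order = x.argsort()
ws = np.ones_like(x)[order].cumsum()     # unit weights -> cumsum is the rank
weighted = np.empty_like(x)
weighted[order] = (ws - alpha) / (ws[-1] + 1. - alpha - beta)
unweighted = np.empty_like(x)
unweighted[order] = (np.arange(1, len(x) + 1) - alpha) \
                    / (len(x) + 1. - alpha - beta)
assert np.allclose(weighted, unweighted)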
def edf_normal_inverse_transformed(x, alpha=3./8, beta=3./8, axis=0):
    '''rank based normal inverse transformed cdf
    '''
    from scipy import stats
    # bug fix: the original passed beta=alpha and a hardcoded axis=0
    ranks = plotting_positions(x, alpha=alpha, beta=beta, axis=axis,
                               masknan=False)
    ranks_transf = stats.norm.ppf(ranks)
    return ranks_transf
rank based normal inverse transformed cdf
edf_normal_inverse_transformed
python
statsmodels/statsmodels
statsmodels/sandbox/stats/stats_mstats_short.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/stats_mstats_short.py
BSD-3-Clause
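A minimal standalone version of the transform for a 1d sample: Blom-type (3/8, 3/8) plotting positions fed through the normal quantile function.

import numpy as np
from scipy import stats

x = np.array([0.3, 2.5, 1.1, 7.0])
alpha = beta = 3. / 8
n = len(x)
ranks = np.empty(n)
ranks[x.argsort()] = (np.arange(1, n + 1) - alpha) / (n + 1. - alpha - beta)
z = stats.norm.ppf(ranks)    # approximately standard normal scores
print(z)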
def runs_test(self, correction=True):
    '''basic version of runs test

    Parameters
    ----------
    correction : bool
        Following the SAS manual, for sample sizes below 50, the test
        statistic is corrected by 0.5. This can be turned off with
        correction=False, and was included to match R, tseries, which does
        not use any correction.

    pvalue based on normal distribution, with integer correction

    '''
    self.npo = npo = (self.runs_pos).sum()
    self.nne = nne = (self.runs_neg).sum()
    # n_r = self.n_runs
    n = npo + nne
    npn = npo * nne
    rmean = 2. * npn / n + 1
    rvar = 2. * npn * (2. * npn - n) / n**2. / (n - 1.)
    rstd = np.sqrt(rvar)
    rdemean = self.n_runs - rmean
    if n >= 50 or not correction:
        z = rdemean
    else:
        if rdemean > 0.5:
            z = rdemean - 0.5
        elif rdemean < -0.5:  # bug fix: the original compared to 0.5
            z = rdemean + 0.5
        else:
            z = 0.
    z /= rstd
    pval = 2 * stats.norm.sf(np.abs(z))
    return z, pval
basic version of runs test Parameters ---------- correction : bool Following the SAS manual, for sample sizes below 50, the test statistic is corrected by 0.5. This can be turned off with correction=False, and was included to match R, tseries, which does not use any correction. pvalue based on normal distribution, with integer correction
runs_test
python
statsmodels/statsmodels
statsmodels/sandbox/stats/runs.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py
BSD-3-Clause
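The moments used in runs_test can be computed by hand for a short 0/1 sequence; a sketch without the small-sample continuity correction:

import numpy as np
from scipy import stats

x = np.array([1, 1, 0, 0, 1, 0, 1, 1, 0, 0])
n_runs = 1 + (np.diff(x) != 0).sum()     # 6 runs in this sequence
npo, nne = x.sum(), (1 - x).sum()
n, npn = npo + nne, npo * nne
rmean = 2. * npn / n + 1
rvar = 2. * npn * (2. * npn - n) / n**2. / (n - 1.)
z = (n_runs - rmean) / np.sqrt(rvar)     # no continuity correction
pval = 2 * stats.norm.sf(np.abs(z))
print(n_runs, z, pval)                   # 6 runs, z = 0 for this sequence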
def runstest_1samp(x, cutoff='mean', correction=True):
    '''use runs test on binary discretized data above/below cutoff

    Parameters
    ----------
    x : array_like
        data, numeric
    cutoff : {'mean', 'median'} or number
        This specifies the cutoff to split the data into large and small
        values.
    correction : bool
        Following the SAS manual, for sample sizes below 50, the test
        statistic is corrected by 0.5. This can be turned off with
        correction=False, and was included to match R, tseries, which does
        not use any correction.

    Returns
    -------
    z_stat : float
        test statistic, asymptotically normally distributed
    p-value : float
        p-value, reject the null hypothesis if it is below a type 1 error
        level, alpha.

    '''
    x = array_like(x, "x")
    if cutoff == 'mean':
        cutoff = np.mean(x)
    elif cutoff == 'median':
        cutoff = np.median(x)
    else:
        cutoff = float(cutoff)
    xindicator = (x >= cutoff).astype(int)
    return Runs(xindicator).runs_test(correction=correction)
use runs test on binary discretized data above/below cutoff Parameters ---------- x : array_like data, numeric cutoff : {'mean', 'median'} or number This specifies the cutoff to split the data into large and small values. correction : bool Following the SAS manual, for sample sizes below 50, the test statistic is corrected by 0.5. This can be turned off with correction=False, and was included to match R, tseries, which does not use any correction. Returns ------- z_stat : float test statistic, asymptotically normally distributed p-value : float p-value, reject the null hypothesis if it is below a type 1 error level, alpha.
runstest_1samp
python
statsmodels/statsmodels
statsmodels/sandbox/stats/runs.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py
BSD-3-Clause
def runstest_2samp(x, y=None, groups=None, correction=True):
    '''Wald-Wolfowitz runstest for two samples

    This tests whether two samples come from the same distribution.

    Parameters
    ----------
    x : array_like
        data, numeric, contains either one group, if y is also given, or
        both groups, if additionally a group indicator is provided
    y : array_like (optional)
        data, numeric
    groups : array_like
        group labels or indicator; the data for both groups is given in a
        single 1-dimensional array, x. If group labels are not [0, 1], then
        the two unique values of `groups` are used.
    correction : bool
        Following the SAS manual, for sample sizes below 50, the test
        statistic is corrected by 0.5. This can be turned off with
        correction=False, and was included to match R, tseries, which does
        not use any correction.

    Returns
    -------
    z_stat : float
        test statistic, asymptotically normally distributed
    p-value : float
        p-value, reject the null hypothesis if it is below a type 1 error
        level, alpha.

    Notes
    -----
    Wald-Wolfowitz runs test.

    If there are ties, then the test statistic and p-value that are reported
    are based on the higher p-value between sorting all tied observations of
    the same group.

    This test is intended for continuous distributions.

    SAS has treatment for ties, but not clear, and sounds more complicated
    (minimum and maximum possible runs prevent use of argsort)
    (maybe it's not so difficult, idea: add small positive noise to first
    one, run test, then to the other, run test, take max(?) p-value - DONE
    This gives not the minimum and maximum of the number of runs, but should
    be close. Not true, this is close to minimum but far away from maximum.
    maximum number of runs would use alternating groups in the ties.)
    Maybe adding random noise would be the better approach.

    SAS has exact distribution for sample size <= 30, does not look standard
    but should be easy to add.

    currently two-sided test only

    This has not been verified against a reference implementation. In a short
    Monte Carlo simulation where both samples are normally distributed, the
    test seems to be correctly sized for larger number of observations (30 or
    larger), but conservative (i.e. reject less often than nominal) with a
    sample size of 10 in each group.

    See Also
    --------
    runstest_1samp
    Runs
    RunsProb

    '''
    x = np.asarray(x)
    if y is not None:
        y = np.asarray(y)
        groups = np.concatenate((np.zeros(len(x)), np.ones(len(y))))
        # note: reassigning x
        x = np.concatenate((x, y))
        gruni = np.arange(2)
    elif groups is not None:
        gruni = np.unique(groups)
        if gruni.size != 2:  # pylint: disable=E1103
            raise ValueError('not exactly two groups specified')
        # require groups to be numeric ???
    else:
        raise ValueError('either y or groups is necessary')

    xargsort = np.argsort(x)
    # check for ties
    x_sorted = x[xargsort]
    x_diff = np.diff(x_sorted)  # used for detecting and handling ties
    if x_diff.min() == 0:
        print('ties detected')  # replace with warning
        x_mindiff = x_diff[x_diff > 0].min()
        eps = x_mindiff / 2.
        xx = x.copy()  # do not change original, just in case

        xx[groups == gruni[0]] += eps
        xargsort = np.argsort(xx)
        xindicator = groups[xargsort]
        z0, p0 = Runs(xindicator).runs_test(correction=correction)

        xx[groups == gruni[0]] -= eps  # restore xx = x
        xx[groups == gruni[1]] += eps
        xargsort = np.argsort(xx)
        xindicator = groups[xargsort]
        z1, p1 = Runs(xindicator).runs_test(correction=correction)

        idx = np.argmax([p0, p1])
        return [z0, z1][idx], [p0, p1][idx]
    else:
        xindicator = groups[xargsort]
        return Runs(xindicator).runs_test(correction=correction)
Wald-Wolfowitz runstest for two samples This tests whether two samples come from the same distribution. Parameters ---------- x : array_like data, numeric, contains either one group, if y is also given, or both groups, if additionally a group indicator is provided y : array_like (optional) data, numeric groups : array_like group labels or indicator; the data for both groups is given in a single 1-dimensional array, x. If group labels are not [0,1], then the two unique values of groups are used. correction : bool Following the SAS manual, for sample sizes below 50, the test statistic is corrected by 0.5. This can be turned off with correction=False, and was included to match R, tseries, which does not use any correction. Returns ------- z_stat : float test statistic, asymptotically normally distributed p-value : float p-value, reject the null hypothesis if it is below a type 1 error level, alpha. Notes ----- Wald-Wolfowitz runs test. If there are ties, then the test statistic and p-value that are reported are based on the higher p-value between sorting all tied observations of the same group. This test is intended for continuous distributions. SAS has treatment for ties, but not clear, and sounds more complicated (minimum and maximum possible runs prevent use of argsort) (maybe it's not so difficult, idea: add small positive noise to first one, run test, then to the other, run test, take max(?) p-value - DONE This gives not the minimum and maximum of the number of runs, but should be close. Not true, this is close to minimum but far away from maximum. maximum number of runs would use alternating groups in the ties.) Maybe adding random noise would be the better approach. SAS has exact distribution for sample size <=30, does not look standard but should be easy to add. currently two-sided test only This has not been verified against a reference implementation. In a short Monte Carlo simulation where both samples are normally distributed, the test seems to be correctly sized for larger number of observations (30 or larger), but conservative (i.e. reject less often than nominal) with a sample size of 10 in each group. See Also -------- runstest_1samp Runs RunsProb
runstest_2samp
python
statsmodels/statsmodels
statsmodels/sandbox/stats/runs.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py
BSD-3-Clause
def pdf(self, x, k, n, p):
    '''distribution of success runs of length k or more

    Parameters
    ----------
    x : float
        count of runs of length k or more
    k : int
        length of runs
    n : int
        total number of observations or trials
    p : float
        probability of success in each Bernoulli trial

    Returns
    -------
    pdf : float
        probability that x runs of length of k are observed

    Notes
    -----
    not yet vectorized

    References
    ----------
    Muselli 1996, theorem 3
    '''
    q = 1 - p
    m = np.arange(x, (n + 1) // (k + 1) + 1)[:, None]
    terms = (-1)**(m - x) * comb(m, x) * p**(m * k) * q**(m - 1) \
            * (comb(n - m * k, m - 1) + q * comb(n - m * k, m))
    return terms.sum(0)
distribution of success runs of length k or more Parameters ---------- x : float count of runs of length k or more k : int length of runs n : int total number of observations or trials p : float probability of success in each Bernoulli trial Returns ------- pdf : float probability that x runs of length of k are observed Notes ----- not yet vectorized References ---------- Muselli 1996, theorem 3
pdf
python
statsmodels/statsmodels
statsmodels/sandbox/stats/runs.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py
BSD-3-Clause
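A brute-force cross-check of the Muselli formula on a tiny case, assuming x counts maximal success runs of length at least k. Enumerating all 2**n outcomes is only feasible for small n, so this is a sanity check under that assumption, not an implementation; pdf_muselli and count_long_runs are illustrative names:

import itertools
import numpy as np
from scipy.special import comb

def pdf_muselli(x, k, n, p):
    # same expression as the method above
    q = 1 - p
    m = np.arange(x, (n + 1) // (k + 1) + 1)[:, None]
    terms = ((-1.)**(m - x) * comb(m, x) * p**(m * k) * q**(m - 1)
             * (comb(n - m * k, m - 1) + q * comb(n - m * k, m)))
    return terms.sum(0)

def count_long_runs(seq, k):
    # number of maximal runs of 1s with length >= k
    return sum(1 for val, grp in itertools.groupby(seq)
               if val == 1 and sum(1 for _ in grp) >= k)

n, k, p, x = 6, 2, 0.3, 1
brute = sum(p**sum(seq) * (1 - p)**(n - sum(seq))
            for seq in itertools.product([0, 1], repeat=n)
            if count_long_runs(seq, k) == x)
print(brute, pdf_muselli(x, k, n, p).item())   # the two numbers should agree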
def median_test_ksample(x, groups):
    '''chisquare test for equality of median/location

    This tests whether all groups have the same fraction of observations
    above the median.

    Parameters
    ----------
    x : array_like
        data values stacked for all groups
    groups : array_like
        group labels or indicator

    Returns
    -------
    stat : float
        test statistic
    pvalue : float
        pvalue from the chisquare distribution

    others ????
        currently some test output, table and expected

    '''
    x = np.asarray(x)
    gruni = np.unique(groups)
    xli = [x[groups == group] for group in gruni]
    xmedian = np.median(x)
    counts_larger = np.array([(xg > xmedian).sum() for xg in xli])
    counts = np.array([len(xg) for xg in xli])
    counts_smaller = counts - counts_larger
    nobs = counts.sum()
    n_larger = (x > xmedian).sum()
    n_smaller = nobs - n_larger
    table = np.vstack((counts_smaller, counts_larger))

    # the following should be replaced by a chisquare_contingency table
    expected = np.vstack((counts * 1. / nobs * n_smaller,
                          counts * 1. / nobs * n_larger))

    if (expected < 5).any():
        print('Warning: There are cells with less than 5 expected '
              'observations. The chisquare distribution might not be a good '
              'approximation for the true distribution.')

    # check ddof
    return stats.chisquare(table.ravel(), expected.ravel(), ddof=1), table, expected
chisquare test for equality of median/location This tests whether all groups have the same fraction of observations above the median. Parameters ---------- x : array_like data values stacked for all groups groups : array_like group labels or indicator Returns ------- stat : float test statistic pvalue : float pvalue from the chisquare distribution others ???? currently some test output, table and expected
median_test_ksample
python
statsmodels/statsmodels
statsmodels/sandbox/stats/runs.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py
BSD-3-Clause
def cochrans_q(x):
    '''Cochran's Q test for identical effect of k treatments

    Cochran's Q is a k-sample extension of the McNemar test. If there are
    only two treatments, then Cochran's Q test and McNemar test are
    equivalent.

    Test that the probability of success is the same for each treatment.
    The alternative is that at least two treatments have a different
    probability of success.

    Parameters
    ----------
    x : array_like, 2d (N, k)
        data with N cases and k variables

    Returns
    -------
    q_stat : float
        test statistic
    pvalue : float
        pvalue from the chisquare distribution

    Notes
    -----
    In Wikipedia terminology, rows are blocks and columns are treatments.
    The number of rows N, should be large for the chisquare distribution to
    be a good approximation.

    The Null hypothesis of the test is that all treatments have the same
    effect.

    References
    ----------
    https://en.wikipedia.org/wiki/Cochran_test
    SAS Manual for NPAR TESTS

    '''
    warnings.warn("Deprecated, use stats.cochrans_q instead", FutureWarning)

    x = np.asarray(x)
    gruni = np.unique(x)
    N, k = x.shape
    count_row_success = (x == gruni[-1]).sum(1, float)
    count_col_success = (x == gruni[-1]).sum(0, float)
    count_row_ss = count_row_success.sum()
    count_col_ss = count_col_success.sum()
    assert count_row_ss == count_col_ss  # just a calculation check

    # this is the SAS manual version
    q_stat = (k - 1) * (k * np.sum(count_col_success**2) - count_col_ss**2) \
             / (k * count_row_ss - np.sum(count_row_success**2))

    # Note: the denominator looks just like k times the variance of the
    # columns
    # Wikipedia uses a different, but equivalent expression
    # q_stat = (k-1) * (k * np.sum(count_row_success**2) - count_row_ss**2) \
    #          / (k * count_col_ss - np.sum(count_col_success**2))

    return q_stat, stats.chi2.sf(q_stat, k - 1)
Cochran's Q test for identical effect of k treatments Cochran's Q is a k-sample extension of the McNemar test. If there are only two treatments, then Cochran's Q test and McNemar test are equivalent. Test that the probability of success is the same for each treatment. The alternative is that at least two treatments have a different probability of success. Parameters ---------- x : array_like, 2d (N,k) data with N cases and k variables Returns ------- q_stat : float test statistic pvalue : float pvalue from the chisquare distribution Notes ----- In Wikipedia terminology, rows are blocks and columns are treatments. The number of rows N, should be large for the chisquare distribution to be a good approximation. The Null hypothesis of the test is that all treatments have the same effect. References ---------- https://en.wikipedia.org/wiki/Cochran_test SAS Manual for NPAR TESTS
cochrans_q
python
statsmodels/statsmodels
statsmodels/sandbox/stats/runs.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py
BSD-3-Clause
def mcnemar(x, y=None, exact=True, correction=True):
    '''McNemar test

    Parameters
    ----------
    x, y : array_like
        two paired data samples. If y is None, then x can be a 2 by 2
        contingency table. x and y can have more than one dimension, then
        the results are calculated under the assumption that axis zero
        contains the observation for the samples.
    exact : bool
        If exact is true, then the binomial distribution will be used.
        If exact is false, then the chisquare distribution will be used,
        which is the approximation to the distribution of the test statistic
        for large sample sizes.
    correction : bool
        If true, then a continuity correction is used for the chisquare
        distribution (if exact is false.)

    Returns
    -------
    stat : float or int, array
        The test statistic is the chisquare statistic if exact is false. If
        the exact binomial distribution is used, then this contains the
        min(n1, n2), where n1, n2 are cases that are zero in one sample but
        one in the other sample.
    pvalue : float or array
        p-value of the null hypothesis of equal effects.

    Notes
    -----
    This is a special case of Cochran's Q test. The results when the
    chisquare distribution is used are identical, except for continuity
    correction.

    '''
    warnings.warn("Deprecated, use stats.TableSymmetry instead", FutureWarning)

    x = np.asarray(x)
    if y is None and x.shape[0] == x.shape[1]:
        if x.shape[0] != 2:
            raise ValueError('table needs to be 2 by 2')
        n1, n2 = x[1, 0], x[0, 1]
    else:
        # I'm not checking here whether x and y are binary,
        # isn't this also a paired sign test?
        n1 = np.sum(x < y, 0)
        n2 = np.sum(x > y, 0)

    if exact:
        stat = np.minimum(n1, n2)
        # binom is symmetric with p=0.5
        pval = stats.binom.cdf(stat, n1 + n2, 0.5) * 2
        pval = np.minimum(pval, 1)  # limit to 1 if n1 == n2
    else:
        corr = int(correction)  # convert bool to 0 or 1
        stat = (np.abs(n1 - n2) - corr)**2 / (1. * (n1 + n2))
        df = 1
        pval = stats.chi2.sf(stat, df)
    return stat, pval
McNemar test Parameters ---------- x, y : array_like two paired data samples. If y is None, then x can be a 2 by 2 contingency table. x and y can have more than one dimension, then the results are calculated under the assumption that axis zero contains the observation for the samples. exact : bool If exact is true, then the binomial distribution will be used. If exact is false, then the chisquare distribution will be used, which is the approximation to the distribution of the test statistic for large sample sizes. correction : bool If true, then a continuity correction is used for the chisquare distribution (if exact is false.) Returns ------- stat : float or int, array The test statistic is the chisquare statistic if exact is false. If the exact binomial distribution is used, then this contains the min(n1, n2), where n1, n2 are cases that are zero in one sample but one in the other sample. pvalue : float or array p-value of the null hypothesis of equal effects. Notes ----- This is a special case of Cochran's Q test. The results when the chisquare distribution is used are identical, except for continuity correction.
mcnemar
python
statsmodels/statsmodels
statsmodels/sandbox/stats/runs.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py
BSD-3-Clause
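A worked 2x2 example computed directly from the formulas in mcnemar above (exact binomial and continuity-corrected chi-square), not via the deprecated function itself:

import numpy as np
from scipy import stats

table = np.array([[20,  5],
                  [10, 15]])
n1, n2 = table[1, 0], table[0, 1]              # discordant counts: 10 and 5

stat_exact = min(n1, n2)
p_exact = min(2 * stats.binom.cdf(stat_exact, n1 + n2, 0.5), 1.)

stat_chi2 = (abs(n1 - n2) - 1)**2 / (n1 + n2)  # continuity corrected
p_chi2 = stats.chi2.sf(stat_chi2, 1)
print(stat_exact, p_exact, stat_chi2, p_chi2)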
def symmetry_bowker(table):
    '''Test for symmetry of a (k, k) square contingency table

    This is an extension of the McNemar test to test the Null hypothesis
    that the contingency table is symmetric around the main diagonal, that is

    n_{i, j} = n_{j, i}  for all i, j

    Parameters
    ----------
    table : array_like, 2d, (k, k)
        a square contingency table that contains the count for k categories
        in rows and columns.

    Returns
    -------
    statistic : float
        chisquare test statistic
    p-value : float
        p-value of the test statistic based on chisquare distribution
    df : int
        degrees of freedom of the chisquare distribution

    Notes
    -----
    Implementation is based on the SAS documentation, R includes it in
    `mcnemar.test` if the table is not 2 by 2.

    The pvalue is based on the chisquare distribution which requires that
    the sample size is not very small to be a good approximation of the true
    distribution. For 2x2 contingency tables the exact distribution can be
    obtained with `mcnemar`

    See Also
    --------
    mcnemar

    '''
    warnings.warn("Deprecated, use stats.TableSymmetry instead", FutureWarning)

    table = np.asarray(table)
    k, k2 = table.shape
    if k != k2:
        raise ValueError('table needs to be square')

    # low_idx = np.tril_indices(k, -1)  # this does not have Fortran order
    upp_idx = np.triu_indices(k, 1)

    tril = table.T[upp_idx]  # lower triangle in column order
    triu = table[upp_idx]    # upper triangle in row order

    stat = ((tril - triu)**2 / (tril + triu + 1e-20)).sum()
    df = k * (k - 1) / 2.
    pval = stats.chi2.sf(stat, df)

    return stat, pval, df
Test for symmetry of a (k, k) square contingency table This is an extension of the McNemar test to test the Null hypothesis that the contingency table is symmetric around the main diagonal, that is n_{i, j} = n_{j, i} for all i, j Parameters ---------- table : array_like, 2d, (k, k) a square contingency table that contains the count for k categories in rows and columns. Returns ------- statistic : float chisquare test statistic p-value : float p-value of the test statistic based on chisquare distribution df : int degrees of freedom of the chisquare distribution Notes ----- Implementation is based on the SAS documentation, R includes it in `mcnemar.test` if the table is not 2 by 2. The pvalue is based on the chisquare distribution which requires that the sample size is not very small to be a good approximation of the true distribution. For 2x2 contingency tables exact distribution can be obtained with `mcnemar` See Also -------- mcnemar
symmetry_bowker
python
statsmodels/statsmodels
statsmodels/sandbox/stats/runs.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/runs.py
BSD-3-Clause
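The same triangle arithmetic as symmetry_bowker, worked inline for a 3x3 table:

import numpy as np
from scipy import stats

table = np.array([[10,  3, 4],
                  [ 5, 12, 2],
                  [ 6,  1, 9]])
k = table.shape[0]
upp_idx = np.triu_indices(k, 1)
triu = table[upp_idx]        # upper triangle in row order
tril = table.T[upp_idx]      # lower triangle in column order
stat = ((tril - triu)**2 / (tril + triu)).sum()
df = k * (k - 1) // 2
print(stat, stats.chi2.sf(stat, df), df)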
def corr_equi(k_vars, rho):
    '''create equicorrelated correlation matrix with rho on off diagonal

    Parameters
    ----------
    k_vars : int
        number of variables, correlation matrix will be (k_vars, k_vars)
    rho : float
        correlation between any two random variables

    Returns
    -------
    corr : ndarray (k_vars, k_vars)
        correlation matrix

    '''
    corr = np.empty((k_vars, k_vars))
    corr.fill(rho)
    corr[np.diag_indices_from(corr)] = 1
    return corr
create equicorrelated correlation matrix with rho on off diagonal Parameters ---------- k_vars : int number of variables, correlation matrix will be (k_vars, k_vars) rho : float correlation between any two random variables Returns ------- corr : ndarray (k_vars, k_vars) correlation matrix
corr_equi
python
statsmodels/statsmodels
statsmodels/sandbox/panel/correlation_structures.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/correlation_structures.py
BSD-3-Clause
def corr_ar(k_vars, ar):
    '''create autoregressive correlation matrix

    This might be MA, not AR, process if used for residual process - check

    Parameters
    ----------
    k_vars : int
        number of variables, the correlation matrix will be (k_vars, k_vars)
    ar : array_like, 1d
        AR lag-polynomial including 1 for lag 0

    '''
    from scipy.linalg import toeplitz
    if len(ar) < k_vars:
        ar_ = np.zeros(k_vars)
        ar_[:len(ar)] = ar
        ar = ar_

    return toeplitz(ar)
create autoregressive correlation matrix This might be MA, not AR, process if used for residual process - check Parameters ---------- ar : array_like, 1d AR lag-polynomial including 1 for lag 0
corr_ar
python
statsmodels/statsmodels
statsmodels/sandbox/panel/correlation_structures.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/correlation_structures.py
BSD-3-Clause
def corr_arma(k_vars, ar, ma):
    '''create arma correlation matrix

    converts arma to autoregressive lag-polynomial with k_var lags

    ar and arma might need to be switched for generating residual process

    Parameters
    ----------
    ar : array_like, 1d
        AR lag-polynomial including 1 for lag 0
    ma : array_like, 1d
        MA lag-polynomial

    '''
    from scipy.linalg import toeplitz
    from statsmodels.tsa.arima_process import arma2ar

    # TODO: flesh out the comment below about a bug in arma2ar
    ar = arma2ar(ar, ma, lags=k_vars)[:k_vars]  # bug in arma2ar

    return toeplitz(ar)
create arma correlation matrix converts arma to autoregressive lag-polynomial with k_var lags ar and arma might need to be switched for generating residual process Parameters ---------- ar : array_like, 1d AR lag-polynomial including 1 for lag 0 ma : array_like, 1d MA lag-polynomial
corr_arma
python
statsmodels/statsmodels
statsmodels/sandbox/panel/correlation_structures.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/correlation_structures.py
BSD-3-Clause
def corr2cov(corr, std):
    '''convert correlation matrix to covariance matrix

    Parameters
    ----------
    corr : ndarray, (k_vars, k_vars)
        correlation matrix
    std : ndarray, (k_vars,) or scalar
        standard deviation for the vector of random variables. If scalar,
        then it is assumed that all variables have the same scale given by
        std.

    '''
    if np.size(std) == 1:
        std = std * np.ones(corr.shape[0])

    cov = corr * std[:, None] * std[None, :]  # same as outer product
    return cov
convert correlation matrix to covariance matrix Parameters ---------- corr : ndarray, (k_vars, k_vars) correlation matrix std : ndarray, (k_vars,) or scalar standard deviation for the vector of random variables. If scalar, then it is assumed that all variables have the same scale given by std.
corr2cov
python
statsmodels/statsmodels
statsmodels/sandbox/panel/correlation_structures.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/correlation_structures.py
BSD-3-Clause
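A short sketch combining corr_equi and corr2cov, reimplemented inline so it runs on its own: build an equicorrelated correlation matrix, then scale it by a vector of standard deviations.

import numpy as np

k_vars, rho = 3, 0.5
corr = np.full((k_vars, k_vars), rho)
corr[np.diag_indices_from(corr)] = 1.

std = np.array([1., 2., 3.])
cov = corr * np.outer(std, std)   # same as std[:, None] * std[None, :]
print(cov)  # diagonal is std**2, off-diagonals are rho * std_i * std_j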
def whiten_ar(x, ar_coefs, order):
    """
    Whiten a series of columns according to an AR(p) covariance structure.

    This drops the initial conditions (Cochrane-Orcutt?)
    Uses loop, so for short ar polynomials only, use lfilter otherwise

    This needs to improve, option on method, full additional to conditional

    Parameters
    ----------
    x : array_like, (nobs,) or (nobs, k_vars)
        The data to be whitened along axis 0
    ar_coefs : ndarray
        coefficients of AR lag-polynomial,   TODO: ar or ar_coefs?
    order : int

    Returns
    -------
    x_new : ndarray
        transformed array
    """
    rho = ar_coefs
    x = np.array(x, np.float64)
    _x = x.copy()
    # TODO: dimension handling is not DRY
    # I think previous code worked for 2d because of single index rows in np
    if x.ndim == 2:
        rho = rho[:, None]
    for i in range(order):
        _x[(i + 1):] = _x[(i + 1):] - rho[i] * x[0:-(i + 1)]

    return _x[order:]
Whiten a series of columns according to an AR(p) covariance structure. This drops the initial conditions (Cochrane-Orcutt?) Uses loop, so for short ar polynomials only, use lfilter otherwise This needs to improve, option on method, full additional to conditional Parameters ---------- x : array_like, (nobs,) or (nobs, k_vars) The data to be whitened along axis 0 ar_coefs : ndarray coefficients of AR lag-polynomial, TODO: ar or ar_coefs? order : int Returns ------- x_new : ndarray transformed array
whiten_ar
python
statsmodels/statsmodels
statsmodels/sandbox/panel/correlation_structures.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/correlation_structures.py
BSD-3-Clause
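For order=1 the loop above computes x_t - rho * x_{t-1} and drops the first observation, which matches filtering with the AR polynomial [1, -rho]; a cross-check against scipy.signal.lfilter:

import numpy as np
from scipy.signal import lfilter

rng = np.random.default_rng(0)
x = rng.standard_normal(10)
rho = 0.8

x_new = x.copy()
x_new[1:] = x_new[1:] - rho * x[:-1]
x_new = x_new[1:]                 # conditional version drops x_0

assert np.allclose(x_new, lfilter([1., -rho], [1.], x)[1:])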
def yule_walker_acov(acov, order=1, method="unbiased", df=None, inv=False):
    """
    Estimate AR(p) parameters from acovf using Yule-Walker equation.

    Parameters
    ----------
    acov : array_like, 1d
        auto-covariance
    order : int, optional
        The order of the autoregressive process.  Default is 1.
    inv : bool
        If inv is True the inverse of R is also returned.  Default is False.

    Returns
    -------
    rho : ndarray
        The estimated autoregressive coefficients
    sigma
        TODO
    Rinv : ndarray
        inverse of the Toeplitz matrix
    """
    return yule_walker(acov, order=order, method=method, df=df, inv=inv,
                       demean=False)
Estimate AR(p) parameters from acovf using Yule-Walker equation. Parameters ---------- acov : array_like, 1d auto-covariance order : int, optional The order of the autoregressive process. Default is 1. inv : bool If inv is True the inverse of R is also returned. Default is False. Returns ------- rho : ndarray The estimated autoregressive coefficients sigma TODO Rinv : ndarray inverse of the Toeplitz matrix
yule_walker_acov
python
statsmodels/statsmodels
statsmodels/sandbox/panel/correlation_structures.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/correlation_structures.py
BSD-3-Clause
def generate_panel(self):
    '''
    generate endog for a random panel dataset with within correlation
    '''
    if self.y_true is None:
        self.get_y_true()
    nobs_i = self.nobs_i
    n_groups = self.n_groups

    use_balanced = True
    if use_balanced:  # much faster for the balanced case
        noise = self.random_state.multivariate_normal(
            np.zeros(nobs_i), self.cov, size=n_groups).ravel()
        # need to add self.group_means
        noise += np.repeat(self.group_means, nobs_i)
    else:
        noise = np.empty(self.nobs, np.float64)
        noise.fill(np.nan)
        for ii in range(self.n_groups):
            idx, idxupp = self.group_indices[ii:ii + 2]
            mean_i = self.group_means[ii]
            noise[idx:idxupp] = self.random_state.multivariate_normal(
                mean_i * np.ones(self.nobs_i), self.cov)

    endog = self.y_true + noise
    return endog
generate endog for a random panel dataset with within correlation
generate_panel
python
statsmodels/statsmodels
statsmodels/sandbox/panel/random_panel.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/random_panel.py
BSD-3-Clause
def sum_outer_product_loop(x, group_iter):
    '''sum outerproduct dot(x_i, x_i.T) over individuals

    loop version
    '''
    mom = 0
    for g in group_iter():
        x_g = x[g]
        mom += np.outer(x_g, x_g)

    return mom
sum outerproduct dot(x_i, x_i.T) over individuals loop version
sum_outer_product_loop
python
statsmodels/statsmodels
statsmodels/sandbox/panel/panel_short.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panel_short.py
BSD-3-Clause
def sum_outer_product_balanced(x, n_groups):
    '''sum outerproduct dot(x_i, x_i.T) over individuals

    where x_i is (nobs_i, 1), and the result is (nobs_i, nobs_i)

    reshape-dot version, for x.ndim=1 only
    '''
    xrs = x.reshape(-1, n_groups, order='F')
    return np.dot(xrs, xrs.T)  # should be (nobs_i, nobs_i)
sum outerproduct dot(x_i, x_i.T) over individuals where x_i is (nobs_i, 1), and result is (nobs_i, nobs_i) reshape-dot version, for x.ndim=1 only
sum_outer_product_balanced
python
statsmodels/statsmodels
statsmodels/sandbox/panel/panel_short.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panel_short.py
BSD-3-Clause
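An equivalence check for the loop and reshape-dot versions above on a balanced 1d panel stacked group by group; group_iter here is an illustrative helper yielding equal-length slices:

import numpy as np

n_groups, nobs_i = 4, 3
rng = np.random.default_rng(1)
x = rng.standard_normal(n_groups * nobs_i)    # stacked group by group

def group_iter():
    for g in range(n_groups):
        yield slice(g * nobs_i, (g + 1) * nobs_i)

mom_loop = sum(np.outer(x[g], x[g]) for g in group_iter())
xrs = x.reshape(-1, n_groups, order='F')      # column j holds group j
mom_balanced = xrs @ xrs.T
assert np.allclose(mom_loop, mom_balanced)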
def whiten_individuals_loop(x, transform, group_iter):
    '''apply linear transform for each individual

    loop version
    '''
    # Note: figure out dimension of transformed variable
    # so we can pre-allocate
    x_new = []
    for g in group_iter():
        x_g = x[g]
        x_new.append(np.dot(transform, x_g))

    return np.concatenate(x_new)  # np.vstack(x_new)? or np.array(x_new), check shape
apply linear transform for each individual loop version
whiten_individuals_loop
python
statsmodels/statsmodels
statsmodels/sandbox/panel/panel_short.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panel_short.py
BSD-3-Clause
def fit_iterative(self, maxiter=3):
    """
    Perform an iterative two-step procedure to estimate the GLS model.

    Parameters
    ----------
    maxiter : int, optional
        the number of iterations

    Notes
    -----
    maxiter=1: returns the estimate based on given weights
    maxiter=2: performs a second estimation with the updated weights, this
        is 2-step estimation
    maxiter>2: iteratively estimate and update the weights

    TODO: possible extension: stop iteration if change in parameter
    estimates is smaller than x_tol

    Repeated calls to fit_iterative will do one redundant pinv_wexog
    calculation. Calling fit_iterative(maxiter) once does not do any
    redundant recalculations (whitening or calculating pinv_wexog).
    """
    # Note: in contrast to GLSHet, we do not have an auxiliary regression
    # here, might be needed if there is more structure in cov_i
    # because we only have the loop we are not attaching the ols_pooled
    # initial estimate anymore compared to original version
    if maxiter < 1:
        raise ValueError('maxiter needs to be at least 1')

    import collections
    self.history = collections.defaultdict(list)  # not really necessary

    for i in range(maxiter):
        # pinv_wexog is cached, delete it to force recalculation
        if hasattr(self, 'pinv_wexog'):
            del self.pinv_wexog
        # fit with current cov, GLS, i.e. OLS on whitened endog, exog
        results = self.fit()
        self.history['self_params'].append(results.params)
        if not i == maxiter - 1:  # skip for last iteration, could break instead
            self.results_old = results  # store previous results for debugging
            # get cov from residuals of previous regression
            sigma_i = self.get_within_cov(results.resid)
            self.cholsigmainv_i = np.linalg.cholesky(np.linalg.pinv(sigma_i)).T
            # calculate new whitened endog and exog
            self.initialize()

    # note: results is the wrapper, results._results is the results instance
    # results._results.results_residual_regression = res_resid
    return results
Perform an iterative two-step procedure to estimate the GLS model. Parameters ---------- maxiter : int, optional the number of iterations Notes ----- maxiter=1: returns the estimate based on given weights maxiter=2: performs a second estimation with the updated weights, this is 2-step estimation maxiter>2: iteratively estimate and update the weights TODO: possible extension: stop iteration if change in parameter estimates is smaller than x_tol Repeated calls to fit_iterative will do one redundant pinv_wexog calculation. Calling fit_iterative(maxiter) once does not do any redundant recalculations (whitening or calculating pinv_wexog).
fit_iterative
python
statsmodels/statsmodels
statsmodels/sandbox/panel/panel_short.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panel_short.py
BSD-3-Clause
def _compute_S(self, D, sigma):
    """covariance of observations (nobs_i, nobs_i)  (JP check)
    Display (3.3) from Laird, Lange, Stram (see help(Unit))
    """
    self.S = (np.identity(self.n) * sigma**2
              + np.dot(self.Z, np.dot(D, self.Z.T)))
covariance of observations (nobs_i, nobs_i) (JP check) Display (3.3) from Laird, Lange, Stram (see help(Unit))
_compute_S
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def _compute_W(self):
    """inverse covariance of observations (nobs_i, nobs_i)  (JP check)
    Display (3.2) from Laird, Lange, Stram (see help(Unit))
    """
    self.W = L.inv(self.S)
inverse covariance of observations (nobs_i, nobs_i) (JP check) Display (3.2) from Laird, Lange, Stram (see help(Unit))
_compute_W
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def compute_P(self, Sinv):
    """projection matrix (nobs_i, nobs_i)  (M in regression ?)
    (JP check, guessing)
    Display (3.10) from Laird, Lange, Stram (see help(Unit))

    W - W X Sinv X' W'
    """
    t = np.dot(self.W, self.X)
    self.P = self.W - np.dot(np.dot(t, Sinv), t.T)
projection matrix (nobs_i, nobs_i) (M in regression ?) (JP check, guessing) Display (3.10) from Laird, Lange, Stram (see help(Unit)) W - W X Sinv X' W'
compute_P
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def _compute_r(self, alpha):
    """residual after removing fixed effects
    Display (3.5) from Laird, Lange, Stram (see help(Unit))
    """
    self.r = self.Y - np.dot(self.X, alpha)
residual after removing fixed effects Display (3.5) from Laird, Lange, Stram (see help(Unit))
_compute_r
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def _compute_b(self, D):
    """coefficients for random effects/coefficients
    Display (3.4) from Laird, Lange, Stram (see help(Unit))

    D Z' W r
    """
    self.b = np.dot(D, np.dot(np.dot(self.Z.T, self.W), self.r))
coefficients for random effects/coefficients Display (3.4) from Laird, Lange, Stram (see help(Unit)) D Z' W r
_compute_b
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def fit(self, a, D, sigma):
    """
    Compute unit specific parameters in Laird, Lange, Stram
    (see help(Unit)).

    Displays (3.2)-(3.5).
    """
    self._compute_S(D, sigma)  # random effect plus error covariance
    self._compute_W()          # inv(S)
    self._compute_r(a)         # residual after removing fixed effects/exogs
    self._compute_b(D)         # ? coefficients on random exog, Z ?
Compute unit specific parameters in Laird, Lange, Stram (see help(Unit)). Displays (3.2)-(3.5).
fit
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def compute_xtwy(self):
    """
    Utility function to compute X^t W Y (transposed ?) for Unit instance.
    """
    return np.dot(np.dot(self.W, self.Y), self.X)  # is this transposed ?
Utility function to compute X^tWY (transposed ?) for Unit instance.
compute_xtwy
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def compute_xtwx(self):
    """
    Utility function to compute X^t W X for Unit instance.
    """
    return np.dot(np.dot(self.X.T, self.W), self.X)
Utility function to compute X^tWX for Unit instance.
compute_xtwx
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def cov_random(self, D, Sinv=None):
    """
    Approximate covariance of estimates of random effects. Just after
    Display (3.10) in Laird, Lange, Stram (see help(Unit)).

    D - D' Z' P Z D

    Notes
    -----
    In example where the mean of the random coefficient is not zero, this
    is not a covariance but a non-centered moment. (proof by example)
    """
    if Sinv is not None:
        self.compute_P(Sinv)
    t = np.dot(self.Z, D)
    return D - np.dot(np.dot(t.T, self.P), t)
Approximate covariance of estimates of random effects. Just after Display (3.10) in Laird, Lange, Stram (see help(Unit)). D - D' Z' P Z D Notes ----- In example where the mean of the random coefficient is not zero, this is not a covariance but a non-centered moment. (proof by example)
cov_random
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def logL(self, a, ML=False):
    """
    Individual contributions to the log-likelihood, tries to return REML
    contribution by default though this requires the estimated fixed effect
    a to be passed as an argument.

    no constant with pi included

    a is not used if ML=True (should be a=None in signature)
    If ML is false, then the residuals are calculated for the given fixed
    effects parameters a.
    """
    if ML:
        return (np.log(L.det(self.W))
                - (self.r * np.dot(self.W, self.r)).sum()) / 2.
    else:
        if a is None:
            raise ValueError('need fixed effect a for REML contribution '
                             'to log-likelihood')
        r = self.Y - np.dot(self.X, a)
        return (np.log(L.det(self.W))
                - (r * np.dot(self.W, r)).sum()) / 2.
Individual contributions to the log-likelihood, returns the REML contribution by default; this requires the estimated fixed effects a to be passed as an argument. no constant with pi included a is not used if ML is true (should be a=None in signature) If ML is false, then the residuals are calculated for the given fixed effects parameters a.
logL
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def deviance(self, a=None, ML=False):
    '''deviance defined as 2 times the negative loglikelihood

    '''
    # a is required by logL for the REML (ML=False) contribution
    return -2 * self.logL(a, ML=ML)
deviance defined as 2 times the negative loglikelihood
deviance
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def _compute_a(self):
    """fixed effects parameters

    Display (3.1) of Laird, Lange, Stram (see help(Mixed)).
    """
    for unit in self.units:
        unit.fit(self.a, self.D, self.sigma)

    S = sum(unit.compute_xtwx() for unit in self.units)
    Y = sum(unit.compute_xtwy() for unit in self.units)

    self.Sinv = L.pinv(S)
    self.a = np.dot(self.Sinv, Y)
fixed effects parameters Display (3.1) of Laird, Lange, Stram (see help(Mixed)).
_compute_a
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
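A minimal sketch of the pooled-GLS update that _compute_a implements, with made-up unit data (X, y, W); here W_i is only a stand-in for each unit's S_i^{-1}:

import numpy as np

rng = np.random.default_rng(0)
units = []
for _ in range(3):
    X = rng.standard_normal((5, 2))
    y = X @ np.array([1.0, -0.5]) + 0.1 * rng.standard_normal(5)
    W = np.eye(5)                        # stand-in for S_i^{-1}
    units.append((X, y, W))

# Display (3.1): a = (sum_i X_i' W_i X_i)^+  sum_i X_i' W_i y_i
S = sum(X.T @ W @ X for X, y, W in units)
v = sum(X.T @ W @ y for X, y, W in units)
a = np.linalg.pinv(S) @ v                # close to [1.0, -0.5]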
def _compute_sigma(self, ML=False):
    """
    Estimate sigma. If ML is True, return the ML estimate of sigma, else
    return the REML estimate.

    If ML, this is (3.6) in Laird, Lange, Stram (see help(Mixed)),
    otherwise it corresponds to (3.8).

    sigma is the standard deviation of the noise (residual)
    """
    sigmasq = 0.
    for unit in self.units:
        if ML:
            W = unit.W
        else:
            unit.compute_P(self.Sinv)
            W = unit.P
        t = unit.r - np.dot(unit.Z, unit.b)
        sigmasq += np.power(t, 2).sum()
        sigmasq += self.sigma**2 * np.trace(np.identity(unit.n) -
                                            self.sigma**2 * W)
    self.sigma = np.sqrt(sigmasq / self.N)
Estimate sigma. If ML is True, return the ML estimate of sigma, else return the REML estimate. If ML, this is (3.6) in Laird, Lange, Stram (see help(Mixed)), otherwise it corresponds to (3.8). sigma is the standard deviation of the noise (residual)
_compute_sigma
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def _compute_D(self, ML=False):
    """
    Estimate random effects covariance D. If ML is True, return the ML
    estimate of D, else return the REML estimate.

    If ML, this is (3.7) in Laird, Lange, Stram (see help(Mixed)),
    otherwise it corresponds to (3.9).
    """
    D = 0.
    for unit in self.units:
        if ML:
            W = unit.W
        else:
            unit.compute_P(self.Sinv)
            W = unit.P
        D += np.multiply.outer(unit.b, unit.b)
        t = np.dot(unit.Z, self.D)
        D += self.D - np.dot(np.dot(t.T, W), t)
    self.D = D / self.m
Estimate random effects covariance D. If ML is True, return the ML estimate of D, else return the REML estimate. If ML, this is (3.7) in Laird, Lange, Stram (see help(Mixed)), otherwise it corresponds to (3.9).
_compute_D
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def cov_fixed(self):
    """
    Approximate covariance of estimates of fixed effects.

    Just after Display (3.10) in Laird, Lange, Stram (see help(Mixed)).
    """
    return self.Sinv
Approximate covariance of estimates of fixed effects. Just after Display (3.10) in Laird, Lange, Stram (see help(Mixed)).
cov_fixed
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def cov_random(self):
    """
    Estimated random effects covariance D.

    see _compute_D, alias for self.D
    """
    return self.D
Estimated random effects covariance D. see _compute_D, alias for self.D
cov_random
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def params(self):
    '''
    estimated coefficients for exogenous variables or fixed effects

    see _compute_a, alias for self.a
    '''
    return self.a
estimated coefficients for exogenous variables or fixed effects see _compute_a, alias for self.a
params
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def params_random_units(self):
    '''random coefficients for each unit

    '''
    return np.array([unit.b for unit in self.units])
random coefficients for each unit
params_random_units
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def cov_params(self):
    '''
    estimated covariance for coefficients for exogenous variables or
    fixed effects

    see cov_fixed, and Sinv in _compute_a
    '''
    return self.cov_fixed()
estimated covariance for coefficients for exogenous variables or fixed effects see cov_fixed, and Sinv in _compute_a
cov_params
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def bse(self):
    '''
    standard errors of estimated coefficients for exogenous variables (fixed)

    '''
    return np.sqrt(np.diag(self.cov_params()))
standard errors of estimated coefficients for exogenous variables (fixed)
bse
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def logL(self, ML=False):
    """
    Return log-likelihood, REML by default.
    """
    #I do not know what the difference between REML and ML is here.
    logL = 0.

    for unit in self.units:
        logL += unit.logL(a=self.a, ML=ML)
    if not ML:
        logL += np.log(L.det(self.Sinv)) / 2

    return logL
Return log-likelihood, REML by default.
logL
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def cont(self, ML=False, rtol=1.0e-05, params_rtol=1e-5, params_atol=1e-4):
    '''convergence check for iterative estimation

    '''
    self.dev, old = self.deviance(ML=ML), self.dev

    #self.history.append(np.hstack((self.dev, self.a)))
    self.history['llf'].append(self.dev)
    self.history['params'].append(self.a.copy())
    self.history['D'].append(self.D.copy())

    # break if relative change in deviance is small
    if np.fabs((self.dev - old) / self.dev) < rtol:
        self.termination = 'llf'
        return False

    # break if parameters converged
    #TODO: check termination conditions, OR or AND
    if np.all(np.abs(self.a - self._a_old) <
              (params_rtol * np.abs(self.a) + params_atol)):
        self.termination = 'params'
        return False

    self._a_old = self.a.copy()
    return True
convergence check for iterative estimation
cont
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def plot_random_univariate(self, bins=None, use_loc=True):
    '''create plot of marginal distribution of random effects

    Parameters
    ----------
    bins : int or bin edges
        option for bins in matplotlib's hist method. Current default is
        not very sophisticated. All distributions use the same setting
        for bins.
    use_loc : bool
        If True, then the distribution with mean given by the fixed
        effect is used.

    Returns
    -------
    Figure
        figure with subplots

    Notes
    -----
    What can make this fancier?

    Bin edges will not make sense if loc or scale differ across random
    effect distributions.

    '''
    #outsource this
    import matplotlib.pyplot as plt
    from scipy.stats import norm as normal
    fig = plt.figure()
    k = self.model.k_exog_re
    if k > 3:
        rows, cols = int(np.ceil(k * 0.5)), 2
    else:
        rows, cols = k, 1
    if bins is None:
        #bins = self.model.n_units // 20    #TODO: just roughly, check
        #bins = np.sqrt(self.model.n_units)
        bins = int(5 + 2 * self.model.n_units**(1. / 3.))   # hist needs int

    if use_loc:
        loc = self.mean_random()
    else:
        loc = [0] * k

    scale = self.std_random()

    for ii in range(k):
        ax = fig.add_subplot(rows, cols, ii + 1)   # subplot index is 1-based
        freq, bins_, _ = ax.hist(loc[ii] + self.params_random_units[:, ii],
                                 bins=bins, density=True)
        points = np.linspace(bins_[0], bins_[-1], 200)

        #ax.plot(points, normal.pdf(points, loc=loc, scale=scale))
        #loc of sample is approx. zero, with Z appended to X
        #alternative, add fixed to mean
        ax.set_title('Random Effect %d Marginal Distribution' % ii)
        ax.plot(points, normal.pdf(points, loc=loc[ii], scale=scale[ii]),
                'r')

    return fig
create plot of marginal distribution of random effects Parameters ---------- bins : int or bin edges option for bins in matplotlib's hist method. Current default is not very sophisticated. All distributions use the same setting for bins. use_loc : bool If True, then the distribution with mean given by the fixed effect is used. Returns ------- Figure figure with subplots Notes ----- What can make this fancier? Bin edges will not make sense if loc or scale differ across random effect distributions.
plot_random_univariate
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def plot_scatter_pairs(self, idx1, idx2, title=None, ax=None):
    '''create scatter plot of two random effects

    Parameters
    ----------
    idx1, idx2 : int
        indices of the two random effects to display, corresponding to
        columns of exog_re
    title : None or string
        If None, then a default title is added
    ax : None or matplotlib axis instance
        If None, then a figure with one axis is created and returned.
        If ax is not None, then the scatter plot is created on it, and
        this axis instance is returned.

    Returns
    -------
    ax_or_fig : axis or figure instance
        see ax parameter

    Notes
    -----
    Still needs ellipse from estimated parameters

    '''
    import matplotlib.pyplot as plt
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax_or_fig = fig
    else:
        # return the axis when the caller supplied it
        ax_or_fig = ax

    re1 = self.params_random_units[:, idx1]
    re2 = self.params_random_units[:, idx2]
    ax.plot(re1, re2, 'o', alpha=0.75)
    if title is None:
        title = 'Random Effects %d and %d' % (idx1, idx2)
    ax.set_title(title)

    return ax_or_fig
create scatter plot of two random effects Parameters ---------- idx1, idx2 : int indices of the two random effects to display, corresponding to columns of exog_re title : None or string If None, then a default title is added ax : None or matplotlib axis instance If None, then a figure with one axis is created and returned. If ax is not None, then the scatter plot is created on it, and this axis instance is returned. Returns ------- ax_or_fig : axis or figure instance see ax parameter Notes ----- Still needs ellipse from estimated parameters
plot_scatter_pairs
python
statsmodels/statsmodels
statsmodels/sandbox/panel/mixed.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py
BSD-3-Clause
def kernel(d1, d2, r=None, weights=None):
    '''general product kernel

    hardcoded split for the example:
        cat1 is continuous (time), other categories are discrete

    weights is e.g. Bartlett for cat1
    r is (0, 1) indicator vector for boolean weights 1{d1_i == d2_i}

    returns boolean if no continuous weights are used
    '''
    diff = d1 - d2
    if (weights is None) or (r[0] == 0):
        # time is irrelevant or treated as categorical
        return np.all((r * diff) == 0)   # return bool
    else:
        # time uses continuous kernel, all other categorical
        return weights[diff] * np.all((r[1:] * diff[1:]) == 0)
general product kernel hardcoded split for the example: cat1 is continuous (time), other categories are discrete weights is e.g. Bartlett for cat1 r is (0,1) indicator vector for boolean weights 1{d1_i == d2_i} returns boolean if no continuous weights are used
kernel
python
statsmodels/statsmodels
statsmodels/sandbox/panel/sandwich_covariance_generic.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/sandwich_covariance_generic.py
BSD-3-Clause
def aggregate_cov(x, d, r=None, weights=None):
    '''sum of outer product over groups and time selected by r

    This is for a generic reference implementation, it uses a nobs-nobs
    double loop.

    Parameters
    ----------
    x : ndarray, (nobs,) or (nobs, k_vars)
        data, for robust standard error calculation, this is array of
        x_i * u_i
    d : ndarray, (nobs, n_groups)
        integer group labels, each column contains group (or time) indices
    r : ndarray, (n_groups,)
        indicator for which groups to include. If r[i] is zero, then this
        group is ignored. If r[i] is not zero, then the cluster robust
        standard errors include this group.
    weights : ndarray
        weights if the first group dimension uses a HAC kernel

    Returns
    -------
    cov : ndarray (k_vars, k_vars) or scalar
        covariance matrix aggregates over group kernels
    count : int
        number of terms added in sum, mainly returned for cross-checking

    Notes
    -----
    This uses `kernel` to calculate the weighted distance between two
    observations.
    '''
    nobs = x.shape[0]   # either 1d or 2d with obs in rows
    #next is not needed yet
    # if x.ndim == 2:
    #     kvars = x.shape[1]
    # else:
    #     kvars = 1

    count = 0   # count non-zero pairs for cross checking, not needed
    res = 0 * np.outer(x[0], x[0])   # get output shape

    for ii in range(nobs):
        for jj in range(nobs):
            w = kernel(d[ii], d[jj], r=r, weights=weights)
            if w:   # true or non-zero
                res += w * np.outer(x[ii], x[jj])
                count += 1

    return res, count
sum of outer product over groups and time selected by r This is for a generic reference implementation, it uses a nobs-nobs double loop. Parameters ---------- x : ndarray, (nobs,) or (nobs, k_vars) data, for robust standard error calculation, this is array of x_i * u_i d : ndarray, (nobs, n_groups) integer group labels, each column contains group (or time) indices r : ndarray, (n_groups,) indicator for which groups to include. If r[i] is zero, then this group is ignored. If r[i] is not zero, then the cluster robust standard errors include this group. weights : ndarray weights if the first group dimension uses a HAC kernel Returns ------- cov : ndarray (k_vars, k_vars) or scalar covariance matrix aggregates over group kernels count : int number of terms added in sum, mainly returned for cross-checking Notes ----- This uses `kernel` to calculate the weighted distance between two observations.
aggregate_cov
python
statsmodels/statsmodels
statsmodels/sandbox/panel/sandwich_covariance_generic.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/sandwich_covariance_generic.py
BSD-3-Clause
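As a cross-check of the r-indicator logic (a sketch assuming kernel and aggregate_cov above are in scope): with one distinct label per observation and r all ones, only diagonal pairs survive, so the result reduces to the plain White sum of outer products.

import numpy as np

rng = np.random.default_rng(0)
nobs = 8
x = rng.standard_normal((nobs, 2))       # scores, e.g. rows of x_i * u_i
d = np.arange(nobs)[:, None]             # one distinct label per observation

S, count = aggregate_cov(x, d, r=np.ones(1))
S_direct = sum(np.outer(xi, xi) for xi in x)
assert np.allclose(S, S_direct)          # only the nobs diagonal terms enter
assert count == nobs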
def S_all_hac(x, d, nlags=1):
    '''HAC independent of categorical group membership
    '''
    r = np.zeros(d.shape[1])
    r[0] = 1
    weights = weights_bartlett(nlags)
    return aggregate_cov(x, d, r=r, weights=weights)
HAC independent of categorical group membership
S_all_hac
python
statsmodels/statsmodels
statsmodels/sandbox/panel/sandwich_covariance_generic.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/sandwich_covariance_generic.py
BSD-3-Clause
def S_within_hac(x, d, nlags=1, groupidx=1):
    '''HAC for observations within a categorical group
    '''
    r = np.zeros(d.shape[1])
    r[0] = 1
    r[groupidx] = 1
    weights = weights_bartlett(nlags)
    return aggregate_cov(x, d, r=r, weights=weights)
HAC for observations within a categorical group
S_within_hac
python
statsmodels/statsmodels
statsmodels/sandbox/panel/sandwich_covariance_generic.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/sandwich_covariance_generic.py
BSD-3-Clause
def S_white(x, d):
    '''simple white heteroscedasticity robust covariance

    note: calculating this way is very inefficient, just for cross-checking
    '''
    r = np.ones(d.shape[1])   # only points on diagonal
    return aggregate_cov(x, d, r=r, weights=None)
simple white heteroscedasticity robust covariance note: calculating this way is very inefficient, just for cross-checking
S_white
python
statsmodels/statsmodels
statsmodels/sandbox/panel/sandwich_covariance_generic.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/sandwich_covariance_generic.py
BSD-3-Clause
def group(X):
    """
    Returns unique numeric values for groups without sorting.

    Examples
    --------
    >>> X = np.array(['a','a','b','c','b','c'])
    >>> group(X)
    array([ 0.,  0.,  1.,  2.,  1.,  2.])
    """
    uniq_dict = {}
    group = np.zeros(len(X))
    for i in range(len(X)):
        if X[i] not in uniq_dict:
            uniq_dict.update({X[i]: len(uniq_dict)})
        group[i] = uniq_dict[X[i]]
    return group
Returns unique numeric values for groups without sorting. Examples -------- >>> X = np.array(['a','a','b','c','b','c']) >>> group(X) array([ 0., 0., 1., 2., 1., 2.])
group
python
statsmodels/statsmodels
statsmodels/sandbox/panel/panelmod.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panelmod.py
BSD-3-Clause
def repanel_cov(groups, sigmas):
    '''calculate error covariance matrix for random effects model

    Parameters
    ----------
    groups : ndarray, (nobs, nre) or (nobs,)
        array of group/category observations
    sigmas : ndarray, (nre+1,)
        array of standard deviations of random effects, last element is
        the standard deviation of the idiosyncratic error

    Returns
    -------
    omega : ndarray, (nobs, nobs)
        covariance matrix of error
    omegainv : ndarray, (nobs, nobs)
        inverse covariance matrix of error
    omegainvsqrt : ndarray, (nobs, nobs)
        square root of inverse covariance matrix of error
        such that omegainv = omegainvsqrt * omegainvsqrt.T

    Notes
    -----
    This does not use sparse matrices and constructs nobs by nobs
    matrices. Also, omegainvsqrt is not sparse, i.e. elements are non-zero
    '''
    if groups.ndim == 1:
        groups = groups[:, None]
    nobs, nre = groups.shape
    # sigmas are standard deviations, so the idiosyncratic part enters squared
    omega = sigmas[-1]**2 * np.eye(nobs)
    for igr in range(nre):
        group = groups[:, igr:igr + 1]
        groupuniq = np.unique(group)
        dummygr = sigmas[igr] * (group == groupuniq).astype(float)
        omega += np.dot(dummygr, dummygr.T)
    ev, evec = np.linalg.eigh(omega)   # eig does not work
    omegainv = np.dot(evec, (1 / ev * evec).T)
    omegainvhalf = evec / np.sqrt(ev)
    return omega, omegainv, omegainvhalf
calculate error covariance matrix for random effects model Parameters ---------- groups : ndarray, (nobs, nre) or (nobs,) array of group/category observations sigmas : ndarray, (nre+1,) array of standard deviations of random effects, last element is the standard deviation of the idiosyncratic error Returns ------- omega : ndarray, (nobs, nobs) covariance matrix of error omegainv : ndarray, (nobs, nobs) inverse covariance matrix of error omegainvsqrt : ndarray, (nobs, nobs) square root of inverse covariance matrix of error such that omegainv = omegainvsqrt * omegainvsqrt.T Notes ----- This does not use sparse matrices and constructs nobs by nobs matrices. Also, omegainvsqrt is not sparse, i.e. elements are non-zero
repanel_cov
python
statsmodels/statsmodels
statsmodels/sandbox/panel/panelmod.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panelmod.py
BSD-3-Clause
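A quick usage sketch for repanel_cov with a one-way layout of three groups of two observations; it checks the eigendecomposition identities stated in the docstring:

import numpy as np

groups = np.array([0, 0, 1, 1, 2, 2])    # one random effect, 3 groups of 2
sigmas = np.array([1.0, 0.5])            # [sigma_group, sigma_idiosyncratic]
omega, omegainv, omegainvhalf = repanel_cov(groups, sigmas)

assert np.allclose(omega @ omegainv, np.eye(len(groups)))
assert np.allclose(omegainv, omegainvhalf @ omegainvhalf.T)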
def initialize(self, endog, exog, panel, time, xtnames, equation):
    """
    Initialize plain array model.

    See PanelModel
    """
    #TODO: for now, we are going to assume a constant, and then make the
    #first panel the base, add a flag for this....

    # get names
    names = equation.split(" ")
    self.endog_name = names[0]
    exog_names = names[1:]   # this makes the order matter in the array
    self.panel_name = xtnames[0]
    self.time_name = xtnames[1]

    novar = exog.var(0) == 0
    if novar.any():
        cons_index = np.where(novar == 1)[0][0]   # constant col. num
        exog_names.insert(cons_index, 'cons')
        self._cons_index = novar   # used again in fit_fixed

    self.exog_names = exog_names
    self.endog = np.squeeze(np.asarray(endog))
    exog = np.asarray(exog)
    self.exog = exog
    self.panel = np.asarray(panel)
    self.time = np.asarray(time)

    self.paneluniq = np.unique(panel)
    self.timeuniq = np.unique(time)
Initialize plain array model. See PanelModel
initialize
python
statsmodels/statsmodels
statsmodels/sandbox/panel/panelmod.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panelmod.py
BSD-3-Clause
def _group_mean(self, X, index='oneway', counts=False, dummies=False):
    """
    Get group means of X by time or by panel.

    index default is panel
    """
    if index == 'oneway':
        Y = self.panel
        uniq = self.paneluniq
    elif index == 'time':
        Y = self.time
        uniq = self.timeuniq
    else:
        raise ValueError("index %s not understood" % index)

    #TODO: use sparse matrices
    dummy = (Y == uniq[:, None]).astype(float)
    if X.ndim > 1:
        mean = np.dot(dummy, X) / dummy.sum(1)[:, None]
    else:
        mean = np.dot(dummy, X) / dummy.sum(1)
    if counts is False and dummies is False:
        return mean
    elif counts is True and dummies is False:
        return mean, dummy.sum(1)
    elif counts is True and dummies is True:
        return mean, dummy.sum(1), dummy
    elif counts is False and dummies is True:
        return mean, dummy
Get group means of X by time or by panel. index default is panel
_group_mean
python
statsmodels/statsmodels
statsmodels/sandbox/panel/panelmod.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panelmod.py
BSD-3-Clause
def fit(self, model=None, method=None, effects='oneway'):
    """
    method : LSDV, demeaned, MLE, GLS, BE, FE, optional
    model : between, fixed, random, pooled, [gmm]
    effects : oneway, time, twoway
    femethod : demeaned (only one implemented), WLS
    remethod : swar, amemiya, nerlove, walhus

    Notes
    -----
    This is unfinished.  None of the method arguments work yet.
    Only oneway effects should work.
    """
    if method:   # get rid of this with default
        method = method.lower()
    model = model.lower()
    if method and method not in ["lsdv", "demeaned", "mle",
                                 "gls", "be", "fe"]:
        # get rid of if method with default
        raise ValueError("%s not a valid method" % method)
    # if method == "lsdv":
    #     self.fit_lsdv(model)
    if model == 'pooled':
        return GLS(self.endog, self.exog).fit()
    if model == 'between':
        return self._fit_btwn(method, effects)
    if model == 'fixed':
        return self._fit_fixed(method, effects)
method : LSDV, demeaned, MLE, GLS, BE, FE, optional model : between fixed random pooled [gmm] effects : oneway time twoway femethod : demeaned (only one implemented) WLS remethod : swar - amemiya nerlove walhus Notes ----- This is unfinished. None of the method arguments work yet. Only oneway effects should work.
fit
python
statsmodels/statsmodels
statsmodels/sandbox/panel/panelmod.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panelmod.py
BSD-3-Clause
def arfilter(x, a):
    '''apply an autoregressive filter to a series x

    x can be 2d, a can be 1d, 2d, or 3d

    Parameters
    ----------
    x : array_like
        data array, 1d or 2d, if 2d then observations in rows
    a : array_like
        autoregressive filter coefficients, ar lag polynomial
        see Notes

    Returns
    -------
    y : ndarray, 2d
        filtered array, number of columns determined by x and a

    Notes
    -----
    In general form this uses the linear filter ::

        y = a(L)x

    where
    x : nobs, nvars
    a : nlags, nvars, npoly

    Depending on the shape and dimension of a this uses different
    Lag polynomial arrays

    case 1 : a is 1d or (nlags,1)
        one lag polynomial is applied to all variables (columns of x)
    case 2 : a is 2d, (nlags, nvars)
        each series is independently filtered with its own lag polynomial,
        uses loop over nvar
    case 3 : a is 3d, (nlags, nvars, npoly)
        the ith column of the output array is given by the linear filter
        defined by the 2d array a[:,:,i], i.e. ::

            y[:,i] = a(.,.,i)(L) * x
            y[t,i] = sum_p sum_j a(p,j,i)*x(t-p,j)
                     for p = 0,...nlags-1, j = 0,...nvars-1,
                     for all t >= nlags

    Note: maybe convert to axis=1, Not TODO: initial conditions

    '''
    x = np.asarray(x)
    a = np.asarray(a)
    if x.ndim == 1:
        x = x[:, None]
    if x.ndim > 2:
        raise ValueError('x array has to be 1d or 2d')
    nvar = x.shape[1]
    nlags = a.shape[0]
    ntrim = nlags // 2
    # for x is 2d with ncols > 1

    if a.ndim == 1:
        # case: identical ar filter (lag polynomial)
        return signal.convolve(x, a[:, None], mode='valid')
        # alternative:
        #return signal.lfilter(a, [1], x.astype(float), axis=0)
    elif a.ndim == 2:
        if min(a.shape) == 1:
            # case: identical ar filter (lag polynomial)
            return signal.convolve(x, a, mode='valid')

        # case: independent ar
        #(a bit like recserar in gauss, but no x yet)
        result = np.zeros((x.shape[0] - nlags + 1, nvar))
        for i in range(nvar):
            # could also use np.convolve, but easier for switching to fft
            result[:, i] = signal.convolve(x[:, i], a[:, i], mode='valid')
        return result

    elif a.ndim == 3:
        # case: vector autoregressive with lag matrices
        yf = signal.convolve(x[:, :, None], a)
        yvalid = yf[ntrim:-ntrim, yf.shape[1] // 2, :]
        return yvalid
apply an autoregressive filter to a series x x can be 2d, a can be 1d, 2d, or 3d Parameters ---------- x : array_like data array, 1d or 2d, if 2d then observations in rows a : array_like autoregressive filter coefficients, ar lag polynomial see Notes Returns ------- y : ndarray, 2d filtered array, number of columns determined by x and a Notes ----- In general form this uses the linear filter :: y = a(L)x where x : nobs, nvars a : nlags, nvars, npoly Depending on the shape and dimension of a this uses different Lag polynomial arrays case 1 : a is 1d or (nlags,1) one lag polynomial is applied to all variables (columns of x) case 2 : a is 2d, (nlags, nvars) each series is independently filtered with its own lag polynomial, uses loop over nvar case 3 : a is 3d, (nlags, nvars, npoly) the ith column of the output array is given by the linear filter defined by the 2d array a[:,:,i], i.e. :: y[:,i] = a(.,.,i)(L) * x y[t,i] = sum_p sum_j a(p,j,i)*x(t-p,j) for p = 0,...nlags-1, j = 0,...nvars-1, for all t >= nlags Note: maybe convert to axis=1, Not TODO: initial conditions
arfilter
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/try_var_convolve.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/try_var_convolve.py
BSD-3-Clause
def VAR(x, B, const=0):
    ''' multivariate linear filter

    Parameters
    ----------
    x: (TxK) array
        columns are variables, rows are observations for time period
    B: (PxKxK) array
        b_t-1 is bottom "row", b_t-P is top "row" when printing
        B(:,:,0) is lag polynomial matrix for variable 1
        B(:,:,k) is lag polynomial matrix for variable k
        B(p,:,k) is pth lag for variable k
        B[p,:,:].T corresponds to A_p in Wikipedia
    const : float or array (not tested)
        constant added to autoregression

    Returns
    -------
    xhat: (TxK) array
        filtered, predicted values of x array

    Notes
    -----
    xhat(t,i) = sum{_p}sum{_k} { x(t-P:t,:) .* B(:,:,i) }  for all i = 0,K-1,
    for all t=p..T

    xhat does not include the forecasting observation, xhat(T+1),
    xhat is 1 row shorter than signal.correlate

    References
    ----------
    https://en.wikipedia.org/wiki/Vector_Autoregression
    https://en.wikipedia.org/wiki/General_matrix_notation_of_a_VAR(p)
    '''
    p = B.shape[0]
    T = x.shape[0]
    xhat = np.zeros(x.shape)
    for t in range(p, T):
        # weighted sum over the p preceding observations
        xhat[t, :] = const + (x[t-p:t, :, np.newaxis] * B).sum(axis=1).sum(axis=0)
    return xhat
multivariate linear filter Parameters ---------- x: (TxK) array columns are variables, rows are observations for time period B: (PxKxK) array b_t-1 is bottom "row", b_t-P is top "row" when printing B(:,:,0) is lag polynomial matrix for variable 1 B(:,:,k) is lag polynomial matrix for variable k B(p,:,k) is pth lag for variable k B[p,:,:].T corresponds to A_p in Wikipedia const : float or array (not tested) constant added to autoregression Returns ------- xhat: (TxK) array filtered, predicted values of x array Notes ----- xhat(t,i) = sum{_p}sum{_k} { x(t-P:t,:) .* B(:,:,i) } for all i = 0,K-1, for all t=p..T xhat does not include the forecasting observation, xhat(T+1), xhat is 1 row shorter than signal.correlate References ---------- https://en.wikipedia.org/wiki/Vector_Autoregression https://en.wikipedia.org/wiki/General_matrix_notation_of_a_VAR(p)
VAR
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/varma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/varma.py
BSD-3-Clause
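To make the B convention concrete, here is a small check for a VAR(1) (a sketch assuming the VAR function above is in scope): with B[p,:,:].T equal to the usual coefficient matrix A_1, the filter reproduces xhat_t = A_1 x_{t-1}.

import numpy as np

A1 = np.array([[0.5, 0.1],
               [0.0, 0.3]])              # x_t = A1 @ x_{t-1} + e_t
B = A1.T[None, :, :]                     # shape (P=1, K, K)

rng = np.random.default_rng(0)
x = rng.standard_normal((6, 2))
xhat = VAR(x, B)
assert np.allclose(xhat[1:], x[:-1] @ A1.T)   # xhat[0] stays zero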
def VARMA(x, B, C, const=0):
    ''' multivariate linear filter

    x (TxK)
    B (PxKxK)

    xhat(t,i) = sum{_p}sum{_k} { x(t-P:t,:) .* B(:,:,i) } +
                sum{_q}sum{_k} { e(t-Q:t,:) .* C(:,:,i) }  for all i = 0,K-1

    '''
    P = B.shape[0]
    Q = C.shape[0]
    T = x.shape[0]
    xhat = np.zeros(x.shape)
    e = np.zeros(x.shape)
    start = max(P, Q)
    for t in range(start, T):
        xhat[t, :] = (const +
                      (x[t-P:t, :, np.newaxis] * B).sum(axis=1).sum(axis=0) +
                      (e[t-Q:t, :, np.newaxis] * C).sum(axis=1).sum(axis=0))
        e[t, :] = x[t, :] - xhat[t, :]
    return xhat, e
multivariate linear filter x (TxK) B (PxKxK) xhat(t,i) = sum{_p}sum{_k} { x(t-P:t,:) .* B(:,:,i) } + sum{_q}sum{_k} { e(t-Q:t,:) .* C(:,:,i) }for all i = 0,K-1
VARMA
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/varma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/varma.py
BSD-3-Clause
def padarr(self, arr, maxlag, atend=True):
    '''pad 1d array with zeros at end to have length maxlag
    function that is a method, no self used

    Parameters
    ----------
    arr : array_like, 1d
        array that will be padded with zeros
    maxlag : int
        length of array after padding
    atend : bool
        If True (default), then the zeros are added to the end, otherwise
        to the front of the array

    Returns
    -------
    arrp : ndarray
        zero-padded array

    Notes
    -----
    This is mainly written to extend coefficient arrays for the
    lag-polynomials. It returns a copy.
    '''
    if atend:
        return np.r_[arr, np.zeros(maxlag - len(arr))]
    else:
        return np.r_[np.zeros(maxlag - len(arr)), arr]
pad 1d array with zeros at end to have length maxlag function that is a method, no self used Parameters ---------- arr : array_like, 1d array that will be padded with zeros maxlag : int length of array after padding atend : bool If True (default), then the zeros are added to the end, otherwise to the front of the array Returns ------- arrp : ndarray zero-padded array Notes ----- This is mainly written to extend coefficient arrays for the lag-polynomials. It returns a copy.
padarr
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def pad(self, maxlag):
    '''construct AR and MA polynomials that are zero-padded to a common length

    Parameters
    ----------
    maxlag : int
        new length of lag-polynomials

    Returns
    -------
    ar : ndarray
        extended AR polynomial coefficients
    ma : ndarray
        extended MA polynomial coefficients
    '''
    arpad = np.r_[self.ar, np.zeros(maxlag - self.nar)]
    mapad = np.r_[self.ma, np.zeros(maxlag - self.nma)]
    return arpad, mapad
construct AR and MA polynomials that are zero-padded to a common length Parameters ---------- maxlag : int new length of lag-polynomials Returns ------- ar : ndarray extended AR polynomial coefficients ma : ndarray extended MA polynomial coefficients
pad
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def fftar(self, n=None):
    '''Fourier transform of AR polynomial, zero-padded at end to n

    Parameters
    ----------
    n : int
        length of array after zero-padding

    Returns
    -------
    fftar : ndarray
        fft of zero-padded ar polynomial
    '''
    if n is None:
        n = len(self.ar)
    return fft.fft(self.padarr(self.ar, n))
Fourier transform of AR polynomial, zero-padded at end to n Parameters ---------- n : int length of array after zero-padding Returns ------- fftar : ndarray fft of zero-padded ar polynomial
fftar
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def fftma(self, n=None):
    '''Fourier transform of MA polynomial, zero-padded at end to n

    Parameters
    ----------
    n : int
        length of array after zero-padding

    Returns
    -------
    fftma : ndarray
        fft of zero-padded ma polynomial
    '''
    if n is None:
        n = len(self.ma)
    return fft.fft(self.padarr(self.ma, n))
Fourier transform of MA polynomial, zero-padded at end to n Parameters ---------- n : int length of array after zero-padding Returns ------- fftma : ndarray fft of zero-padded ma polynomial
fftma
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def fftarma(self, n=None):
    '''Fourier transform of ARMA polynomial, zero-padded at end to n

    The Fourier transform of the ARMA process is calculated as the ratio
    of the fft of the MA polynomial divided by the fft of the AR
    polynomial.

    Parameters
    ----------
    n : int
        length of array after zero-padding

    Returns
    -------
    fftarma : ndarray
        fft of zero-padded arma polynomial
    '''
    if n is None:
        n = self.nobs
    return self.fftma(n) / self.fftar(n)
Fourier transform of ARMA polynomial, zero-padded at end to n The Fourier transform of the ARMA process is calculated as the ratio of the fft of the MA polynomial divided by the fft of the AR polynomial. Parameters ---------- n : int length of array after zero-padding Returns ------- fftarma : ndarray fft of zero-padded arma polynomial
fftarma
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def spd(self, npos):
    '''raw spectral density, returns Fourier transform

    n is number of points in positive spectrum, the actual number of
    points is twice as large. different from other spd methods with fft
    '''
    n = npos
    w = fft.fftfreq(2 * n) * 2 * np.pi
    hw = self.fftarma(2 * n)   # not sure, need to check normalization
    #return (hw*hw.conj()).real[n//2-1:] * 0.5 / np.pi #does not show in plot
    return (hw * hw.conj()).real * 0.5 / np.pi, w
raw spectral density, returns Fourier transform n is number of points in positive spectrum, the actual number of points is twice as large. different from other spd methods with fft
spd
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def spdshift(self, n):
    '''power spectral density using fftshift

    currently returns two-sided according to fft frequencies, use
    first half
    '''
    #size = s1+s2-1
    mapadded = self.padarr(self.ma, n)
    arpadded = self.padarr(self.ar, n)
    hw = fft.fft(fft.fftshift(mapadded)) / fft.fft(fft.fftshift(arpadded))
    #return np.abs(spd)[n//2-1:]
    w = fft.fftfreq(n) * 2 * np.pi
    wslice = slice(n // 2 - 1, None, None)   # used by the commented variant
    #return (hw*hw.conj()).real[wslice], w[wslice]
    return (hw * hw.conj()).real, w
power spectral density using fftshift currently returns two-sided according to fft frequencies, use first half
spdshift
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def spddirect(self, n):
    '''power spectral density using padding to length n done by fft

    currently returns two-sided according to fft frequencies, use
    first half
    '''
    #size = s1+s2-1
    #abs looks wrong
    hw = fft.fft(self.ma, n) / fft.fft(self.ar, n)
    w = fft.fftfreq(n) * 2 * np.pi
    return (np.abs(hw)**2) * 0.5 / np.pi, w
power spectral density using padding to length n done by fft currently returns two-sided according to fft frequencies, use first half
spddirect
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
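The relation that spddirect exploits can be written out in a few lines of plain numpy: the spectral density of an ARMA process is |b(e^{-iw})|^2 / |a(e^{-iw})|^2 / (2 pi), with both lag polynomials evaluated via zero-padded FFTs (a sketch with made-up coefficients):

import numpy as np

ar = np.array([1.0, -0.5])               # a(L), AR lag polynomial
ma = np.array([1.0, 0.4])                # b(L), MA lag polynomial
n = 256
w = np.fft.fftfreq(n) * 2 * np.pi
hw = np.fft.fft(ma, n) / np.fft.fft(ar, n)    # transfer function at fft freqs
spd = np.abs(hw)**2 * 0.5 / np.pi             # two-sided spectral density

# at frequency zero the density is (b(1)/a(1))**2 / (2*pi)
assert np.allclose(spd[0], (ma.sum() / ar.sum())**2 * 0.5 / np.pi)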
def _spddirect2(self, n):
    '''this looks bad, maybe with an fftshift
    '''
    #size = s1+s2-1
    hw = (fft.fft(np.r_[self.ma[::-1], self.ma], n) /
          fft.fft(np.r_[self.ar[::-1], self.ar], n))
    return (hw * hw.conj())   #.real[n//2-1:]
this looks bad, maybe with an fftshift
_spddirect2
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def spdroots(self, w):
    '''spectral density for frequency using polynomial roots

    builds two arrays (number of roots, number of frequencies)
    '''
    return self._spdroots(self.arroots, self.maroots, w)
spectral density for frequency using polynomial roots builds two arrays (number of roots, number of frequencies)
spdroots
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def _spdroots(self, arroots, maroots, w):
    '''spectral density for frequency using polynomial roots

    builds two arrays (number of roots, number of frequencies)

    Parameters
    ----------
    arroots : ndarray
        roots of ar (denominator) lag-polynomial
    maroots : ndarray
        roots of ma (numerator) lag-polynomial
    w : array_like
        frequencies for which spd is calculated

    Notes
    -----
    this should go into a function
    '''
    w = np.atleast_2d(w).T
    cosw = np.cos(w)
    #Greene 5th edt. p626, section 20.2.7.a.
    maroots = 1. / maroots
    arroots = 1. / arroots
    num = 1 + maroots**2 - 2 * maroots * cosw
    den = 1 + arroots**2 - 2 * arroots * cosw
    # print('num.shape, den.shape', num.shape, den.shape)
    hw = 0.5 / np.pi * num.prod(-1) / den.prod(-1)   # or use expsumlog
    return np.squeeze(hw), w.squeeze()
spectral density for frequency using polynomial roots builds two arrays (number of roots, number of frequencies) Parameters ---------- arroots : ndarray roots of ar (denominator) lag-polynomial maroots : ndarray roots of ma (numerator) lag-polynomial w : array_like frequencies for which spd is calculated Notes ----- this should go into a function
_spdroots
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def spdpoly(self, w, nma=50):
    '''spectral density from MA polynomial representation for ARMA process

    References
    ----------
    Cochrane, section 8.3.3
    '''
    mpoly = np.polynomial.Polynomial(self.arma2ma(nma))
    hw = mpoly(np.exp(1j * w))
    spd = np.real_if_close(hw * hw.conj() * 0.5 / np.pi)
    return spd, w
spectral density from MA polynomial representation for ARMA process References ---------- Cochrane, section 8.3.3
spdpoly
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def filter(self, x):
    '''
    filter a timeseries with the ARMA filter

    padding with zero is missing, in example I needed the padding to get
    initial conditions identical to direct filter

    Initial filtered observations differ from filter2 and signal.lfilter,
    but at end they are the same.

    See Also
    --------
    tsa.filters.fftconvolve

    '''
    n = x.shape[0]
    # frequency response of the ARMA filter, zero-padded to the data length
    fftarma = self.fftma(n) / self.fftar(n)
    tmpfft = fftarma * fft.fft(x)
    return fft.ifft(tmpfft)
filter a timeseries with the ARMA filter padding with zero is missing, in example I needed the padding to get initial conditions identical to direct filter Initial filtered observations differ from filter2 and signal.lfilter, but at end they are the same. See Also -------- tsa.filters.fftconvolve
filter
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def filter2(self, x, pad=0):
    '''filter a time series using fftconvolve3 with ARMA filter

    padding of x currently works only if x is 1d
    in example it produces same observations at beginning as lfilter even
    without padding.

    TODO: this returns 1 additional observation at the end
    '''
    from statsmodels.tsa.filters import fftconvolve3
    if not pad:
        pass
    elif pad == 'auto':
        # just guessing how much padding
        x = self.padarr(x, x.shape[0] + 2 * (self.nma + self.nar),
                        atend=False)
    else:
        x = self.padarr(x, x.shape[0] + int(pad), atend=False)

    return fftconvolve3(x, self.ma, self.ar)
filter a time series using fftconvolve3 with ARMA filter padding of x currently works only if x is 1d in example it produces same observations at beginning as lfilter even without padding. TODO: this returns 1 additional observation at the end
filter2
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def acf2spdfreq(self, acovf, nfreq=100, w=None):
    '''
    not really a method
    just for comparison, not efficient for large n or long acf

    this is also similarly used in tsa.stattools.periodogram with window
    '''
    if w is None:
        w = np.linspace(0, np.pi, nfreq)[:, None]
    nac = len(acovf)
    hw = 0.5 / np.pi * (acovf[0] +
                        2 * (acovf[1:] * np.cos(w * np.arange(1, nac))).sum(1))
    return hw
not really a method just for comparison, not efficient for large n or long acf this is also similarly used in tsa.stattools.periodogram with window
acf2spdfreq
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def invpowerspd(self, n):
    '''autocovariance from spectral density

    scaling is correct, but n needs to be large for numerical accuracy
    maybe padding with zero in fft would be faster
    without slicing it returns 2-sided autocovariance with fftshift

    >>> ArmaFft([1, -0.5], [1., 0.4], 40).invpowerspd(2**8)[:10]
    array([ 2.08    ,  1.44    ,  0.72    ,  0.36    ,  0.18    ,  0.09    ,
            0.045   ,  0.0225  ,  0.01125 ,  0.005625])
    >>> ArmaFft([1, -0.5], [1., 0.4], 40).acovf(10)
    array([ 2.08    ,  1.44    ,  0.72    ,  0.36    ,  0.18    ,  0.09    ,
            0.045   ,  0.0225  ,  0.01125 ,  0.005625])
    '''
    hw = self.fftarma(n)
    return np.real_if_close(fft.ifft(hw * hw.conj()), tol=200)[:n]
autocovariance from spectral density scaling is correct, but n needs to be large for numerical accuracy maybe padding with zero in fft would be faster without slicing it returns 2-sided autocovariance with fftshift >>> ArmaFft([1, -0.5], [1., 0.4], 40).invpowerspd(2**8)[:10] array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 , 0.045 , 0.0225 , 0.01125 , 0.005625]) >>> ArmaFft([1, -0.5], [1., 0.4], 40).acovf(10) array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 , 0.045 , 0.0225 , 0.01125 , 0.005625])
invpowerspd
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
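The same round trip can be reproduced standalone via the Wiener-Khinchin relation: the inverse FFT of |H(w)|^2 recovers the autocovariances, which for the ARMA(1,1) in the doctest decay geometrically with the AR coefficient from lag 1 on (a sketch, not using the class):

import numpy as np

ar = np.array([1.0, -0.5])
ma = np.array([1.0, 0.4])
n = 2**12                                 # large n for numerical accuracy
hw = np.fft.fft(ma, n) / np.fft.fft(ar, n)
acovf = np.fft.ifft(hw * hw.conj()).real  # Wiener-Khinchin

assert np.allclose(acovf[0], 2.08)        # matches the doctest values
assert np.allclose(acovf[2:10], acovf[1] * 0.5**np.arange(1, 9))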
def spdmapoly(self, w, twosided=False):
    '''ma only, need division for ar, use LagPolynomial
    '''
    if w is None:
        # nfreq was undefined in the original; use a default grid
        w = np.linspace(0, np.pi, 100)
    return 0.5 / np.pi * self.mapoly(np.exp(w * 1j))
ma only, need division for ar, use LagPolynomial
spdmapoly
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
    """Plot results"""
    rvs = self.generate_sample(nsample=nobs, burnin=500)
    acf = self.acf(nacf)[:nacf]   #TODO: check return length
    pacf = self.pacf(nacf)
    w = np.linspace(0, np.pi, nfreq)
    spdr, wr = self.spdroots(w)

    if fig is None:
        import matplotlib.pyplot as plt
        fig = plt.figure()
    ax = fig.add_subplot(2, 2, 1)
    ax.plot(rvs)
    ax.set_title(f'Random Sample \nar={self.ar}, ma={self.ma}')

    ax = fig.add_subplot(2, 2, 2)
    ax.plot(acf)
    ax.set_title(f'Autocorrelation \nar={self.ar}, ma={self.ma}')

    ax = fig.add_subplot(2, 2, 3)
    ax.plot(wr, spdr)
    ax.set_title(f'Power Spectrum \nar={self.ar}, ma={self.ma}')

    ax = fig.add_subplot(2, 2, 4)
    ax.plot(pacf)
    ax.set_title(f'Partial Autocorrelation \nar={self.ar}, ma={self.ma}')

    return fig
Plot results
plot4
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/fftarma.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py
BSD-3-Clause
def movorder(x, order='med', windsize=3, lag='lagged'):
    '''moving order statistics

    Parameters
    ----------
    x : ndarray
       time series data
    order : float or 'med', 'min', 'max'
       which order statistic to calculate
    windsize : int
       window size
    lag : 'lagged', 'centered', or 'leading'
       location of window relative to current position

    Returns
    -------
    filtered array
    '''

    #if windsize is even should it raise ValueError
    if lag == 'lagged':
        lead = windsize // 2
    elif lag == 'centered':
        lead = 0
    elif lag == 'leading':
        lead = -windsize // 2 + 1
    else:
        raise ValueError("lag %r not understood" % (lag,))
    if np.isfinite(order):   # if np.isnumber(order):
        ordr = order   # renamed: ord is a builtin function
    elif order == 'med':
        ordr = (windsize - 1) // 2   # rank needs to be an integer
    elif order == 'min':
        ordr = 0
    elif order == 'max':
        ordr = windsize - 1
    else:
        raise ValueError("order %r not understood" % (order,))
    #return signal.order_filter(x, np.ones(windsize), ordr)[:-lead]
    xext = expandarr(x, windsize)
    #np.r_[np.ones(windsize)*x[0], x, np.ones(windsize)*x[-1]]
    return signal.order_filter(xext, np.ones(windsize),
                               ordr)[windsize - lead:-(windsize + lead)]
moving order statistics Parameters ---------- x : ndarray time series data order : float or 'med', 'min', 'max' which order statistic to calculate windsize : int window size lag : 'lagged', 'centered', or 'leading' location of window relative to current position Returns ------- filtered array
movorder
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/movstat.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/movstat.py
BSD-3-Clause
def check_movorder():
    '''graphical test for movorder'''
    import matplotlib.pyplot as plt

    x = np.arange(1, 10)
    xo = movorder(x, order='max')
    assert_array_equal(xo, x)
    x = np.arange(10, 1, -1)
    xo = movorder(x, order='min')
    assert_array_equal(xo, x)
    assert_array_equal(movorder(x, order='min', lag='centered')[:-1], x[1:])

    tt = np.linspace(0, 2 * np.pi, 15)
    x = np.sin(tt) + 1
    xo = movorder(x, order='max')
    plt.figure()
    plt.plot(tt, x, '.-', tt, xo, '.-')
    plt.title('moving max lagged')
    xo = movorder(x, order='max', lag='centered')
    plt.figure()
    plt.plot(tt, x, '.-', tt, xo, '.-')
    plt.title('moving max centered')
    xo = movorder(x, order='max', lag='leading')
    plt.figure()
    plt.plot(tt, x, '.-', tt, xo, '.-')
    plt.title('moving max leading')
graphical test for movorder
check_movorder
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/movstat.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/movstat.py
BSD-3-Clause
def movmean(x, windowsize=3, lag='lagged'):
    '''moving window mean

    Parameters
    ----------
    x : ndarray
       time series data
    windowsize : int
       window size
    lag : 'lagged', 'centered', or 'leading'
       location of window relative to current position

    Returns
    -------
    mk : ndarray
        moving mean, with same shape as x

    Notes
    -----
    for leading and lagging the data array x is extended by the closest
    value of the array
    '''
    return movmoment(x, 1, windowsize=windowsize, lag=lag)
moving window mean Parameters ---------- x : ndarray time series data windowsize : int window size lag : 'lagged', 'centered', or 'leading' location of window relative to current position Returns ------- mk : ndarray moving mean, with same shape as x Notes ----- for leading and lagging the data array x is extended by the closest value of the array
movmean
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/movstat.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/movstat.py
BSD-3-Clause
def movvar(x, windowsize=3, lag='lagged'):
    '''moving window variance

    Parameters
    ----------
    x : ndarray
       time series data
    windowsize : int
       window size
    lag : 'lagged', 'centered', or 'leading'
       location of window relative to current position

    Returns
    -------
    mk : ndarray
        moving variance, with same shape as x
    '''
    m1 = movmoment(x, 1, windowsize=windowsize, lag=lag)
    m2 = movmoment(x, 2, windowsize=windowsize, lag=lag)
    return m2 - m1 * m1
moving window variance Parameters ---------- x : ndarray time series data windowsize : int window size lag : 'lagged', 'centered', or 'leading' location of window relative to current position Returns ------- mk : ndarray moving variance, with same shape as x
movvar
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/movstat.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/movstat.py
BSD-3-Clause
def movmoment(x, k, windowsize=3, lag='lagged'):
    '''non-central moment

    Parameters
    ----------
    x : ndarray
       time series data
    windowsize : int
       window size
    lag : 'lagged', 'centered', or 'leading'
       location of window relative to current position

    Returns
    -------
    mk : ndarray
        k-th moving non-central moment, with same shape as x

    Notes
    -----
    If data x is 2d, then moving moment is calculated for each column.
    '''
    windsize = windowsize
    #if windsize is even should it raise ValueError
    if lag == 'lagged':
        lead = 0
        sl = slice((windsize - 1) or None, -2 * (windsize - 1) or None)
    elif lag == 'centered':
        lead = -windsize // 2
        sl = slice((windsize - 1) + windsize // 2 or None,
                   -(windsize - 1) - windsize // 2 or None)
    elif lag == 'leading':
        lead = -windsize + 2
        sl = slice(2 * (windsize - 1) + 1 + lead or None,
                   -(2 * (windsize - 1) + lead) + 1 or None)
    else:
        raise ValueError("lag %r not understood" % (lag,))

    avgkern = (np.ones(windowsize) / float(windowsize))
    xext = expandarr(x, windsize - 1)
    #Note: expandarr increases the array size by 2*(windsize-1)

    if xext.ndim == 1:
        return np.correlate(xext**k, avgkern, 'full')[sl]
    else:
        # try first with 2d along columns, possibly ndim with axis
        return signal.correlate(xext**k, avgkern[:, None], 'full')[sl, :]
non-central moment Parameters ---------- x : ndarray time series data windowsize : int window size lag : 'lagged', 'centered', or 'leading' location of window relative to current position Returns ------- mk : ndarray k-th moving non-central moment, with same shape as x Notes ----- If data x is 2d, then moving moment is calculated for each column.
movmoment
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/movstat.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/movstat.py
BSD-3-Clause
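A quick sanity sketch of the window alignment (assuming movmean and the module's expandarr are in scope): for a linear series the centered mean reproduces the interior values exactly, while the lagged mean, which averages the current observation and the two before it for windowsize 3, lags by one:

import numpy as np

x = np.arange(10.0)
m_c = movmean(x, windowsize=3, lag='centered')
m_l = movmean(x, windowsize=3, lag='lagged')

assert np.allclose(m_c[1:-1], x[1:-1])    # mean(t-1, t, t+1) == t
assert np.allclose(m_l[2:], x[2:] - 1)    # mean(t-2, t-1, t) == t - 1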
def simulateW(self, nobs=100, T=1, dt=None, nrepl=1):
    '''generate sample of Wiener Process
    '''
    if dt is None:
        dt = T * 1.0 / nobs   # previously the dt argument was ignored
    t = np.linspace(dt, T, nobs)
    dW = np.sqrt(dt) * np.random.normal(size=(nrepl, nobs))
    W = np.cumsum(dW, 1)
    self.dW = dW
    return W, t
generate sample of Wiener Process
simulateW
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/diffusion.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py
BSD-3-Clause
def expectedsim(self, func, nobs=100, T=1, dt=None, nrepl=1):
    '''get expectation of a function of a Wiener Process by simulation

    initially test example from
    '''
    W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
    U = func(t, W)
    Umean = U.mean(0)
    return U, Umean, t
get expectation of a function of a Wiener Process by simulation initially test example from
expectedsim
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/diffusion.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py
BSD-3-Clause
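The dangling note above likely refers to the kind of check used in Higham (2001), which simEM below also cites: for U = exp(t + W_t/2) the exact expectation is exp(9t/8), which the Monte Carlo mean should track. A standalone sketch:

import numpy as np

rng = np.random.default_rng(0)
nobs, nrepl, T = 100, 2000, 1.0
dt = T / nobs
t = np.linspace(dt, T, nobs)
W = np.cumsum(np.sqrt(dt) * rng.standard_normal((nrepl, nobs)), axis=1)

Umean = np.exp(t + 0.5 * W).mean(axis=0)
exact = np.exp(9 * t / 8)                # E[exp(t + W_t/2)] = exp(9t/8)
print(np.max(np.abs(Umean - exact)))     # small, up to Monte Carlo error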
def simEM(self, xzero=None, nobs=100, T=1, dt=None, nrepl=1, Tratio=4):
    '''

    from Higham 2001

    TODO: reverse parameterization to start with final nobs and DT
    TODO: check if I can skip the loop using my way from exactprocess
          problem might be Winc (reshape into 3d and sum)
    TODO: (later) check memory efficiency for large simulations
    '''
    nobs = nobs * Tratio   # simple way to change parameter
    # maybe wrong parameterization,
    # drift too large, variance too small ? which dt/Dt
    # _drift, _sig independent of dt is wrong
    if xzero is None:
        xzero = self.xzero

    dW = self.dW

    L = nobs // Tratio   # L EM steps of size Dt = R*dt (must be an int)
    Xem = np.zeros((nrepl, L))   # preallocate for efficiency
    Xtemp = xzero
    Xem[:, 0] = xzero
    for j in np.arange(1, L):
        Winc = np.sum(dW[:, np.arange(Tratio * (j - 1) + 1, Tratio * j)], 1)
        #Xtemp = Xtemp + Dt*lamda*Xtemp + mu*Xtemp*Winc;
        Xtemp = Xtemp + self._drift(x=Xtemp) + self._sig(x=Xtemp) * Winc
        Xem[:, j] = Xtemp
    return Xem
from Higham 2001 TODO: reverse parameterization to start with final nobs and DT TODO: check if I can skip the loop using my way from exactprocess problem might be Winc (reshape into 3d and sum) TODO: (later) check memory efficiency for large simulations
simEM
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/diffusion.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py
BSD-3-Clause
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
    '''ddt : discrete delta t

    should be the same as an AR(1)
    not tested yet
    '''
    t = np.linspace(ddt, nobs * ddt, nobs)   # currently unused
    #expnt = np.exp(-self.lambd * t)
    expddt = np.exp(-self.lambd * ddt)
    normrvs = np.random.normal(size=(nrepl, nobs))
    #do I need lfilter here AR(1) ? if mean reverting lag-coeff<1
    #lfilter does not handle 2d arrays, it does?
    inc = self._exactconst(expddt) + self._exactstd(expddt) * normrvs
    return signal.lfilter([1.], [1., -expddt], inc)
ddt : discrete delta t should be the same as an AR(1) not tested yet
exactprocess
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/diffusion.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py
BSD-3-Clause
def exactprocess(self, nobs, xzero=None, ddt=1., nrepl=2):
    '''ddt : discrete delta t

    not tested yet
    '''
    if xzero is None:
        xzero = self.xzero
    t = np.linspace(ddt, nobs * ddt, nobs)   # currently unused
    normrvs = np.random.normal(size=(nrepl, nobs))
    inc = self._drift + self._sigma * np.sqrt(ddt) * normrvs
    #return signal.lfilter([1.], [1., -1], inc)
    return xzero + np.cumsum(inc, 1)
ddt : discrete delta t not tested yet
exactprocess
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/diffusion.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py
BSD-3-Clause
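The increment construction above, stripped of the class plumbing: arithmetic Brownian motion has the exact discretization X_{t+1} = X_t + mu*ddt + sigma*sqrt(ddt)*Z_t, so paths are just a cumulative sum of i.i.d. normal increments (a sketch with made-up parameters):

import numpy as np

rng = np.random.default_rng(0)
mu, sigma, ddt, nobs, nrepl, xzero = 0.05, 0.2, 1.0, 250, 3, 10.0

inc = mu * ddt + sigma * np.sqrt(ddt) * rng.standard_normal((nrepl, nobs))
paths = xzero + np.cumsum(inc, axis=1)   # shape (nrepl, nobs)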
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
    '''ddt : discrete delta t

    should be the same as an AR(1)
    not tested yet
    # after writing this I saw the same use of lfilter in sitmo
    '''
    t = np.linspace(ddt, nobs * ddt, nobs)
    expnt = np.exp(-self.lambd * t)   # currently unused
    expddt = np.exp(-self.lambd * ddt)
    normrvs = np.random.normal(size=(nrepl, nobs))
    #do I need lfilter here AR(1) ? lfilter does not handle 2d arrays, it does?
    from scipy import signal
    #xzero * expnt
    inc = (self.mu * (1 - expddt) +
           self.sigma * np.sqrt((1 - expddt * expddt) / 2. / self.lambd)
           * normrvs)
    return signal.lfilter([1.], [1., -expddt], inc)
ddt : discrete delta t should be the same as an AR(1) not tested yet # after writing this I saw the same use of lfilter in sitmo
exactprocess
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/diffusion.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py
BSD-3-Clause
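Written out, the exact discretization above is an AR(1): with phi = exp(-lambd*ddt), x_t = mu*(1 - phi) + phi*x_{t-1} + e_t where Var(e_t) = sigma**2*(1 - phi**2)/(2*lambd), which lfilter applies recursively (a standalone sketch for one replication):

import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
lambd, mu, sigma, ddt, nobs = 0.5, 1.0, 0.3, 0.1, 1000

phi = np.exp(-lambd * ddt)
scale = np.sqrt(sigma**2 * (1 - phi**2) / (2 * lambd))
inc = mu * (1 - phi) + scale * rng.standard_normal(nobs)
x = signal.lfilter([1.0], [1.0, -phi], inc)   # x_t = phi*x_{t-1} + inc_t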
def fitls(self, data, dt):
    '''assumes data is 1d, univariate time series

    formula from sitmo
    '''
    # brute force, no parameter estimation errors
    nobs = len(data) - 1
    exog = np.column_stack((np.ones(nobs), data[:-1]))
    parest, res, rank, sing = np.linalg.lstsq(exog, data[1:], rcond=-1)
    const, slope = parest
    errvar = res / (nobs - 2.)
    lambd = -np.log(slope) / dt
    sigma = np.sqrt(-errvar * 2. * np.log(slope) / (1 - slope**2) / dt)
    mu = const / (1 - slope)
    return mu, lambd, sigma
assumes data is 1d, univariate time series formula from sitmo
fitls
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/diffusion.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py
BSD-3-Clause
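A round-trip sketch of the least-squares mapping in fitls: simulate the exact AR(1) recursion and invert the slope/intercept back to (mu, lambd); the recovered values approach the truth as the sample grows:

import numpy as np

rng = np.random.default_rng(1)
lambd, mu, sigma, dt, nobs = 0.8, 2.0, 0.25, 0.05, 100_000

phi = np.exp(-lambd * dt)
scale = np.sqrt(sigma**2 * (1 - phi**2) / (2 * lambd))
x = np.empty(nobs)
x[0] = mu
for t in range(1, nobs):
    x[t] = mu * (1 - phi) + phi * x[t - 1] + scale * rng.standard_normal()

exog = np.column_stack((np.ones(nobs - 1), x[:-1]))
(const, slope), _, _, _ = np.linalg.lstsq(exog, x[1:], rcond=None)
lambd_hat = -np.log(slope) / dt          # close to 0.8
mu_hat = const / (1 - slope)             # close to 2.0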
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
    '''uses exact solution for log of process
    '''
    lnxzero = np.log(xzero)   # log of starting value, currently unused
    lnx = super().exactprocess(xzero, nobs, ddt=ddt, nrepl=nrepl)
    return np.exp(lnx)
uses exact solution for log of process
exactprocess
python
statsmodels/statsmodels
statsmodels/sandbox/tsa/diffusion.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py
BSD-3-Clause