code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def cov_params(self):
'''
    estimated covariance for coefficients of exogenous variables or fixed effects
see cov_fixed, and Sinv in _compute_a
'''
    return self.cov_fixed() | estimated covariance for coefficients of exogenous variables or fixed effects | cov_params | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def bse(self):
'''
    standard errors of estimated coefficients for exogenous variables (fixed)
    '''
    return np.sqrt(np.diag(self.cov_params())) | standard errors of estimated coefficients for exogenous variables (fixed) | bse | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def deviance(self, ML=False):
'''deviance defined as 2 times the negative loglikelihood
'''
return -2 * self.logL(ML=ML) | deviance defined as 2 times the negative loglikelihood | deviance | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def logL(self, ML=False):
"""
Return log-likelihood, REML by default.
"""
#I do not know what the difference between REML and ML is here.
logL = 0.
for unit in self.units:
logL += unit.logL(a=self.a, ML=ML)
if not ML:
logL += np.log(L.det(self.Sinv)) / 2
return logL | Return log-likelihood, REML by default. | logL | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def cont(self, ML=False, rtol=1.0e-05, params_rtol=1e-5, params_atol=1e-4):
'''convergence check for iterative estimation
'''
self.dev, old = self.deviance(ML=ML), self.dev
#self.history.append(np.hstack((self.dev, self.a)))
self.history['llf'].append(self.dev)
self.history['params'].append(self.a.copy())
self.history['D'].append(self.D.copy())
    if np.fabs((self.dev - old) / self.dev) < rtol:
#print np.fabs((self.dev - old)), self.dev, old
self.termination = 'llf'
return False
#break if parameters converged
#TODO: check termination conditions, OR or AND
if np.all(np.abs(self.a - self._a_old) < (params_rtol * self.a + params_atol)):
self.termination = 'params'
return False
self._a_old = self.a.copy()
return True | convergence check for iterative estimation | cont | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
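For reference, the stopping rule in `cont` combines a relative tolerance on the deviance with a mixed relative/absolute tolerance on the parameters. A minimal standalone sketch of the same rule (using `abs(a)` in the relative term, which the method above omits and which matters for negative parameters):

```python
import numpy as np

def converged(dev, dev_old, a, a_old, rtol=1e-5, params_rtol=1e-5, params_atol=1e-4):
    #relative change in the deviance (objective function)
    if np.fabs((dev - dev_old) / dev) < rtol:
        return 'llf'
    #elementwise mixed relative/absolute tolerance on the parameters
    if np.all(np.abs(a - a_old) < params_rtol * np.abs(a) + params_atol):
        return 'params'
    return None

a = np.array([1.0, 2.0])
print(converged(100.0, 100.0005, a + 1.0, a))   #'llf': deviance change is tiny
```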
def plot_random_univariate(self, bins=None, use_loc=True):
'''create plot of marginal distribution of random effects
Parameters
----------
bins : int or bin edges
        option for bins in matplotlib's hist method. Current default is not
very sophisticated. All distributions use the same setting for
bins.
use_loc : bool
If True, then the distribution with mean given by the fixed
effect is used.
Returns
-------
Figure
figure with subplots
Notes
-----
What can make this fancier?
Bin edges will not make sense if loc or scale differ across random
effect distributions.
'''
#outsource this
import matplotlib.pyplot as plt
from scipy.stats import norm as normal
fig = plt.figure()
k = self.model.k_exog_re
if k > 3:
rows, cols = int(np.ceil(k * 0.5)), 2
else:
rows, cols = k, 1
if bins is None:
#bins = self.model.n_units // 20 #TODO: just roughly, check
#bins = np.sqrt(self.model.n_units)
        bins = int(5 + 2 * self.model.n_units**(1./3.))  #hist requires int or bin edges
if use_loc:
loc = self.mean_random()
else:
loc = [0]*k
scale = self.std_random()
for ii in range(k):
        ax = fig.add_subplot(rows, cols, ii+1)  #subplot indices are 1-based
        freq, bins_, _ = ax.hist(loc[ii] + self.params_random_units[:,ii],
                                 bins=bins, density=True)
points = np.linspace(bins_[0], bins_[-1], 200)
#ax.plot(points, normal.pdf(points, loc=loc, scale=scale))
#loc of sample is approx. zero, with Z appended to X
#alternative, add fixed to mean
ax.set_title('Random Effect %d Marginal Distribution' % ii)
ax.plot(points,
normal.pdf(points, loc=loc[ii], scale=scale[ii]),
'r')
    return fig | create plot of marginal distribution of random effects | plot_random_univariate | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def plot_scatter_pairs(self, idx1, idx2, title=None, ax=None):
'''create scatter plot of two random effects
Parameters
----------
idx1, idx2 : int
indices of the two random effects to display, corresponding to
columns of exog_re
title : None or string
If None, then a default title is added
ax : None or matplotlib axis instance
If None, then a figure with one axis is created and returned.
If ax is not None, then the scatter plot is created on it, and
this axis instance is returned.
Returns
-------
ax_or_fig : axis or figure instance
see ax parameter
Notes
-----
Still needs ellipse from estimated parameters
'''
import matplotlib.pyplot as plt
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(1,1,1)
        ax_or_fig = fig
    else:
        #return the axis instance when the caller supplied it, per the docstring
        ax_or_fig = ax
    re1 = self.params_random_units[:,idx1]
    re2 = self.params_random_units[:,idx2]
    ax.plot(re1, re2, 'o', alpha=0.75)
    if title is None:
        title = 'Random Effects %d and %d' % (idx1, idx2)
    ax.set_title(title)
    return ax_or_fig | create scatter plot of two random effects | plot_scatter_pairs | python | statsmodels/statsmodels | statsmodels/sandbox/panel/mixed.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/mixed.py | BSD-3-Clause |
def kernel(d1, d2, r=None, weights=None):
'''general product kernel
hardcoded split for the example:
cat1 is continuous (time), other categories are discrete
weights is e.g. Bartlett for cat1
r is (0,1) indicator vector for boolean weights 1{d1_i == d2_i}
returns boolean if no continuous weights are used
'''
diff = d1 - d2
if (weights is None) or (r[0] == 0):
#time is irrelevant or treated as categorical
return np.all((r * diff) == 0) #return bool
else:
#time uses continuous kernel, all other categorical
        return weights[diff] * np.all((r[1:] * diff[1:]) == 0) | general product kernel | kernel | python | statsmodels/statsmodels | statsmodels/sandbox/panel/sandwich_covariance_generic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/sandwich_covariance_generic.py | BSD-3-Clause |
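A minimal sketch of the discrete case of this product kernel (`weights=None`): a pair of observations gets weight one exactly when the two label vectors agree on every category selected by `r`. The labels and `r` below are made up for illustration:

```python
import numpy as np

def product_kernel(d1, d2, r):
    #discrete product kernel: 1{d1_i == d2_i} for every category with r_i != 0
    return np.all((r * (d1 - d2)) == 0)

d1 = np.array([3, 0, 1])   #(time, firm, industry) labels of observation i
d2 = np.array([5, 0, 1])   #same firm and industry, different time
r = np.array([0, 1, 0])    #cluster on the firm dimension only
print(product_kernel(d1, d2, r))                     #True: same firm
print(product_kernel(d1, d2, np.array([1, 1, 0])))   #False: different time
```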
def aggregate_cov(x, d, r=None, weights=None):
    '''sum of outer product over groups and time selected by r
This is for a generic reference implementation, it uses a nobs-nobs double
loop.
Parameters
----------
x : ndarray, (nobs,) or (nobs, k_vars)
data, for robust standard error calculation, this is array of x_i * u_i
d : ndarray, (nobs, n_groups)
integer group labels, each column contains group (or time) indices
r : ndarray, (n_groups,)
indicator for which groups to include. If r[i] is zero, then
this group is ignored. If r[i] is not zero, then the cluster robust
standard errors include this group.
weights : ndarray
weights if the first group dimension uses a HAC kernel
Returns
-------
cov : ndarray (k_vars, k_vars) or scalar
covariance matrix aggregates over group kernels
count : int
number of terms added in sum, mainly returned for cross-checking
Notes
-----
This uses `kernel` to calculate the weighted distance between two
observations.
'''
nobs = x.shape[0] #either 1d or 2d with obs in rows
#next is not needed yet
# if x.ndim == 2:
# kvars = x.shape[1]
# else:
# kvars = 1
count = 0 #count non-zero pairs for cross checking, not needed
res = 0 * np.outer(x[0], x[0]) #get output shape
for ii in range(nobs):
for jj in range(nobs):
w = kernel(d[ii], d[jj], r=r, weights=weights)
if w: #true or non-zero
                res += w * np.outer(x[ii], x[jj])
                count += 1
    return res, count | sum of outer product over groups and time selected by r | aggregate_cov | python | statsmodels/statsmodels | statsmodels/sandbox/panel/sandwich_covariance_generic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/sandwich_covariance_generic.py | BSD-3-Clause |
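As a cross-check on the double loop above: with `r` all ones and no continuous weights, only the diagonal pairs `ii == jj` survive, so the aggregate reduces to the White sum of outer products. A standalone sketch that re-implements the discrete kernel inline rather than importing from this module:

```python
import numpy as np

rng = np.random.default_rng(0)
nobs, k = 50, 2
x = rng.normal(size=(nobs, k))          #scores x_i * u_i
d = np.arange(nobs)[:, None]            #one "group" column: every obs its own group

#White case: r = ones keeps only pairs with identical label vectors, i.e. ii == jj
res = np.zeros((k, k))
for ii in range(nobs):
    for jj in range(nobs):
        if np.all(d[ii] == d[jj]):      #discrete product kernel with r = ones
            res += np.outer(x[ii], x[jj])

assert np.allclose(res, x.T @ x)        #equals the usual sum of outer products
print(res)
```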
def S_all_hac(x, d, nlags=1):
'''HAC independent of categorical group membership
'''
r = np.zeros(d.shape[1])
r[0] = 1
weights = weights_bartlett(nlags)
return aggregate_cov(x, d, r=r, weights=weights) | HAC independent of categorical group membership | S_all_hac | python | statsmodels/statsmodels | statsmodels/sandbox/panel/sandwich_covariance_generic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/sandwich_covariance_generic.py | BSD-3-Clause |
def S_within_hac(x, d, nlags=1, groupidx=1):
'''HAC for observations within a categorical group
'''
r = np.zeros(d.shape[1])
r[0] = 1
r[groupidx] = 1
weights = weights_bartlett(nlags)
return aggregate_cov(x, d, r=r, weights=weights) | HAC for observations within a categorical group | S_within_hac | python | statsmodels/statsmodels | statsmodels/sandbox/panel/sandwich_covariance_generic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/sandwich_covariance_generic.py | BSD-3-Clause |
def S_white(x, d):
'''simple white heteroscedasticity robust covariance
note: calculating this way is very inefficient, just for cross-checking
'''
r = np.ones(d.shape[1]) #only points on diagonal
    return aggregate_cov(x, d, r=r, weights=None) | simple white heteroscedasticity robust covariance | S_white | python | statsmodels/statsmodels | statsmodels/sandbox/panel/sandwich_covariance_generic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/sandwich_covariance_generic.py | BSD-3-Clause |
def group(X):
"""
Returns unique numeric values for groups without sorting.
Examples
--------
>>> X = np.array(['a','a','b','c','b','c'])
    >>> g = group(X)
    >>> g
array([ 0., 0., 1., 2., 1., 2.])
"""
uniq_dict = {}
group = np.zeros(len(X))
for i in range(len(X)):
if X[i] not in uniq_dict:
uniq_dict.update({X[i] : len(uniq_dict)})
group[i] = uniq_dict[X[i]]
    return group | Returns unique numeric values for groups without sorting. | group | python | statsmodels/statsmodels | statsmodels/sandbox/panel/panelmod.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panelmod.py | BSD-3-Clause |
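The point of `group` over `np.unique(..., return_inverse=True)` is that the codes follow first appearance instead of sorted label order; a small sketch of the difference:

```python
import numpy as np

X = np.array(['b', 'b', 'a', 'c', 'a'])
#np.unique sorts the labels, so the codes differ from first-appearance order
_, codes_sorted = np.unique(X, return_inverse=True)
print(codes_sorted)        #[1 1 0 2 0] -- 'a' gets 0 because of sorting

#first-appearance codes, as group() above produces them
uniq = {}
codes = np.array([uniq.setdefault(v, len(uniq)) for v in X], dtype=float)
print(codes)               #[0. 0. 1. 2. 1.]
```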
def repanel_cov(groups, sigmas):
'''calculate error covariance matrix for random effects model
Parameters
----------
groups : ndarray, (nobs, nre) or (nobs,)
array of group/category observations
    sigmas : ndarray, (nre+1,)
array of standard deviations of random effects,
last element is the standard deviation of the
idiosyncratic error
Returns
-------
omega : ndarray, (nobs, nobs)
covariance matrix of error
omegainv : ndarray, (nobs, nobs)
inverse covariance matrix of error
omegainvsqrt : ndarray, (nobs, nobs)
squareroot inverse covariance matrix of error
        such that omegainv = omegainvsqrt * omegainvsqrt.T
Notes
-----
This does not use sparse matrices and constructs nobs by nobs
matrices. Also, omegainvsqrt is not sparse, i.e. elements are non-zero
'''
if groups.ndim == 1:
groups = groups[:,None]
nobs, nre = groups.shape
omega = sigmas[-1]*np.eye(nobs)
for igr in range(nre):
group = groups[:,igr:igr+1]
groupuniq = np.unique(group)
dummygr = sigmas[igr] * (group == groupuniq).astype(float)
omega += np.dot(dummygr, dummygr.T)
ev, evec = np.linalg.eigh(omega) #eig does not work
omegainv = np.dot(evec, (1/ev * evec).T)
omegainvhalf = evec/np.sqrt(ev)
    return omega, omegainv, omegainvhalf | calculate error covariance matrix for random effects model | repanel_cov | python | statsmodels/statsmodels | statsmodels/sandbox/panel/panelmod.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panelmod.py | BSD-3-Clause |
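A standalone sketch of the same construction on a four-observation panel with one grouping dimension, verifying that `omegainv` inverts `omega` and that `omegainvhalf` is its square-root factor:

```python
import numpy as np

#tiny panel: 4 obs, two groups, sigmas = (sigma_group, sigma_idiosyncratic)
groups = np.array([0, 0, 1, 1])[:, None]
sigmas = np.array([0.5, 1.0])

nobs = groups.shape[0]
omega = sigmas[-1] * np.eye(nobs)
for igr in range(groups.shape[1]):
    g = groups[:, igr:igr+1]
    dummy = sigmas[igr] * (g == np.unique(g)).astype(float)
    omega += dummy @ dummy.T

ev, evec = np.linalg.eigh(omega)
omegainv = evec @ (evec / ev).T      #evec diag(1/ev) evec.T
omegainvhalf = evec / np.sqrt(ev)

assert np.allclose(omegainv @ omega, np.eye(nobs))
assert np.allclose(omegainvhalf @ omegainvhalf.T, omegainv)
```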
def initialize(self, endog, exog, panel, time, xtnames, equation):
"""
Initialize plain array model.
See PanelModel
"""
#TODO: for now, we are going assume a constant, and then make the first
#panel the base, add a flag for this....
# get names
names = equation.split(" ")
self.endog_name = names[0]
exog_names = names[1:] # this makes the order matter in the array
self.panel_name = xtnames[0]
self.time_name = xtnames[1]
novar = exog.var(0) == 0
    if novar.any():
cons_index = np.where(novar == 1)[0][0] # constant col. num
exog_names.insert(cons_index, 'cons')
self._cons_index = novar # used again in fit_fixed
self.exog_names = exog_names
self.endog = np.squeeze(np.asarray(endog))
exog = np.asarray(exog)
self.exog = exog
self.panel = np.asarray(panel)
self.time = np.asarray(time)
self.paneluniq = np.unique(panel)
    self.timeuniq = np.unique(time) | Initialize plain array model. See PanelModel | initialize | python | statsmodels/statsmodels | statsmodels/sandbox/panel/panelmod.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panelmod.py | BSD-3-Clause |
def _group_mean(self, X, index='oneway', counts=False, dummies=False):
"""
Get group means of X by time or by panel.
    index default is 'oneway' (grouping by panel)
"""
if index == 'oneway':
Y = self.panel
uniq = self.paneluniq
elif index == 'time':
Y = self.time
uniq = self.timeuniq
else:
raise ValueError("index %s not understood" % index)
#TODO: use sparse matrices
dummy = (Y == uniq[:,None]).astype(float)
if X.ndim > 1:
mean = np.dot(dummy,X)/dummy.sum(1)[:,None]
else:
mean = np.dot(dummy,X)/dummy.sum(1)
if counts is False and dummies is False:
return mean
elif counts is True and dummies is False:
return mean, dummy.sum(1)
elif counts is True and dummies is True:
return mean, dummy.sum(1), dummy
elif counts is False and dummies is True:
        return mean, dummy | Get group means of X by time or by panel. | _group_mean | python | statsmodels/statsmodels | statsmodels/sandbox/panel/panelmod.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panelmod.py | BSD-3-Clause |
def fit(self, model=None, method=None, effects='oneway'):
"""
method : LSDV, demeaned, MLE, GLS, BE, FE, optional
model :
between
fixed
random
pooled
[gmm]
effects :
oneway
time
twoway
femethod : demeaned (only one implemented)
WLS
remethod :
swar -
amemiya
nerlove
walhus
Notes
-----
This is unfinished. None of the method arguments work yet.
Only oneway effects should work.
"""
    if method: # get rid of this with default
        method = method.lower()
    if model:  # guard: model is None by default
        model = model.lower()
if method and method not in ["lsdv", "demeaned", "mle",
"gls", "be", "fe"]:
# get rid of if method with default
raise ValueError("%s not a valid method" % method)
# if method == "lsdv":
# self.fit_lsdv(model)
if model == 'pooled':
return GLS(self.endog, self.exog).fit()
if model == 'between':
return self._fit_btwn(method, effects)
if model == 'fixed':
    return self._fit_fixed(method, effects) | method : LSDV, demeaned, MLE, GLS, BE, FE, optional | fit | python | statsmodels/statsmodels | statsmodels/sandbox/panel/panelmod.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/panel/panelmod.py | BSD-3-Clause |
def arfilter(x, a):
'''apply an autoregressive filter to a series x
x can be 2d, a can be 1d, 2d, or 3d
Parameters
----------
x : array_like
data array, 1d or 2d, if 2d then observations in rows
a : array_like
autoregressive filter coefficients, ar lag polynomial
see Notes
Returns
-------
y : ndarray, 2d
filtered array, number of columns determined by x and a
Notes
-----
In general form this uses the linear filter ::
y = a(L)x
where
x : nobs, nvars
a : nlags, nvars, npoly
Depending on the shape and dimension of a this uses different
Lag polynomial arrays
case 1 : a is 1d or (nlags,1)
one lag polynomial is applied to all variables (columns of x)
case 2 : a is 2d, (nlags, nvars)
each series is independently filtered with its own
lag polynomial, uses loop over nvar
case 3 : a is 3d, (nlags, nvars, npoly)
the ith column of the output array is given by the linear filter
defined by the 2d array a[:,:,i], i.e. ::
y[:,i] = a(.,.,i)(L) * x
y[t,i] = sum_p sum_j a(p,j,i)*x(t-p,j)
for p = 0,...nlags-1, j = 0,...nvars-1,
for all t >= nlags
    Note: maybe convert to axis=1 (not done)
TODO: initial conditions
'''
x = np.asarray(x)
a = np.asarray(a)
if x.ndim == 1:
x = x[:,None]
if x.ndim > 2:
raise ValueError('x array has to be 1d or 2d')
nvar = x.shape[1]
nlags = a.shape[0]
ntrim = nlags//2
# for x is 2d with ncols >1
if a.ndim == 1:
# case: identical ar filter (lag polynomial)
return signal.convolve(x, a[:,None], mode='valid')
# alternative:
#return signal.lfilter(a,[1],x.astype(float),axis=0)
elif a.ndim == 2:
if min(a.shape) == 1:
# case: identical ar filter (lag polynomial)
return signal.convolve(x, a, mode='valid')
# case: independent ar
#(a bit like recserar in gauss, but no x yet)
result = np.zeros((x.shape[0]-nlags+1, nvar))
for i in range(nvar):
            # could also use np.convolve, but easier for switching to fft
result[:,i] = signal.convolve(x[:,i], a[:,i], mode='valid')
return result
elif a.ndim == 3:
# case: vector autoregressive with lag matrices
# #not necessary:
# if np.any(a.shape[1:] != nvar):
# raise ValueError('if 3d shape of a has to be (nobs,nvar,nvar)')
yf = signal.convolve(x[:,:,None], a)
yvalid = yf[ntrim:-ntrim, yf.shape[1]//2,:]
        return yvalid | apply an autoregressive filter to a series x | arfilter | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/try_var_convolve.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/try_var_convolve.py | BSD-3-Clause |
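Case 1 above is plain convolution with the lag polynomial; a minimal check that `signal.convolve(..., mode='valid')` really computes y_t = sum_p a_p x_{t-p}:

```python
import numpy as np
from scipy import signal

#case 1: one lag polynomial a(L) applied to a single series
x = np.arange(10.0)
a = np.array([1.0, -0.5])   #y_t = x_t - 0.5 x_{t-1}

y = signal.convolve(x, a, mode='valid')
#manual check of the lag sum
y_manual = x[1:] - 0.5 * x[:-1]
assert np.allclose(y, y_manual)
print(y)
```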
def VAR(x,B, const=0):
''' multivariate linear filter
Parameters
----------
x: (TxK) array
columns are variables, rows are observations for time period
B: (PxKxK) array
b_t-1 is bottom "row", b_t-P is top "row" when printing
B(:,:,0) is lag polynomial matrix for variable 1
B(:,:,k) is lag polynomial matrix for variable k
B(p,:,k) is pth lag for variable k
B[p,:,:].T corresponds to A_p in Wikipedia
const : float or array (not tested)
constant added to autoregression
Returns
-------
xhat: (TxK) array
filtered, predicted values of x array
Notes
-----
xhat(t,i) = sum{_p}sum{_k} { x(t-P:t,:) .* B(:,:,i) } for all i = 0,K-1, for all t=p..T
xhat does not include the forecasting observation, xhat(T+1),
xhat is 1 row shorter than signal.correlate
References
----------
https://en.wikipedia.org/wiki/Vector_Autoregression
https://en.wikipedia.org/wiki/General_matrix_notation_of_a_VAR(p)
'''
p = B.shape[0]
T = x.shape[0]
xhat = np.zeros(x.shape)
for t in range(p,T): #[p+2]:#
## print(p,T)
## print(x[t-p:t,:,np.newaxis].shape)
## print(B.shape)
#print(x[t-p:t,:,np.newaxis])
xhat[t,:] = const + (x[t-p:t,:,np.newaxis]*B).sum(axis=1).sum(axis=0)
    return xhat | multivariate linear filter | VAR | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/varma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/varma.py | BSD-3-Clause |
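A minimal sketch of the indexing convention: for a VAR(1) the loop body reduces to xhat_t = A_1 x_{t-1} with `B[0] = A1.T` (per the docstring note that `B[p,:,:].T` corresponds to A_p), which can be verified against plain matrix multiplication:

```python
import numpy as np

A1 = np.array([[0.5, 0.1],
               [0.0, 0.3]])
B = A1.T[None, :, :]                   #shape (P=1, K=2, K=2)

rng = np.random.default_rng(1)
x = rng.normal(size=(6, 2))

P, T = B.shape[0], x.shape[0]
xhat = np.zeros_like(x)
for t in range(P, T):
    xhat[t, :] = (x[t-P:t, :, np.newaxis] * B).sum(axis=1).sum(axis=0)

assert np.allclose(xhat[1:], x[:-1] @ A1.T)   #one-step VAR(1) predictions
```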
def VARMA(x,B,C, const=0):
''' multivariate linear filter
x (TxK)
B (PxKxK)
xhat(t,i) = sum{_p}sum{_k} { x(t-P:t,:) .* B(:,:,i) } +
sum{_q}sum{_k} { e(t-Q:t,:) .* C(:,:,i) }for all i = 0,K-1
'''
P = B.shape[0]
Q = C.shape[0]
T = x.shape[0]
xhat = np.zeros(x.shape)
e = np.zeros(x.shape)
start = max(P,Q)
for t in range(start,T): #[p+2]:#
## print(p,T
## print(x[t-p:t,:,np.newaxis].shape
## print(B.shape
#print(x[t-p:t,:,np.newaxis]
xhat[t,:] = const + (x[t-P:t,:,np.newaxis]*B).sum(axis=1).sum(axis=0) + \
(e[t-Q:t,:,np.newaxis]*C).sum(axis=1).sum(axis=0)
e[t,:] = x[t,:] - xhat[t,:]
    return xhat, e | multivariate linear filter | VARMA | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/varma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/varma.py | BSD-3-Clause |
def padarr(self, arr, maxlag, atend=True):
'''pad 1d array with zeros at end to have length maxlag
function that is a method, no self used
Parameters
----------
arr : array_like, 1d
array that will be padded with zeros
maxlag : int
length of array after padding
atend : bool
If True (default), then the zeros are added to the end, otherwise
to the front of the array
Returns
-------
arrp : ndarray
zero-padded array
Notes
-----
This is mainly written to extend coefficient arrays for the lag-polynomials.
It returns a copy.
'''
if atend:
return np.r_[arr, np.zeros(maxlag-len(arr))]
else:
        return np.r_[np.zeros(maxlag-len(arr)), arr] | pad 1d array with zeros at end to have length maxlag | padarr | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def pad(self, maxlag):
'''construct AR and MA polynomials that are zero-padded to a common length
Parameters
----------
maxlag : int
new length of lag-polynomials
Returns
-------
ar : ndarray
extended AR polynomial coefficients
ma : ndarray
extended AR polynomial coefficients
'''
arpad = np.r_[self.ar, np.zeros(maxlag-self.nar)]
mapad = np.r_[self.ma, np.zeros(maxlag-self.nma)]
    return arpad, mapad | construct AR and MA polynomials that are zero-padded to a common length | pad | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def fftar(self, n=None):
'''Fourier transform of AR polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftar : ndarray
fft of zero-padded ar polynomial
'''
if n is None:
n = len(self.ar)
    return fft.fft(self.padarr(self.ar, n)) | Fourier transform of AR polynomial, zero-padded at end to n | fftar | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def fftma(self, n):
'''Fourier transform of MA polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
    fftma : ndarray
        fft of zero-padded ma polynomial
'''
    if n is None:
        n = len(self.ma)
    return fft.fft(self.padarr(self.ma, n)) | Fourier transform of MA polynomial, zero-padded at end to n | fftma | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def fftarma(self, n=None):
'''Fourier transform of ARMA polynomial, zero-padded at end to n
The Fourier transform of the ARMA process is calculated as the ratio
of the fft of the MA polynomial divided by the fft of the AR polynomial.
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftarma : ndarray
fft of zero-padded arma polynomial
'''
if n is None:
n = self.nobs
    return (self.fftma(n) / self.fftar(n)) | Fourier transform of ARMA polynomial, zero-padded at end to n | fftarma | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
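The ratio computed by `fftarma` is the ARMA transfer function evaluated at the fft frequencies; a standalone numpy check for an ARMA(1,1), using the same ar = (1, -0.5), ma = (1, 0.4) coefficients as the doctests further down:

```python
import numpy as np
from numpy import fft

ar = np.array([1.0, -0.5])
ma = np.array([1.0, 0.4])
n = 8

hw = fft.fft(ma, n) / fft.fft(ar, n)   #zero-padding to n is implicit in fft(x, n)
w = fft.fftfreq(n) * 2 * np.pi

#check against the closed-form transfer function H(w) = ma(e^{-iw}) / ar(e^{-iw})
hw_direct = (1 + 0.4 * np.exp(-1j * w)) / (1 - 0.5 * np.exp(-1j * w))
assert np.allclose(hw, hw_direct)
```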
def spd(self, npos):
'''raw spectral density, returns Fourier transform
n is number of points in positive spectrum, the actual number of points
is twice as large. different from other spd methods with fft
'''
n = npos
w = fft.fftfreq(2*n) * 2 * np.pi
hw = self.fftarma(2*n) #not sure, need to check normalization
#return (hw*hw.conj()).real[n//2-1:] * 0.5 / np.pi #does not show in plot
        return (hw*hw.conj()).real * 0.5 / np.pi, w | raw spectral density, returns Fourier transform | spd | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def spdshift(self, n):
'''power spectral density using fftshift
currently returns two-sided according to fft frequencies, use first half
'''
#size = s1+s2-1
mapadded = self.padarr(self.ma, n)
arpadded = self.padarr(self.ar, n)
hw = fft.fft(fft.fftshift(mapadded)) / fft.fft(fft.fftshift(arpadded))
#return np.abs(spd)[n//2-1:]
w = fft.fftfreq(n) * 2 * np.pi
        wslice = slice(n//2-1, None, None)  #used by the commented-out one-sided return below
#return (hw*hw.conj()).real[wslice], w[wslice]
        return (hw*hw.conj()).real, w | power spectral density using fftshift | spdshift | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def spddirect(self, n):
'''power spectral density using padding to length n done by fft
currently returns two-sided according to fft frequencies, use first half
'''
#size = s1+s2-1
#abs looks wrong
hw = fft.fft(self.ma, n) / fft.fft(self.ar, n)
w = fft.fftfreq(n) * 2 * np.pi
        return (np.abs(hw)**2) * 0.5/np.pi, w | power spectral density using padding to length n done by fft | spddirect | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def _spddirect2(self, n):
'''this looks bad, maybe with an fftshift
'''
#size = s1+s2-1
hw = (fft.fft(np.r_[self.ma[::-1],self.ma], n)
/ fft.fft(np.r_[self.ar[::-1],self.ar], n))
return (hw*hw.conj()) #.real[n//2-1:] | this looks bad, maybe with an fftshift | _spddirect2 | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def spdroots(self, w):
'''spectral density for frequency using polynomial roots
builds two arrays (number of roots, number of frequencies)
'''
        return self._spdroots(self.arroots, self.maroots, w) | spectral density for frequency using polynomial roots | spdroots | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def _spdroots(self, arroots, maroots, w):
'''spectral density for frequency using polynomial roots
builds two arrays (number of roots, number of frequencies)
Parameters
----------
arroots : ndarray
roots of ar (denominator) lag-polynomial
maroots : ndarray
roots of ma (numerator) lag-polynomial
w : array_like
frequencies for which spd is calculated
Notes
-----
this should go into a function
'''
w = np.atleast_2d(w).T
cosw = np.cos(w)
#Greene 5th edt. p626, section 20.2.7.a.
maroots = 1./maroots
arroots = 1./arroots
num = 1 + maroots**2 - 2* maroots * cosw
den = 1 + arroots**2 - 2* arroots * cosw
#print 'num.shape, den.shape', num.shape, den.shape
hw = 0.5 / np.pi * num.prod(-1) / den.prod(-1) #or use expsumlog
        return np.squeeze(hw), w.squeeze() | spectral density for frequency using polynomial roots | _spdroots | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def spdpoly(self, w, nma=50):
'''spectral density from MA polynomial representation for ARMA process
References
----------
Cochrane, section 8.3.3
'''
mpoly = np.polynomial.Polynomial(self.arma2ma(nma))
hw = mpoly(np.exp(1j * w))
spd = np.real_if_close(hw * hw.conj() * 0.5/np.pi)
        return spd, w | spectral density from MA polynomial representation for ARMA process | spdpoly | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def filter(self, x):
'''
filter a timeseries with the ARMA filter
padding with zero is missing, in example I needed the padding to get
initial conditions identical to direct filter
Initial filtered observations differ from filter2 and signal.lfilter, but
at end they are the same.
See Also
--------
tsa.filters.fftconvolve
'''
        n = x.shape[0]
        #self.fftarma is a method, not a cached array, so compute the
        #transfer function at the length of x directly
        fftarma = self.fftma(n) / self.fftar(n)
tmpfft = fftarma * fft.fft(x)
        return fft.ifft(tmpfft) | filter a timeseries with the ARMA filter | filter | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def filter2(self, x, pad=0):
'''filter a time series using fftconvolve3 with ARMA filter
padding of x currently works only if x is 1d
in example it produces same observations at beginning as lfilter even
without padding.
TODO: this returns 1 additional observation at the end
'''
from statsmodels.tsa.filters import fftconvolve3
if not pad:
pass
elif pad == 'auto':
#just guessing how much padding
x = self.padarr(x, x.shape[0] + 2*(self.nma+self.nar), atend=False)
else:
x = self.padarr(x, x.shape[0] + int(pad), atend=False)
        return fftconvolve3(x, self.ma, self.ar) | filter a time series using fftconvolve3 with ARMA filter | filter2 | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def acf2spdfreq(self, acovf, nfreq=100, w=None):
'''
not really a method
just for comparison, not efficient for large n or long acf
        this is also used similarly in tsa.stattools.periodogram with window
'''
if w is None:
w = np.linspace(0, np.pi, nfreq)[:, None]
nac = len(acovf)
hw = 0.5 / np.pi * (acovf[0] +
2 * (acovf[1:] * np.cos(w*np.arange(1,nac))).sum(1))
        return hw | not really a method | acf2spdfreq | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def invpowerspd(self, n):
'''autocovariance from spectral density
scaling is correct, but n needs to be large for numerical accuracy
maybe padding with zero in fft would be faster
without slicing it returns 2-sided autocovariance with fftshift
>>> ArmaFft([1, -0.5], [1., 0.4], 40).invpowerspd(2**8)[:10]
array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,
0.045 , 0.0225 , 0.01125 , 0.005625])
>>> ArmaFft([1, -0.5], [1., 0.4], 40).acovf(10)
array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,
0.045 , 0.0225 , 0.01125 , 0.005625])
'''
hw = self.fftarma(n)
        return np.real_if_close(fft.ifft(hw*hw.conj()), tol=200)[:n] | autocovariance from spectral density | invpowerspd | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def spdmapoly(self, w, twosided=False):
'''ma only, need division for ar, use LagPolynomial
'''
        if w is None:
            w = np.linspace(0, np.pi, 100)  #default frequency grid; no nfreq in scope
return 0.5 / np.pi * self.mapoly(np.exp(w*1j)) | ma only, need division for ar, use LagPolynomial | spdmapoly | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
"""Plot results"""
        rvs = self.generate_sample(nsample=nobs, burnin=500)
acf = self.acf(nacf)[:nacf] #TODO: check return length
pacf = self.pacf(nacf)
w = np.linspace(0, np.pi, nfreq)
spdr, wr = self.spdroots(w)
if fig is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
ax.plot(rvs)
ax.set_title(f'Random Sample \nar={self.ar}, ma={self.ma}')
ax = fig.add_subplot(2,2,2)
ax.plot(acf)
        ax.set_title(f'Autocorrelation \nar={self.ar}, ma={self.ma}')
ax = fig.add_subplot(2,2,3)
ax.plot(wr, spdr)
ax.set_title(f'Power Spectrum \nar={self.ar}, ma={self.ma}')
ax = fig.add_subplot(2,2,4)
ax.plot(pacf)
ax.set_title(f'Partial Autocorrelation \nar={self.ar}, ma={self.ma}')
return fig | Plot results | plot4 | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/fftarma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/fftarma.py | BSD-3-Clause |
def movorder(x, order = 'med', windsize=3, lag='lagged'):
'''moving order statistics
Parameters
----------
x : ndarray
time series data
order : float or 'med', 'min', 'max'
which order statistic to calculate
windsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
filtered array
'''
#if windsize is even should it raise ValueError
if lag == 'lagged':
lead = windsize//2
elif lag == 'centered':
lead = 0
elif lag == 'leading':
lead = -windsize//2 +1
else:
raise ValueError
if np.isfinite(order): #if np.isnumber(order):
ord = order # note: ord is a builtin function
elif order == 'med':
        ord = (windsize - 1)//2  #rank passed to order_filter must be an integer
elif order == 'min':
ord = 0
elif order == 'max':
ord = windsize - 1
else:
raise ValueError
#return signal.order_filter(x,np.ones(windsize),ord)[:-lead]
xext = expandarr(x, windsize)
#np.r_[np.ones(windsize)*x[0],x,np.ones(windsize)*x[-1]]
    return signal.order_filter(xext,np.ones(windsize),ord)[windsize-lead:-(windsize+lead)] | moving order statistics | movorder | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/movstat.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/movstat.py | BSD-3-Clause |
def check_movorder():
'''graphical test for movorder'''
    import matplotlib.pyplot as plt
x = np.arange(1,10)
xo = movorder(x, order='max')
assert_array_equal(xo, x)
x = np.arange(10,1,-1)
xo = movorder(x, order='min')
assert_array_equal(xo, x)
assert_array_equal(movorder(x, order='min', lag='centered')[:-1], x[1:])
tt = np.linspace(0,2*np.pi,15)
x = np.sin(tt) + 1
xo = movorder(x, order='max')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max lagged')
xo = movorder(x, order='max', lag='centered')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max centered')
xo = movorder(x, order='max', lag='leading')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max leading') | graphical test for movorder | check_movorder | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/movstat.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/movstat.py | BSD-3-Clause |
def movmean(x, windowsize=3, lag='lagged'):
'''moving window mean
Parameters
----------
x : ndarray
time series data
    windowsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : ndarray
moving mean, with same shape as x
Notes
-----
for leading and lagging the data array x is extended by the closest value of the array
'''
    return movmoment(x, 1, windowsize=windowsize, lag=lag) | moving window mean | movmean | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/movstat.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/movstat.py | BSD-3-Clause |
def movvar(x, windowsize=3, lag='lagged'):
'''moving window variance
Parameters
----------
x : ndarray
time series data
    windowsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : ndarray
moving variance, with same shape as x
'''
m1 = movmoment(x, 1, windowsize=windowsize, lag=lag)
m2 = movmoment(x, 2, windowsize=windowsize, lag=lag)
    return m2 - m1*m1 | moving window variance | movvar | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/movstat.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/movstat.py | BSD-3-Clause |
def movmoment(x, k, windowsize=3, lag='lagged'):
'''non-central moment
Parameters
----------
x : ndarray
        time series data
    k : int
        order of the non-central moment
    windowsize : int
        window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : ndarray
k-th moving non-central moment, with same shape as x
Notes
-----
If data x is 2d, then moving moment is calculated for each
column.
'''
windsize = windowsize
#if windsize is even should it raise ValueError
if lag == 'lagged':
#lead = -0 + windsize #windsize//2
lead = -0# + (windsize-1) + windsize//2
sl = slice((windsize-1) or None, -2*(windsize-1) or None)
elif lag == 'centered':
lead = -windsize//2 #0#-1 #+ #(windsize-1)
sl = slice((windsize-1)+windsize//2 or None, -(windsize-1)-windsize//2 or None)
elif lag == 'leading':
#lead = -windsize +1#+1 #+ (windsize-1)#//2 +1
lead = -windsize +2 #-windsize//2 +1
sl = slice(2*(windsize-1)+1+lead or None, -(2*(windsize-1)+lead)+1 or None)
else:
raise ValueError
avgkern = (np.ones(windowsize)/float(windowsize))
xext = expandarr(x, windsize-1)
#Note: expandarr increases the array size by 2*(windsize-1)
#sl = slice(2*(windsize-1)+1+lead or None, -(2*(windsize-1)+lead)+1 or None)
if xext.ndim == 1:
return np.correlate(xext**k, avgkern, 'full')[sl]
#return np.correlate(xext**k, avgkern, 'same')[windsize-lead:-(windsize+lead)]
else:
# try first with 2d along columns, possibly ndim with axis
        return signal.correlate(xext**k, avgkern[:,None], 'full')[sl,:] | non-central moment | movmoment | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/movstat.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/movstat.py | BSD-3-Clause |
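The `movvar = m2 - m1*m1` decomposition above is the population-variance identity applied per window; a small sketch with explicit windows (edge handling differs from the `expandarr` padding used in this module, so only full windows are compared):

```python
import numpy as np

x = np.array([1.0, 2.0, 4.0, 7.0, 11.0, 16.0])
w = 3

#lagged moving first and second non-central moments
m1 = np.array([x[max(0, t-w+1):t+1].mean() for t in range(len(x))])
m2 = np.array([(x[max(0, t-w+1):t+1]**2).mean() for t in range(len(x))])
mvar = m2 - m1**2

#interior points (full windows) agree with the population variance of each window
t = 4
assert np.isclose(mvar[t], np.var(x[t-w+1:t+1]))
print(mvar)
```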
def simulateW(self, nobs=100, T=1, dt=None, nrepl=1):
'''generate sample of Wiener Process
'''
        if dt is None:
            dt = T*1.0/nobs
        t = np.linspace(dt, T, nobs)
dW = np.sqrt(dt)*np.random.normal(size=(nrepl, nobs))
W = np.cumsum(dW,1)
self.dW = dW
return W, t | generate sample of Wiener Process | simulateW | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/diffusion.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py | BSD-3-Clause |
def expectedsim(self, func, nobs=100, T=1, dt=None, nrepl=1):
'''get expectation of a function of a Wiener Process by simulation
initially test example from
'''
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
U = func(t, W)
Umean = U.mean(0)
        return U, Umean, t | get expectation of a function of a Wiener Process by simulation | expectedsim | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/diffusion.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py | BSD-3-Clause |
def simEM(self, xzero=None, nobs=100, T=1, dt=None, nrepl=1, Tratio=4):
'''
from Higham 2001
TODO: reverse parameterization to start with final nobs and DT
TODO: check if I can skip the loop using my way from exactprocess
problem might be Winc (reshape into 3d and sum)
TODO: (later) check memory efficiency for large simulations
'''
#TODO: reverse parameterization to start with final nobs and DT
nobs = nobs * Tratio # simple way to change parameter
# maybe wrong parameterization,
# drift too large, variance too small ? which dt/Dt
# _drift, _sig independent of dt is wrong
if xzero is None:
xzero = self.xzero
dW = self.dW
        L = nobs//Tratio # L EM steps of size Dt = R*dt; integer for array shapes
Xem = np.zeros((nrepl,L)) # preallocate for efficiency
Xtemp = xzero
Xem[:,0] = xzero
for j in np.arange(1,L):
#Winc = np.sum(dW[:,Tratio*(j-1)+1:Tratio*j],1)
Winc = np.sum(dW[:,np.arange(Tratio*(j-1)+1,Tratio*j)],1)
#Xtemp = Xtemp + Dt*lamda*Xtemp + mu*Xtemp*Winc;
Xtemp = Xtemp + self._drift(x=Xtemp) + self._sig(x=Xtemp) * Winc
#Dt*lamda*Xtemp + mu*Xtemp*Winc;
Xem[:,j] = Xtemp
        return Xem | from Higham 2001 | simEM | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/diffusion.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py | BSD-3-Clause |
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''ddt : discrete delta t
should be the same as an AR(1)
not tested yet
'''
        #t = np.linspace(ddt, nobs*ddt, nobs)
#expnt = np.exp(-self.lambd * t)
expddt = np.exp(-self.lambd * ddt)
normrvs = np.random.normal(size=(nrepl,nobs))
#do I need lfilter here AR(1) ? if mean reverting lag-coeff<1
#lfilter does not handle 2d arrays, it does?
inc = self._exactconst(expddt) + self._exactstd(expddt) * normrvs
        return signal.lfilter([1.], [1.,-expddt], inc) | ddt : discrete delta t | exactprocess | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/diffusion.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py | BSD-3-Clause |
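The `lfilter([1.], [1., -expddt], inc)` call is the AR(1) recursion that the exact OU discretization reduces to; a sketch verifying the recursion against an explicit loop, using the same increment formula as the mean-reverting variant further down:

```python
import numpy as np
from scipy import signal

#exact discretization of an OU process is an AR(1):
#x_t = mu*(1 - b) + b*x_{t-1} + e_t,  with b = exp(-lambd*ddt)
lambd, mu, sigma, ddt = 1.0, 2.0, 0.5, 0.1
b = np.exp(-lambd * ddt)

rng = np.random.default_rng(2)
nobs = 5
inc = mu * (1 - b) + sigma * np.sqrt((1 - b**2) / (2 * lambd)) * rng.normal(size=nobs)

x = signal.lfilter([1.0], [1.0, -b], inc)
#lfilter with denominator (1, -b) builds the recursion x_t = b*x_{t-1} + inc_t
x_manual = np.zeros(nobs)
for t in range(nobs):
    x_manual[t] = (b * x_manual[t-1] if t > 0 else 0.0) + inc[t]
assert np.allclose(x, x_manual)
```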
def exactprocess(self, nobs, xzero=None, ddt=1., nrepl=2):
'''ddt : discrete delta t
not tested yet
'''
if xzero is None:
xzero = self.xzero
        #t = np.linspace(ddt, nobs*ddt, nobs)
normrvs = np.random.normal(size=(nrepl,nobs))
inc = self._drift + self._sigma * np.sqrt(ddt) * normrvs
#return signal.lfilter([1.], [1.,-1], inc)
        return xzero + np.cumsum(inc,1) | ddt : discrete delta t | exactprocess | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/diffusion.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py | BSD-3-Clause |
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''ddt : discrete delta t
should be the same as an AR(1)
not tested yet
# after writing this I saw the same use of lfilter in sitmo
'''
t = np.linspace(ddt, nobs*ddt, nobs)
        #expnt = np.exp(-self.lambd * t)   #only needed for the commented xzero*expnt term
expddt = np.exp(-self.lambd * ddt)
normrvs = np.random.normal(size=(nrepl,nobs))
#do I need lfilter here AR(1) ? lfilter does not handle 2d arrays, it does?
from scipy import signal
#xzero * expnt
inc = ( self.mu * (1-expddt) +
self.sigma * np.sqrt((1-expddt*expddt)/2./self.lambd) * normrvs )
        return signal.lfilter([1.], [1.,-expddt], inc) | ddt : discrete delta t | exactprocess | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/diffusion.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py | BSD-3-Clause |
def fitls(self, data, dt):
'''assumes data is 1d, univariate time series
formula from sitmo
'''
# brute force, no parameter estimation errors
nobs = len(data)-1
exog = np.column_stack((np.ones(nobs), data[:-1]))
parest, res, rank, sing = np.linalg.lstsq(exog, data[1:], rcond=-1)
const, slope = parest
errvar = res/(nobs-2.)
lambd = -np.log(slope)/dt
sigma = np.sqrt(-errvar * 2.*np.log(slope)/ (1-slope**2)/dt)
mu = const / (1-slope)
        return mu, lambd, sigma | assumes data is 1d, univariate time series | fitls | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/diffusion.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py | BSD-3-Clause |
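A quick end-to-end sketch: simulate the exact AR(1) discretization of an OU process, then recover (mu, lambd, sigma) with the same least-squares formulas as `fitls`, re-implemented inline so the snippet is self-contained:

```python
import numpy as np

rng = np.random.default_rng(3)
mu, lambd, sigma, dt, nobs = 2.0, 1.0, 0.5, 0.1, 50_000

b = np.exp(-lambd * dt)
estd = sigma * np.sqrt((1 - b**2) / (2 * lambd))
x = np.empty(nobs)
x[0] = mu
for t in range(1, nobs):
    x[t] = mu * (1 - b) + b * x[t-1] + estd * rng.normal()

#least-squares regression of x_t on (1, x_{t-1}), as in fitls
exog = np.column_stack((np.ones(nobs - 1), x[:-1]))
(const, slope), res, _, _ = np.linalg.lstsq(exog, x[1:], rcond=None)
errvar = res[0] / (nobs - 1 - 2.)
lambd_hat = -np.log(slope) / dt
sigma_hat = np.sqrt(-errvar * 2. * np.log(slope) / (1 - slope**2) / dt)
mu_hat = const / (1 - slope)
print(mu_hat, lambd_hat, sigma_hat)   #close to (2.0, 1.0, 0.5)
```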
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''uses exact solution for log of process
'''
        lnxzero = np.log(xzero)
        lnx = super(self.__class__, self).exactprocess(lnxzero, nobs, ddt=ddt,
                                                       nrepl=nrepl)
return np.exp(lnx) | uses exact solution for log of process | exactprocess | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/diffusion.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py | BSD-3-Clause |
def fitls(self, data, dt):
'''assumes data is 1d, univariate time series
formula from sitmo
'''
# brute force, no parameter estimation errors
nobs = len(data)-1
exog = np.column_stack((np.ones(nobs),np.log(data[:-1])))
parest, res, rank, sing = np.linalg.lstsq(exog, np.log(data[1:]), rcond=-1)
const, slope = parest
errvar = res/(nobs-2.) #check denominator estimate, of sigma too low
kappa = -np.log(slope)/dt
sigma = np.sqrt(errvar * kappa / (1-np.exp(-2*kappa*dt)))
mu = const / (1-np.exp(-kappa*dt)) + sigma**2/2./kappa
if np.shape(mu)== (1,):
mu = mu[0] # TODO: how to remove scalar array ?
if np.shape(sigma)== (1,):
sigma = sigma[0]
#mu, kappa are good, sigma too small
        return mu, kappa, sigma | assumes data is 1d, univariate time series | fitls | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/diffusion.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/diffusion.py | BSD-3-Clause |
def acovf_explicit(ar, ma, nobs):
    '''add correlation of MA representation explicitly
'''
ir = arma_impulse_response(ar, ma)
acovfexpl = [np.dot(ir[:nobs-t], ir[t:nobs]) for t in range(10)]
return acovfexpl | add correlation of MA representation explicitely | acovf_explicit | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/example_arma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/example_arma.py | BSD-3-Clause |
def autocorr(s, axis=-1):
"""Returns the autocorrelation of signal s at all lags. Adheres to the
definition r(k) = E{s(n)s*(n-k)} where E{} is the expectation operator.
"""
N = s.shape[axis]
S = np.fft.fft(s, n=2*N-1, axis=axis)
sxx = np.fft.ifft(S*S.conjugate(), axis=axis).real[:N]
    return sxx/N | Returns the autocorrelation of signal s at all lags. | autocorr | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/example_arma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/example_arma.py | BSD-3-Clause |
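A check that the zero-padding to 2N-1 in `autocorr` avoids circular wrap-around, so the fft route matches the direct (biased, divided by N) lag sums:

```python
import numpy as np

def autocorr_fft(s):
    #autocorrelation via zero-padded fft, as in autocorr above (1d case)
    N = s.shape[-1]
    S = np.fft.fft(s, n=2*N-1)
    return np.fft.ifft(S * S.conjugate()).real[:N] / N

rng = np.random.default_rng(4)
s = rng.normal(size=64)

#direct computation of r(k) = (1/N) * sum_n s(n) s(n-k)
direct = np.array([np.dot(s[k:], s[:len(s)-k]) for k in range(len(s))]) / len(s)
assert np.allclose(autocorr_fft(s), direct)
```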
def norm_corr(x,y,mode = 'valid'):
"""Returns the correlation between two ndarrays, by calling np.correlate in
'same' mode and normalizing the result by the std of the arrays and by
their lengths. This results in a correlation = 1 for an auto-correlation"""
return ( np.correlate(x,y,mode) /
             (np.std(x)*np.std(y)*(x.shape[-1])) ) | Returns the correlation between two ndarrays, normalized to 1 for an auto-correlation | norm_corr | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/example_arma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/example_arma.py | BSD-3-Clause |
def pltxcorr(self, x, y, normed=True, detrend=detrend_none,
usevlines=True, maxlags=10, **kwargs):
"""
call signature::
def xcorr(self, x, y, normed=True, detrend=detrend_none,
usevlines=True, maxlags=10, **kwargs):
Plot the cross correlation between *x* and *y*. If *normed* =
*True*, normalize the data by the cross correlation at 0-th
lag. *x* and y are detrended by the *detrend* callable
(default no normalization). *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed:
c /= np.sqrt(np.dot(x, x) * np.dot(y, y))
if maxlags is None:
maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
raise ValueError('maxlags must be None or strictly '
'positive < %d' % Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
self.plot(lags, c, **kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b | call signature::
def xcorr(self, x, y, normed=True, detrend=detrend_none,
usevlines=True, maxlags=10, **kwargs):
Plot the cross correlation between *x* and *y*. If *normed* =
*True*, normalize the data by the cross correlation at 0-th
lag. *x* and *y* are detrended by the *detrend* callable
(default: no detrending). *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
The default is 10; passing *None* returns all ``(2*len(x)-1)`` lags.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py | pltxcorr | python | statsmodels/statsmodels | statsmodels/sandbox/tsa/example_arma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tsa/example_arma.py | BSD-3-Clause |
def _ll_nb1(y, X, beta, alph):
'''Negative Binomial regression (type 1 likelihood)'''
ll = _ll_nbp(y, X, beta, alph, Q=1)
return ll | Negative Binomial regression (type 1 likelihood) | _ll_nb1 | python | statsmodels/statsmodels | statsmodels/sandbox/examples/example_nbin.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/examples/example_nbin.py | BSD-3-Clause |
def _ll_nb2(y, X, beta, alph):
'''Negative Binomial regression (type 2 likelihood)'''
ll = _ll_nbp(y, X, beta, alph, Q=0)
return ll | Negative Binomial regression (type 2 likelihood) | _ll_nb2 | python | statsmodels/statsmodels | statsmodels/sandbox/examples/example_nbin.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/examples/example_nbin.py | BSD-3-Clause |
def _ll_geom(y, X, beta):
'''Geometric regression'''
ll = _ll_nbp(y, X, beta, alph=1, Q=0)
return ll | Geometric regression | _ll_geom | python | statsmodels/statsmodels | statsmodels/sandbox/examples/example_nbin.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/examples/example_nbin.py | BSD-3-Clause |
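_ll_nbp itself is defined elsewhere in this module, but for Q=0 it reduces to the standard NB2 gamma-Poisson mixture log-likelihood. A self-contained sketch of that special case (the function name nb2_loglike and the parameter layout are illustrative, not part of the module):

import numpy as np
from scipy.special import gammaln

def nb2_loglike(y, X, beta, alph):
    # NB2: Var(y) = mu + alph * mu**2 with mu = exp(X @ beta)
    mu = np.exp(X @ beta)
    size = 1. / alph                  # gamma shape parameter
    prob = size / (size + mu)         # NB success probability, so E[y] = mu
    ll = (gammaln(y + size) - gammaln(size) - gammaln(y + 1)
          + size * np.log(prob) + y * np.log(1 - prob))
    return ll.sum()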
def print_results2(res):
groupind = res.groups
#res.fitjoint() #not really necessary, because called by ftest_summary
ft = res.ftest_summary()
#print ft[0] #skip because table is nicer
templ = \
'''Table of F-tests for overall or pairwise equality of coefficients'
%(tab)s
Notes: p-values are not corrected for many tests
(no Bonferroni correction)
* : reject at 5%% uncorrected confidence level
Null hypothesis: all or pairwise coefficient are the same'
Alternative hypothesis: all coefficients are different'
Comparison with stats.f_oneway
%(statsfow)s
Likelihood Ratio Test
%(lrtest)s
Null model: pooled all coefficients are the same across groups,'
Alternative model: all coefficients are allowed to be different'
not verified but looks close to f-test result'
OLS parameters by group from individual, separate ols regressions'
%(olsbg)s
for group in sorted(res.olsbygroup):
r = res.olsbygroup[group]
print group, r.params
Check for heteroscedasticity, '
variance and standard deviation for individual regressions'
%(grh)s
variance ', res.sigmabygroup
standard dev', np.sqrt(res.sigmabygroup)
'''
from statsmodels.iolib import SimpleTable
resvals = {}
resvals['tab'] = str(SimpleTable([([f'{row[0]!r}']
+ list(row[1])
                              + ['*']*(row[1][1] < 0.05).item() ) for row in ft[1]],  # star when p-value < .05, matching the legend in the template
headers=['pair', 'F-statistic','p-value','df_denom',
'df_num']))
resvals['statsfow'] = str(stats.f_oneway(*[y[groupind==gr] for gr in
res.unique]))
#resvals['lrtest'] = str(res.lr_test())
resvals['lrtest'] = str(SimpleTable([res.lr_test()],
headers=['likelihood ratio', 'p-value', 'df'] ))
resvals['olsbg'] = str(SimpleTable([[group]
+ res.olsbygroup[group].params.tolist()
for group in sorted(res.olsbygroup)]))
resvals['grh'] = str(SimpleTable(np.vstack([res.sigmabygroup,
np.sqrt(res.sigmabygroup)]),
headers=res.unique.tolist()))
return templ % resvals | Table of F-tests for overall or pairwise equality of coefficients'
%(tab)s
Notes: p-values are not corrected for many tests
(no Bonferroni correction)
* : reject at 5%% uncorrected confidence level
Null hypothesis: all or pairwise coefficient are the same'
Alternative hypothesis: all coefficients are different'
Comparison with stats.f_oneway
%(statsfow)s
Likelihood Ratio Test
%(lrtest)s
Null model: pooled all coefficients are the same across groups,'
Alternative model: all coefficients are allowed to be different'
not verified but looks close to f-test result'
OLS parameters by group from individual, separate ols regressions'
%(olsbg)s
for group in sorted(res.olsbygroup):
r = res.olsbygroup[group]
print group, r.params
Check for heteroscedasticity, '
variance and standard deviation for individual regressions'
%(grh)s
variance ', res.sigmabygroup
standard dev', np.sqrt(res.sigmabygroup) | print_results2 | python | statsmodels/statsmodels | statsmodels/sandbox/examples/ex_onewaygls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/examples/ex_onewaygls.py | BSD-3-Clause |
def loglike_ar1(x, rho):
'''loglikelihood of AR(1) process, as a test case
sigma_u partially hard coded
Greene chapter 12 eq. (12-31)
'''
x = np.asarray(x)
u = np.r_[x[0], x[1:] - rho * x[:-1]]
sigma_u2 = 2*(1-rho**2)
loglik = 0.5*(-(u**2).sum(0) / sigma_u2 + np.log(1-rho**2)
- x.shape[0] * (np.log(2*np.pi) + np.log(sigma_u2)))
return loglik | loglikelihood of AR(1) process, as a test case
sigma_u partially hard coded
Greene chapter 12 eq. (12-31) | loglike_ar1 | python | statsmodels/statsmodels | statsmodels/sandbox/archive/linalg_covmat.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/archive/linalg_covmat.py | BSD-3-Clause |
def ar2transform(x, arcoefs):
'''
(Greene eq 12-30)
'''
a1, a2 = arcoefs
y = np.zeros_like(x)
y[0] = np.sqrt((1+a2) * ((1-a2)**2 - a1**2) / (1-a2)) * x[0]
    y[1] = np.sqrt(1-a2**2) * x[1] - a1 * np.sqrt(1-a2**2)/(1-a2) * x[0]  # indices shifted per the old TODO; both factors use a2, following Greene eq. 12-30
y[2:] = x[2:] - a1 * x[1:-1] - a2 * x[:-2]
return y | (Greene eq 12-30) | ar2transform | python | statsmodels/statsmodels | statsmodels/sandbox/archive/linalg_covmat.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/archive/linalg_covmat.py | BSD-3-Clause |
def mvn_loglike(x, sigma):
'''loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
'''
#see numpy thread
#Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
sigmainv = linalg.inv(sigma)
logdetsigma = np.log(np.linalg.det(sigma))
nobs = len(x)
llf = - np.dot(x, np.dot(sigmainv, x))
llf -= nobs * np.log(2 * np.pi)
llf -= logdetsigma
llf *= 0.5
return llf | loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
use of inv and log-det should be replaced with something more efficient | mvn_loglike | python | statsmodels/statsmodels | statsmodels/sandbox/archive/linalg_covmat.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/archive/linalg_covmat.py | BSD-3-Clause
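Reusing mvn_loglike as defined above (note it assumes a zero mean vector), the brute-force formula can be checked against scipy's density; the covariance below is just an arbitrary positive-definite example.

import numpy as np
from scipy import stats

rng = np.random.default_rng(1)
A = rng.standard_normal((4, 4))
sigma = A @ A.T + 4 * np.eye(4)          # positive definite covariance
x = rng.standard_normal(4)
print(np.allclose(mvn_loglike(x, sigma),
                  stats.multivariate_normal.logpdf(x, mean=np.zeros(4), cov=sigma)))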
def mvn_nloglike_obs(x, sigma):
'''loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
'''
#see numpy thread
#Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
#Still wasteful to calculate pinv first
sigmainv = linalg.inv(sigma)
cholsigmainv = linalg.cholesky(sigmainv)
#2 * np.sum(np.log(np.diagonal(np.linalg.cholesky(A)))) #Dag mailinglist
# logdet not needed ???
#logdetsigma = 2 * np.sum(np.log(np.diagonal(cholsigmainv)))
x_whitened = np.dot(cholsigmainv, x)
    #sigmainv = linalg.cholesky(sigma)
    #np.log(np.linalg.det(sigma))  # leftover computation, result was unused; kept commented out
sigma2 = 1. # error variance is included in sigma
llike = 0.5 * (np.log(sigma2) - 2.* np.log(np.diagonal(cholsigmainv))
+ (x_whitened**2)/sigma2
+ np.log(2*np.pi))
return llike, (x_whitened**2) | loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
use of inv and log-det should be replaced with something more efficient | mvn_nloglike_obs | python | statsmodels/statsmodels | statsmodels/sandbox/archive/linalg_covmat.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/archive/linalg_covmat.py | BSD-3-Clause
def yt_minv_y(self, y):
    '''y' Sigma^{-1} y, quadratic form with the inverse of self.m
    does not use stored cholesky yet
    '''
    # fixed: the original used an undefined name x instead of the argument y
    return np.dot(y, linalg.cho_solve(linalg.cho_factor(self.m), y)) | y' Sigma^{-1} y, quadratic form with the inverse of self.m
does not use stored cholesky yet | yt_minv_y | python | statsmodels/statsmodels | statsmodels/sandbox/archive/linalg_decomp_1.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/archive/linalg_decomp_1.py | BSD-3-Clause
def tiny2zero(x, eps = 1e-15):
    '''replace abs values smaller than eps by zero, makes copy
    '''
    x = x.copy()  # copy first so the input array is not mutated, as the docstring promises
    mask = np.abs(x) < eps
    x[mask] = 0
    return x | replace abs values smaller than eps by zero, makes copy | tiny2zero | python | statsmodels/statsmodels | statsmodels/sandbox/archive/linalg_decomp_1.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/archive/linalg_decomp_1.py | BSD-3-Clause
def acovf_fft(x, demean=True):
'''autocovariance function with call to fftconvolve, biased
Parameters
----------
x : array_like
timeseries, signal
demean : bool
If true, then demean time series
Returns
-------
acovf : ndarray
autocovariance for data, same length as x
might work for nd in parallel with time along axis 0
'''
from scipy import signal
x = np.asarray(x)
if demean:
x = x - x.mean()
    return signal.fftconvolve(x, x[::-1])[len(x) - 1:] / x.shape[0]  # return added; full-length slice matches the docstring | autocovariance function with call to fftconvolve, biased
Parameters
----------
x : array_like
timeseries, signal
demean : bool
If true, then demean time series
Returns
-------
acovf : ndarray
autocovariance for data, same length as x
might work for nd in parallel with time along axis 0 | acovf_fft | python | statsmodels/statsmodels | statsmodels/sandbox/archive/tsa.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/archive/tsa.py | BSD-3-Clause |
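With the return statement restored above, the FFT version agrees with a direct biased autocovariance; np.correlate in 'full' mode computes the same sums of lagged products.

import numpy as np

rng = np.random.default_rng(2)
x = rng.standard_normal(200)
xd = x - x.mean()
direct = np.correlate(xd, xd, mode='full')[len(x) - 1:] / len(x)
print(np.allclose(acovf_fft(x), direct))   # True up to floating point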
def _opt_wtd_nuis_regress(self, test_vals):
"""
A function that is optimized over nuisance parameters to conduct a
hypothesis test for the parameters of interest
Parameters
----------
params: 1d array
The regression coefficients of the model. This includes the
        nuisance and the parameters of interest.
Returns
-------
llr : float
-2 times the log likelihood of the nuisance parameters and the
hypothesized value of the parameter(s) of interest.
"""
test_params = test_vals.reshape(self.model.nvar, 1)
est_vect = self.model.uncens_exog * (self.model.uncens_endog -
np.dot(self.model.uncens_exog,
test_params))
eta_star = self._modif_newton(np.zeros(self.model.nvar), est_vect,
self.model._fit_weights)
denom = np.sum(self.model._fit_weights) + np.dot(eta_star, est_vect.T)
self.new_weights = self.model._fit_weights / denom
return -1 * np.sum(np.log(self.new_weights)) | A function that is optimized over nuisance parameters to conduct a
hypothesis test for the parameters of interest
Parameters
----------
params: 1d array
The regression coefficients of the model. This includes the
nuisance and the parameters of interest.
Returns
-------
llr : float
-2 times the log likelihood of the nuisance parameters and the
hypothesized value of the parameter(s) of interest. | _opt_wtd_nuis_regress | python | statsmodels/statsmodels | statsmodels/emplike/aft_el.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/aft_el.py | BSD-3-Clause |
def _EM_test(self, nuisance_params, params=None, param_nums=None,
b0_vals=None, F=None, survidx=None, uncens_nobs=None,
numcensbelow=None, km=None, uncensored=None, censored=None,
maxiter=None, ftol=None):
"""
Uses EM algorithm to compute the maximum likelihood of a test
Parameters
----------
nuisance_params : ndarray
Vector of values to be used as nuisance params.
maxiter : int
Number of iterations in the EM algorithm for a parameter vector
Returns
-------
-2 ''*'' log likelihood ratio at hypothesized values and
nuisance params
Notes
-----
Optional parameters are provided by the test_beta function.
"""
iters = 0
params[param_nums] = b0_vals
nuis_param_index = np.int_(np.delete(np.arange(self.model.nvar),
param_nums))
params[nuis_param_index] = nuisance_params
to_test = params.reshape(self.model.nvar, 1)
opt_res = np.inf
diff = np.inf
while iters < maxiter and diff > ftol:
F = F.flatten()
death = np.cumsum(F[::-1])
survivalprob = death[::-1]
surv_point_mat = np.dot(F.reshape(-1, 1),
1. / survivalprob[survidx].reshape(1, - 1))
surv_point_mat = add_constant(surv_point_mat)
summed_wts = np.cumsum(surv_point_mat, axis=1)
wts = summed_wts[np.int_(np.arange(uncens_nobs)),
numcensbelow[uncensored]]
# ^E step
# See Zhou 2005, section 3.
self.model._fit_weights = wts
new_opt_res = self._opt_wtd_nuis_regress(to_test)
# ^ Uncensored weights' contribution to likelihood value.
F = self.new_weights
# ^ M step
diff = np.abs(new_opt_res - opt_res)
opt_res = new_opt_res
iters = iters + 1
death = np.cumsum(F.flatten()[::-1])
survivalprob = death[::-1]
llike = -opt_res + np.sum(np.log(survivalprob[survidx]))
wtd_km = km.flatten() / np.sum(km)
survivalmax = np.cumsum(wtd_km[::-1])[::-1]
llikemax = np.sum(np.log(wtd_km[uncensored])) + \
np.sum(np.log(survivalmax[censored]))
if iters == maxiter:
warnings.warn('The EM reached the maximum number of iterations',
IterationLimitWarning)
return -2 * (llike - llikemax) | Uses EM algorithm to compute the maximum likelihood of a test
Parameters
----------
nuisance_params : ndarray
Vector of values to be used as nuisance params.
maxiter : int
Number of iterations in the EM algorithm for a parameter vector
Returns
-------
-2 ''*'' log likelihood ratio at hypothesized values and
nuisance params
Notes
-----
Optional parameters are provided by the test_beta function. | _EM_test | python | statsmodels/statsmodels | statsmodels/emplike/aft_el.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/aft_el.py | BSD-3-Clause |
def _ci_limits_beta(self, b0, param_num=None):
"""
Returns the difference between the log likelihood for a
parameter and some critical value.
Parameters
----------
b0: float
Value of a regression parameter
param_num : int
Parameter index of b0
"""
return self.test_beta([b0], [param_num])[0] - self.r0 | Returns the difference between the log likelihood for a
parameter and some critical value.
Parameters
----------
b0: float
Value of a regression parameter
param_num : int
Parameter index of b0 | _ci_limits_beta | python | statsmodels/statsmodels | statsmodels/emplike/aft_el.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/aft_el.py | BSD-3-Clause |
def _is_tied(self, endog, censors):
"""
Indicated if an observation takes the same value as the next
ordered observation.
Parameters
----------
endog : ndarray
Models endogenous variable
censors : ndarray
            array indicating which observations are censored
Returns
-------
indic_ties : ndarray
ties[i]=1 if endog[i]==endog[i+1] and
censors[i]=censors[i+1]
"""
nobs = int(self.nobs)
endog_idx = endog[np.arange(nobs - 1)] == (
endog[np.arange(nobs - 1) + 1])
censors_idx = censors[np.arange(nobs - 1)] == (
censors[np.arange(nobs - 1) + 1])
indic_ties = endog_idx * censors_idx # Both true
        return np.int_(indic_ties) | Indicates whether an observation takes the same value as the next
ordered observation.
Parameters
----------
endog : ndarray
Models endogenous variable
censors : ndarray
array indicating which observations are censored
Returns
-------
indic_ties : ndarray
ties[i]=1 if endog[i]==endog[i+1] and
censors[i]=censors[i+1] | _is_tied | python | statsmodels/statsmodels | statsmodels/emplike/aft_el.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/aft_el.py | BSD-3-Clause |
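A small illustration of the tie indicator on hypothetical sorted data; position i is flagged only when both the response and the censoring status repeat at i+1.

import numpy as np

endog = np.array([1., 2., 2., 2., 3.])
censors = np.array([1, 1, 0, 0, 1])
ties = np.int_((endog[:-1] == endog[1:]) & (censors[:-1] == censors[1:]))
print(ties)   # [0 0 1 0]: only the two censored 2.0's form a tie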
def _km_w_ties(self, tie_indic, untied_km):
"""
        Computes the KM estimator value at each observation, taking into account
ties in the data.
Parameters
----------
tie_indic: 1d array
            Indicates if the i'th observation is the same as the (i+1)th
untied_km: 1d array
Km estimates at each observation assuming no ties.
"""
# TODO: Vectorize, even though it is only 1 pass through for any
# function call
num_same = 1
idx_nums = []
for obs_num in np.arange(int(self.nobs - 1))[::-1]:
if tie_indic[obs_num] == 1:
idx_nums.append(obs_num)
num_same = num_same + 1
untied_km[obs_num] = untied_km[obs_num + 1]
elif tie_indic[obs_num] == 0 and num_same > 1:
idx_nums.append(max(idx_nums) + 1)
idx_nums = np.asarray(idx_nums)
untied_km[idx_nums] = untied_km[idx_nums]
num_same = 1
idx_nums = []
        return untied_km.reshape(self.nobs, 1) | Computes the KM estimator value at each observation, taking into account
ties in the data.
Parameters
----------
tie_indic: 1d array
Indicates if the i'th observation is the same as the (i+1)th
untied_km: 1d array
Km estimates at each observation assuming no ties. | _km_w_ties | python | statsmodels/statsmodels | statsmodels/emplike/aft_el.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/aft_el.py | BSD-3-Clause |
def _make_km(self, endog, censors):
"""
Computes the Kaplan-Meier estimate for the weights in the AFT model
Parameters
----------
endog: nx1 array
Array of response variables
censors: nx1 array
Censor-indicating variable
Returns
-------
Kaplan Meier estimate for each observation
Notes
-----
This function makes calls to _is_tied and km_w_ties to handle ties in
        the data. If a censored and an uncensored observation have the same
        value, it is assumed that the uncensored one happened first.
"""
nobs = self.nobs
num = (nobs - (np.arange(nobs) + 1.))
denom = (nobs - (np.arange(nobs) + 1.) + 1.)
km = (num / denom).reshape(nobs, 1)
km = km ** np.abs(censors - 1.)
km = np.cumprod(km) # If no ties, this is kaplan-meier
tied = self._is_tied(endog, censors)
wtd_km = self._km_w_ties(tied, km)
return (censors / wtd_km).reshape(nobs, 1) | Computes the Kaplan-Meier estimate for the weights in the AFT model
Parameters
----------
endog: nx1 array
Array of response variables
censors: nx1 array
Censor-indicating variable
Returns
-------
Kaplan Meier estimate for each observation
Notes
-----
This function makes calls to _is_tied and km_w_ties to handle ties in
the data. If a censored and an uncensored observation have the same
value, it is assumed that the uncensored one happened first. | _make_km | python | statsmodels/statsmodels | statsmodels/emplike/aft_el.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/aft_el.py | BSD-3-Clause
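Note that the exponent np.abs(censors - 1.) above applies the risk-set factor at *censored* points, so the cumulative product tracks the Kaplan-Meier estimate of the censoring distribution, whose reciprocal then weights the uncensored observations (inverse-probability-of-censoring weights). For contrast, a standalone sketch of the ordinary event-time Kaplan-Meier curve on hypothetical sorted data, which uses censors itself as the exponent:

import numpy as np

endog = np.array([2., 3., 5., 7., 9.])            # sorted survival times
censors = np.array([1, 0, 1, 1, 1])               # 0 = censored
nobs = len(endog)
at_risk = nobs - np.arange(nobs)                  # n, n-1, ..., 1
factors = ((at_risk - 1.) / at_risk) ** censors   # drop the curve only at events
surv = np.cumprod(factors)                        # product-limit survival estimate
print(surv)                                       # [0.8 0.8 0.5333 0.2667 0.]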
def fit(self):
"""
Fits an AFT model and returns results instance
Parameters
----------
None
Returns
-------
Results instance.
Notes
-----
To avoid dividing by zero, max(endog) is assumed to be uncensored.
"""
return AFTResults(self) | Fits an AFT model and returns results instance
Parameters
----------
None
Returns
-------
Results instance.
Notes
-----
To avoid dividing by zero, max(endog) is assumed to be uncensored. | fit | python | statsmodels/statsmodels | statsmodels/emplike/aft_el.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/aft_el.py | BSD-3-Clause |
def params(self):
"""
Fits an AFT model and returns parameters.
Parameters
----------
None
Returns
-------
Fitted params
Notes
-----
To avoid dividing by zero, max(endog) is assumed to be uncensored.
"""
self.model.modif_censors = np.copy(self.model.censors)
self.model.modif_censors[-1] = 1
wts = self.model._make_km(self.model.endog, self.model.modif_censors)
res = WLS(self.model.endog, self.model.exog, wts).fit()
params = res.params
return params | Fits an AFT model and returns parameters.
Parameters
----------
None
Returns
-------
Fitted params
Notes
-----
To avoid dividing by zero, max(endog) is assumed to be uncensored. | params | python | statsmodels/statsmodels | statsmodels/emplike/aft_el.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/aft_el.py | BSD-3-Clause |
def test_beta(self, b0_vals, param_nums, ftol=10 ** - 5, maxiter=30,
print_weights=1):
"""
Returns the profile log likelihood for regression parameters
'param_num' at 'b0_vals.'
Parameters
----------
b0_vals : list
The value of parameters to be tested
param_num : list
Which parameters to be tested
maxiter : int, optional
How many iterations to use in the EM algorithm. Default is 30
ftol : float, optional
The function tolerance for the EM optimization.
Default is 10''**''-5
print_weights : bool
            If true, returns the weights that maximize the profile
log likelihood. Default is False
Returns
-------
test_results : tuple
            The log-likelihood ratio and p-value of the test.
Notes
-----
The function will warn if the EM reaches the maxiter. However, when
optimizing over nuisance parameters, it is possible to reach a
maximum number of inner iterations for a specific value for the
        nuisance parameters while the results of the function are still valid.
        This usually occurs when the optimization over the nuisance parameters
        selects parameter values that yield a log-likelihood ratio close to
infinity.
Examples
--------
>>> import statsmodels.api as sm
>>> import numpy as np
# Test parameter is .05 in one regressor no intercept model
>>> data=sm.datasets.heart.load()
>>> y = np.log10(data.endog)
>>> x = data.exog
>>> cens = data.censors
>>> model = sm.emplike.emplikeAFT(y, x, cens)
>>> res=model.test_beta([0], [0])
>>> res
(1.4657739632606308, 0.22601365256959183)
#Test slope is 0 in model with intercept
>>> data=sm.datasets.heart.load()
>>> y = np.log10(data.endog)
>>> x = data.exog
>>> cens = data.censors
>>> model = sm.emplike.emplikeAFT(y, sm.add_constant(x), cens)
>>> res = model.test_beta([0], [1])
>>> res
(4.623487775078047, 0.031537049752572731)
"""
censors = self.model.censors
endog = self.model.endog
exog = self.model.exog
uncensored = (censors == 1).flatten()
censored = (censors == 0).flatten()
uncens_endog = endog[uncensored]
uncens_exog = exog[uncensored, :]
reg_model = OLS(uncens_endog, uncens_exog).fit()
llr, pval, new_weights = reg_model.el_test(b0_vals, param_nums,
return_weights=True) # Needs to be changed
km = self.model._make_km(endog, censors).flatten() # when merged
uncens_nobs = self.model.uncens_nobs
F = np.asarray(new_weights).reshape(uncens_nobs)
# Step 0 ^
params = self.params()
survidx = np.where(censors == 0)
survidx = survidx[0] - np.arange(len(survidx[0]))
numcensbelow = np.int_(np.cumsum(1 - censors))
if len(param_nums) == len(params):
llr = self._EM_test([], F=F, params=params,
param_nums=param_nums,
b0_vals=b0_vals, survidx=survidx,
uncens_nobs=uncens_nobs,
numcensbelow=numcensbelow, km=km,
uncensored=uncensored, censored=censored,
                                ftol=ftol, maxiter=maxiter)  # was hardcoded to 25, ignoring the maxiter argument
return llr, chi2.sf(llr, self.model.nvar)
else:
x0 = np.delete(params, param_nums)
try:
res = optimize.fmin(self._EM_test, x0,
(params, param_nums, b0_vals, F, survidx,
uncens_nobs, numcensbelow, km, uncensored,
censored, maxiter, ftol), full_output=1,
disp=0)
llr = res[1]
return llr, chi2.sf(llr, len(param_nums))
except np.linalg.LinAlgError:
return np.inf, 0 | Returns the profile log likelihood for regression parameters
'param_num' at 'b0_vals.'
Parameters
----------
b0_vals : list
The value of parameters to be tested
param_num : list
Which parameters to be tested
maxiter : int, optional
How many iterations to use in the EM algorithm. Default is 30
ftol : float, optional
The function tolerance for the EM optimization.
Default is 10''**''-5
print_weights : bool
If true, returns the weights that maximize the profile
log likelihood. Default is False
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value of the test.
Notes
-----
The function will warn if the EM reaches the maxiter. However, when
optimizing over nuisance parameters, it is possible to reach a
maximum number of inner iterations for a specific value for the
nuisance parameters while the results of the function are still valid.
This usually occurs when the optimization over the nuisance parameters
selects parameter values that yield a log-likelihood ratio close to
infinity.
Examples
--------
>>> import statsmodels.api as sm
>>> import numpy as np
# Test parameter is .05 in one regressor no intercept model
>>> data=sm.datasets.heart.load()
>>> y = np.log10(data.endog)
>>> x = data.exog
>>> cens = data.censors
>>> model = sm.emplike.emplikeAFT(y, x, cens)
>>> res=model.test_beta([0], [0])
>>> res
(1.4657739632606308, 0.22601365256959183)
#Test slope is 0 in model with intercept
>>> data=sm.datasets.heart.load()
>>> y = np.log10(data.endog)
>>> x = data.exog
>>> cens = data.censors
>>> model = sm.emplike.emplikeAFT(y, sm.add_constant(x), cens)
>>> res = model.test_beta([0], [1])
>>> res
(4.623487775078047, 0.031537049752572731) | test_beta | python | statsmodels/statsmodels | statsmodels/emplike/aft_el.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/aft_el.py | BSD-3-Clause |
def ci_beta(self, param_num, beta_high, beta_low, sig=.05):
"""
Returns the confidence interval for a regression
parameter in the AFT model.
Parameters
----------
param_num : int
Parameter number of interest
beta_high : float
Upper bound for the confidence interval
beta_low : float
Lower bound for the confidence interval
sig : float, optional
Significance level. Default is .05
Notes
-----
        If the root finder fails because f(a) and f(b) must have different signs,
consider widening the search area by adjusting beta_low and
beta_high.
        Also note that this process is computationally intensive. There
are 4 levels of optimization/solving. From outer to inner:
1) Solving so that llr-critical value = 0
2) maximizing over nuisance parameters
        3) Using EM at each value of nuisance parameters
4) Using the _modified_Newton optimizer at each iteration
of the EM algorithm.
Also, for very unlikely nuisance parameters, it is possible for
the EM algorithm to not converge. This is not an indicator
that the solver did not find the correct solution. It just means
for a specific iteration of the nuisance parameters, the optimizer
was unable to converge.
If the user desires to verify the success of the optimization,
it is recommended to test the limits using test_beta.
"""
params = self.params()
self.r0 = chi2.ppf(1 - sig, 1)
ll = optimize.brentq(self._ci_limits_beta, beta_low,
params[param_num], (param_num))
ul = optimize.brentq(self._ci_limits_beta,
params[param_num], beta_high, (param_num))
return ll, ul | Returns the confidence interval for a regression
parameter in the AFT model.
Parameters
----------
param_num : int
Parameter number of interest
beta_high : float
Upper bound for the confidence interval
beta_low : float
Lower bound for the confidence interval
sig : float, optional
Significance level. Default is .05
Notes
-----
If the root finder fails because f(a) and f(b) must have different signs,
consider widening the search area by adjusting beta_low and
beta_high.
Also note that this process is computationally intensive. There
are 4 levels of optimization/solving. From outer to inner:
1) Solving so that llr-critical value = 0
2) maximizing over nuisance parameters
3) Using EM at each value of nuisance parameters
4) Using the _modified_Newton optimizer at each iteration
of the EM algorithm.
Also, for very unlikely nuisance parameters, it is possible for
the EM algorithm to not converge. This is not an indicator
that the solver did not find the correct solution. It just means
for a specific iteration of the nuisance parameters, the optimizer
was unable to converge.
If the user desires to verify the success of the optimization,
it is recommended to test the limits using test_beta. | ci_beta | python | statsmodels/statsmodels | statsmodels/emplike/aft_el.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/aft_el.py | BSD-3-Clause |
def fit(self):
"""
Fits the model and provides regression results.
Returns
-------
Results : class
Empirical likelihood regression class.
"""
exog_with = add_constant(self.exog, prepend=True)
restricted_model = OLS(self.endog, exog_with)
restricted_fit = restricted_model.fit()
restricted_el = restricted_fit.el_test(
np.array([0]), np.array([0]), ret_params=1)
params = np.squeeze(restricted_el[3])
beta_hat_llr = restricted_el[0]
llf = np.sum(np.log(restricted_el[2]))
return OriginResults(restricted_model, params, beta_hat_llr, llf) | Fits the model and provides regression results.
Returns
-------
Results : class
Empirical likelihood regression class. | fit | python | statsmodels/statsmodels | statsmodels/emplike/originregress.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/originregress.py | BSD-3-Clause |
def el_test(self, b0_vals, param_nums, method='nm',
stochastic_exog=1, return_weights=0):
"""
Returns the llr and p-value for a hypothesized parameter value
for a regression that goes through the origin.
Parameters
----------
b0_vals : 1darray
The hypothesized value to be tested.
param_num : 1darray
Which parameters to test. Note this uses python
indexing but the '0' parameter refers to the intercept term,
which is assumed 0. Therefore, param_num should be > 0.
return_weights : bool
If true, returns the weights that optimize the likelihood
ratio at b0_vals. Default is False.
method : str
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
Default is 'nm'.
stochastic_exog : bool
When TRUE, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. Default is TRUE.
Returns
-------
res : tuple
pvalue and likelihood ratio.
"""
b0_vals = np.hstack((0, b0_vals))
param_nums = np.hstack((0, param_nums))
test_res = self.model.fit().el_test(b0_vals, param_nums, method=method,
stochastic_exog=stochastic_exog,
return_weights=return_weights)
llr_test = test_res[0]
llr_res = llr_test - self.llr
pval = chi2.sf(llr_res, self.model.exog.shape[1] - 1)
if return_weights:
return llr_res, pval, test_res[2]
else:
return llr_res, pval | Returns the llr and p-value for a hypothesized parameter value
for a regression that goes through the origin.
Parameters
----------
b0_vals : 1darray
The hypothesized value to be tested.
param_num : 1darray
Which parameters to test. Note this uses python
indexing but the '0' parameter refers to the intercept term,
which is assumed 0. Therefore, param_num should be > 0.
return_weights : bool
If true, returns the weights that optimize the likelihood
ratio at b0_vals. Default is False.
method : str
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
Default is 'nm'.
stochastic_exog : bool
When TRUE, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. Default is TRUE.
Returns
-------
res : tuple
pvalue and likelihood ratio. | el_test | python | statsmodels/statsmodels | statsmodels/emplike/originregress.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/originregress.py | BSD-3-Clause |
def conf_int_el(self, param_num, upper_bound=None,
lower_bound=None, sig=.05, method='nm',
stochastic_exog=True):
"""
Returns the confidence interval for a regression parameter when the
regression is forced through the origin.
Parameters
----------
param_num : int
The parameter number to be tested. Note this uses python
indexing but the '0' parameter refers to the intercept term.
upper_bound : float
The maximum value the upper confidence limit can be. The
closer this is to the confidence limit, the quicker the
computation. Default is .00001 confidence limit under normality.
lower_bound : float
The minimum value the lower confidence limit can be.
Default is .00001 confidence limit under normality.
sig : float, optional
The significance level. Default .05.
method : str, optional
Algorithm to optimize of nuisance params. Can be 'nm' or
'powell'. Default is 'nm'.
stochastic_exog : bool
Default is True.
Returns
-------
ci: tuple
The confidence interval for the parameter 'param_num'.
"""
r0 = chi2.ppf(1 - sig, 1)
param_num = np.array([param_num])
if upper_bound is None:
ci = np.asarray(self.model.fit().conf_int(.0001))
upper_bound = (np.squeeze(ci[param_num])[1])
if lower_bound is None:
ci = np.asarray(self.model.fit().conf_int(.0001))
lower_bound = (np.squeeze(ci[param_num])[0])
def f(b0):
b0 = np.array([b0])
val = self.el_test(
b0, param_num, method=method, stochastic_exog=stochastic_exog
)
return val[0] - r0
_param = np.squeeze(self.params[param_num])
lowerl = optimize.brentq(f, np.squeeze(lower_bound), _param)
upperl = optimize.brentq(f, _param, np.squeeze(upper_bound))
return (lowerl, upperl) | Returns the confidence interval for a regression parameter when the
regression is forced through the origin.
Parameters
----------
param_num : int
The parameter number to be tested. Note this uses python
indexing but the '0' parameter refers to the intercept term.
upper_bound : float
The maximum value the upper confidence limit can be. The
closer this is to the confidence limit, the quicker the
computation. Default is .00001 confidence limit under normality.
lower_bound : float
The minimum value the lower confidence limit can be.
Default is .00001 confidence limit under normality.
sig : float, optional
The significance level. Default .05.
method : str, optional
Algorithm to optimize of nuisance params. Can be 'nm' or
'powell'. Default is 'nm'.
stochastic_exog : bool
Default is True.
Returns
-------
ci: tuple
The confidence interval for the parameter 'param_num'. | conf_int_el | python | statsmodels/statsmodels | statsmodels/emplike/originregress.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/originregress.py | BSD-3-Clause |
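The inner function f(b0) = llr(b0) - r0 above is the generic profile-likelihood pattern: bracket each limit between the point estimate and an outer bound and hand the sign change to brentq. A minimal sketch with a quadratic stand-in for the llr curve, so the roots are known in closed form:

import numpy as np
from scipy import optimize
from scipy.stats import chi2

r0 = chi2.ppf(0.95, 1)                    # critical value for sig = .05
def f(b0):
    llr = (b0 - 2.0) ** 2                 # stand-in for an el_test llr curve
    return llr - r0
lower = optimize.brentq(f, -10.0, 2.0)    # bracket: outer bound .. estimate
upper = optimize.brentq(f, 2.0, 10.0)     # bracket: estimate .. outer bound
print(lower, upper)                       # 2 -/+ sqrt(r0)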
def _opt_common_mu(self, mu):
"""
Optimizes the likelihood under the null hypothesis that all groups have
mean mu.
Parameters
----------
mu : float
The common mean.
Returns
-------
llr : float
            -2 times the log-likelihood ratio, which is the test statistic.
"""
nobs = self.nobs
endog = self.endog
num_groups = self.num_groups
endog_asarray = np.zeros((nobs, num_groups))
obs_num = 0
for arr_num in range(len(endog)):
new_obs_num = obs_num + len(endog[arr_num])
endog_asarray[obs_num: new_obs_num, arr_num] = endog[arr_num] - \
mu
obs_num = new_obs_num
est_vect = endog_asarray
wts = np.ones(est_vect.shape[0]) * (1. / (est_vect.shape[0]))
eta_star = self._modif_newton(np.zeros(num_groups), est_vect, wts)
denom = 1. + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr | Optimizes the likelihood under the null hypothesis that all groups have
mean mu.
Parameters
----------
mu : float
The common mean.
Returns
-------
llr : float
-2 times the log-likelihood ratio, which is the test statistic. | _opt_common_mu | python | statsmodels/statsmodels | statsmodels/emplike/elanova.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/elanova.py | BSD-3-Clause
def compute_ANOVA(self, mu=None, mu_start=0, return_weights=0):
"""
Returns -2 log likelihood, the pvalue and the maximum likelihood
estimate for a common mean.
Parameters
----------
mu : float
If a mu is specified, ANOVA is conducted with mu as the
common mean. Otherwise, the common mean is the maximum
empirical likelihood estimate of the common mean.
Default is None.
mu_start : float
            Starting value for the common mean if a specific mu is not specified.
Default = 0.
return_weights : bool
if TRUE, returns the weights on observations that maximize the
likelihood. Default is FALSE.
Returns
-------
res: tuple
The log-likelihood, p-value and estimate for the common mean.
"""
if mu is not None:
llr = self._opt_common_mu(mu)
pval = 1 - chi2.cdf(llr, self.num_groups - 1)
if return_weights:
return llr, pval, mu, self.new_weights
else:
return llr, pval, mu
else:
res = optimize.fmin_powell(self._opt_common_mu, mu_start,
full_output=1, disp=False)
llr = res[1]
mu_common = float(np.squeeze(res[0]))
pval = 1 - chi2.cdf(llr, self.num_groups - 1)
if return_weights:
return llr, pval, mu_common, self.new_weights
else:
return llr, pval, mu_common | Returns -2 log likelihood, the pvalue and the maximum likelihood
estimate for a common mean.
Parameters
----------
mu : float
If a mu is specified, ANOVA is conducted with mu as the
common mean. Otherwise, the common mean is the maximum
empirical likelihood estimate of the common mean.
Default is None.
mu_start : float
Starting value for the common mean if a specific mu is not specified.
Default = 0.
return_weights : bool
if TRUE, returns the weights on observations that maximize the
likelihood. Default is FALSE.
Returns
-------
res: tuple
The log-likelihood, p-value and estimate for the common mean. | compute_ANOVA | python | statsmodels/statsmodels | statsmodels/emplike/elanova.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/elanova.py | BSD-3-Clause |
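The stacking step in _opt_common_mu is easy to miss: column j of the estimating-equation array holds endog[j] - mu in the rows belonging to group j and zeros elsewhere, giving one moment condition per group. A small sketch of that construction with two hypothetical groups:

import numpy as np

endog = [np.array([1., 2., 3.]), np.array([4., 5.])]   # two groups
mu = 3.0
nobs = sum(len(g) for g in endog)
est_vect = np.zeros((nobs, len(endog)))
row = 0
for j, g in enumerate(endog):
    est_vect[row:row + len(g), j] = g - mu             # group j's moment condition
    row += len(g)
print(est_vect)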
def _opt_nuis_regress(self, nuisance_params, param_nums=None,
endog=None, exog=None,
nobs=None, nvar=None, params=None, b0_vals=None,
stochastic_exog=None):
"""
A function that is optimized over nuisance parameters to conduct a
hypothesis test for the parameters of interest.
Parameters
----------
nuisance_params: 1darray
Parameters to be optimized over.
Returns
-------
llr : float
-2 x the log-likelihood of the nuisance parameters and the
hypothesized value of the parameter(s) of interest.
"""
params[param_nums] = b0_vals
nuis_param_index = np.int_(np.delete(np.arange(nvar),
param_nums))
params[nuis_param_index] = nuisance_params
new_params = params.reshape(nvar, 1)
self.new_params = new_params
est_vect = exog * \
(endog - np.squeeze(np.dot(exog, new_params))).reshape(int(nobs), 1)
if not stochastic_exog:
exog_means = np.mean(exog, axis=0)[1:]
exog_mom2 = (np.sum(exog * exog, axis=0))[1:]\
/ nobs
mean_est_vect = exog[:, 1:] - exog_means
mom2_est_vect = (exog * exog)[:, 1:] - exog_mom2
regressor_est_vect = np.concatenate((mean_est_vect, mom2_est_vect),
axis=1)
est_vect = np.concatenate((est_vect, regressor_est_vect),
axis=1)
wts = np.ones(int(nobs)) * (1. / nobs)
x0 = np.zeros(est_vect.shape[1]).reshape(-1, 1)
try:
eta_star = self._modif_newton(x0, est_vect, wts)
denom = 1. + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
# the following commented out code is to verify weights
# see open issue #1845
#self.new_weights /= self.new_weights.sum()
#if not np.allclose(self.new_weights.sum(), 1., rtol=0, atol=1e-10):
# raise RuntimeError('weights do not sum to 1')
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr
except np.linalg.LinAlgError:
return np.inf | A function that is optimized over nuisance parameters to conduct a
hypothesis test for the parameters of interest.
Parameters
----------
nuisance_params: 1darray
Parameters to be optimized over.
Returns
-------
llr : float
-2 x the log-likelihood of the nuisance parameters and the
hypothesized value of the parameter(s) of interest. | _opt_nuis_regress | python | statsmodels/statsmodels | statsmodels/emplike/elregress.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/elregress.py | BSD-3-Clause |
def DescStat(endog):
"""
Returns an instance to conduct inference on descriptive statistics
via empirical likelihood. See DescStatUV and DescStatMV for more
information.
Parameters
----------
endog : ndarray
Array of data
Returns : DescStat instance
If k=1, the function returns a univariate instance, DescStatUV.
If k>1, the function returns a multivariate instance, DescStatMV.
"""
if endog.ndim == 1:
endog = endog.reshape(len(endog), 1)
if endog.shape[1] == 1:
return DescStatUV(endog)
if endog.shape[1] > 1:
return DescStatMV(endog) | Returns an instance to conduct inference on descriptive statistics
via empirical likelihood. See DescStatUV and DescStatMV for more
information.
Parameters
----------
endog : ndarray
Array of data
Returns : DescStat instance
If k=1, the function returns a univariate instance, DescStatUV.
If k>1, the function returns a multivariate instance, DescStatMV. | DescStat | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
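DescStat is exposed in the public emplike namespace, so assuming the usual statsmodels.api import path, a typical call looks like the following (random data, so the printed values vary; a usage sketch rather than a doctest):

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(3)
data = rng.standard_normal(50)
el = sm.emplike.DescStat(data)     # one column -> DescStatUV
llr, pval = el.test_mean(0)        # EL test that the mean is 0
print(llr, pval)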
def _log_star(self, eta, est_vect, weights, nobs):
"""
Transforms the log of observation probabilities in terms of the
Lagrange multiplier to the log 'star' of the probabilities.
Parameters
----------
eta : float
Lagrange multiplier
est_vect : ndarray (n,k)
Estimating equations vector
wts : nx1 array
Observation weights
Returns
------
data_star : ndarray
            The weighted log-star of the estimating equations
Notes
-----
This function is only a placeholder for the _fit_Newton.
The function value is not used in optimization and the optimal value
is disregarded when computing the log likelihood ratio.
"""
data_star = np.log(weights) + (np.sum(weights) +\
np.dot(est_vect, eta))
idx = data_star < 1. / nobs
not_idx = ~idx
nx = nobs * data_star[idx]
data_star[idx] = np.log(1. / nobs) - 1.5 + nx * (2. - nx / 2)
data_star[not_idx] = np.log(data_star[not_idx])
return data_star | Transforms the log of observation probabilities in terms of the
Lagrange multiplier to the log 'star' of the probabilities.
Parameters
----------
eta : float
Lagrange multiplier
est_vect : ndarray (n,k)
Estimating equations vector
wts : nx1 array
Observation weights
Returns
------
data_star : ndarray
The weighted log-star of the estimating equations
Notes
-----
This function is only a placeholder for the _fit_Newton.
The function value is not used in optimization and the optimal value
is disregarded when computing the log likelihood ratio. | _log_star | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
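The pseudo-log used here is Owen's log-star: identical to log(z) above the threshold 1/nobs and the matching quadratic below it, which keeps Newton steps finite when a candidate probability would go nonpositive. A minimal unweighted version of just that transform:

import numpy as np

def log_star(z, nobs):
    # log(z) for z >= 1/nobs; a second-order polynomial match below the threshold
    z = np.asarray(z, dtype=float)
    out = np.empty_like(z)
    small = z < 1. / nobs
    nz = nobs * z[small]
    out[small] = np.log(1. / nobs) - 1.5 + nz * (2. - nz / 2.)
    out[~small] = np.log(z[~small])
    return out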
def _hess(self, eta, est_vect, weights, nobs):
"""
Calculates the hessian of a weighted empirical likelihood
problem.
Parameters
----------
eta : ndarray, (1,m)
Lagrange multiplier in the profile likelihood maximization
est_vect : ndarray (n,k)
Estimating equations vector
weights : 1darray
Observation weights
Returns
-------
hess : m x m array
Weighted hessian used in _wtd_modif_newton
"""
#eta = np.squeeze(eta)
data_star_doub_prime = np.sum(weights) + np.dot(est_vect, eta)
idx = data_star_doub_prime < 1. / nobs
not_idx = ~idx
data_star_doub_prime[idx] = - nobs ** 2
data_star_doub_prime[not_idx] = - (data_star_doub_prime[not_idx]) ** -2
wtd_dsdp = weights * data_star_doub_prime
return np.dot(est_vect.T, wtd_dsdp[:, None] * est_vect) | Calculates the hessian of a weighted empirical likelihood
problem.
Parameters
----------
eta : ndarray, (1,m)
Lagrange multiplier in the profile likelihood maximization
est_vect : ndarray (n,k)
Estimating equations vector
weights : 1darray
Observation weights
Returns
-------
hess : m x m array
Weighted hessian used in _wtd_modif_newton | _hess | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def _grad(self, eta, est_vect, weights, nobs):
"""
Calculates the gradient of a weighted empirical likelihood
problem
Parameters
----------
eta : ndarray, (1,m)
Lagrange multiplier in the profile likelihood maximization
est_vect : ndarray, (n,k)
Estimating equations vector
weights : 1darray
Observation weights
Returns
-------
gradient : ndarray (m,1)
The gradient used in _wtd_modif_newton
"""
#eta = np.squeeze(eta)
data_star_prime = np.sum(weights) + np.dot(est_vect, eta)
idx = data_star_prime < 1. / nobs
not_idx = ~idx
data_star_prime[idx] = nobs * (2 - nobs * data_star_prime[idx])
data_star_prime[not_idx] = 1. / data_star_prime[not_idx]
return np.dot(weights * data_star_prime, est_vect) | Calculates the gradient of a weighted empirical likelihood
problem
Parameters
----------
eta : ndarray, (1,m)
Lagrange multiplier in the profile likelihood maximization
est_vect : ndarray, (n,k)
Estimating equations vector
weights : 1darray
Observation weights
Returns
-------
gradient : ndarray (m,1)
The gradient used in _wtd_modif_newton | _grad | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def _modif_newton(self, eta, est_vect, weights):
"""
Modified Newton's method for maximizing the log 'star' equation. This
function calls _fit_newton to find the optimal values of eta.
Parameters
----------
eta : ndarray, (1,m)
Lagrange multiplier in the profile likelihood maximization
est_vect : ndarray, (n,k)
Estimating equations vector
weights : 1darray
Observation weights
Returns
-------
params : 1xm array
Lagrange multiplier that maximizes the log-likelihood
"""
nobs = len(est_vect)
def f(x0):
return -np.sum(self._log_star(x0, est_vect, weights, nobs))
def grad(x0):
return -self._grad(x0, est_vect, weights, nobs)
def hess(x0):
return -self._hess(x0, est_vect, weights, nobs)
kwds = {'tol': 1e-8}
eta = eta.squeeze()
res = _fit_newton(f, grad, eta, (), kwds, hess=hess, maxiter=50, \
disp=0)
return res[0] | Modified Newton's method for maximizing the log 'star' equation. This
function calls _fit_newton to find the optimal values of eta.
Parameters
----------
eta : ndarray, (1,m)
Lagrange multiplier in the profile likelihood maximization
est_vect : ndarray, (n,k)
Estimating equations vector
weights : 1darray
Observation weights
Returns
-------
params : 1xm array
Lagrange multiplier that maximizes the log-likelihood | _modif_newton | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def _find_eta(self, eta):
"""
        Finding the root of sum((x_i - mu0) / (1 + eta * (x_i - mu0))) solves for
        eta when computing the ELR for the univariate mean.
Parameters
----------
eta : float
Lagrange multiplier in the empirical likelihood maximization
Returns
-------
llr : float
n times the log likelihood value for a given value of eta
"""
return np.sum((self.endog - self.mu0) / \
                      (1. + eta * (self.endog - self.mu0))) | Finding the root of sum((x_i - mu0) / (1 + eta * (x_i - mu0))) solves for
eta when computing the ELR for the univariate mean.
Parameters
----------
eta : float
Lagrange multiplier in the empirical likelihood maximization
Returns
-------
llr : float
n times the log likelihood value for a given value of eta | _find_eta | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
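Putting _find_eta together with the bracketing used in test_mean below: solve the scalar equation for eta, turn the root into observation probabilities, and read off the likelihood ratio. Hypothetical data; the brackets keep every implied weight strictly positive.

import numpy as np
from scipy import optimize
from scipy.stats import chi2

x = np.array([0.3, -1.2, 0.5, 1.8, -0.4, 0.9])
mu0, nobs = 0.0, len(x)
def f(eta):
    return np.sum((x - mu0) / (1. + eta * (x - mu0)))
eta_min = (1. - 1. / nobs) / (mu0 - x.max())
eta_max = (1. - 1. / nobs) / (mu0 - x.min())
eta = optimize.brentq(f, eta_min, eta_max)
w = 1. / (nobs * (1. + eta * (x - mu0)))   # EL probabilities; they sum to 1 at the root
llr = -2. * np.sum(np.log(nobs * w))
print(llr, chi2.sf(llr, 1))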
def _ci_limits_mu(self, mu):
"""
Calculates the difference between the log likelihood of mu_test and a
specified critical value.
Parameters
----------
mu : float
Hypothesized value of the mean.
Returns
-------
diff : float
The difference between the log likelihood value of mu0 and
a specified value.
"""
return self.test_mean(mu)[0] - self.r0 | Calculates the difference between the log likelihood of mu_test and a
specified critical value.
Parameters
----------
mu : float
Hypothesized value of the mean.
Returns
-------
diff : float
The difference between the log likelihood value of mu0 and
a specified value. | _ci_limits_mu | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def _find_gamma(self, gamma):
"""
Finds gamma that satisfies
        -2 * sum(log(n * w(gamma))) - r0 = 0
Used for confidence intervals for the mean
Parameters
----------
gamma : float
Lagrange multiplier when computing confidence interval
Returns
-------
diff : float
            The difference between the log-likelihood when the Lagrange
multiplier is gamma and a pre-specified value
"""
denom = np.sum((self.endog - gamma) ** -1)
new_weights = (self.endog - gamma) ** -1 / denom
return -2 * np.sum(np.log(self.nobs * new_weights)) - \
self.r0 | Finds gamma that satisfies
-2 * sum(log(n * w(gamma))) - r0 = 0
Used for confidence intervals for the mean
Parameters
----------
gamma : float
Lagrange multiplier when computing confidence interval
Returns
-------
diff : float
The difference between the log-liklihood when the Lagrange
multiplier is gamma and a pre-specified value | _find_gamma | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def _opt_var(self, nuisance_mu, pval=False):
"""
This is the function to be optimized over a nuisance mean parameter
to determine the likelihood ratio for the variance
Parameters
----------
nuisance_mu : float
Value of a nuisance mean parameter
Returns
-------
llr : float
Log likelihood of a pre-specified variance holding the nuisance
parameter constant
"""
endog = self.endog
nobs = self.nobs
sig_data = ((endog - nuisance_mu) ** 2 \
- self.sig2_0)
mu_data = (endog - nuisance_mu)
est_vect = np.column_stack((mu_data, sig_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1 + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
if pval: # Used for contour plotting
return chi2.sf(-2 * llr, 1)
return -2 * llr | This is the function to be optimized over a nuisance mean parameter
to determine the likelihood ratio for the variance
Parameters
----------
nuisance_mu : float
Value of a nuisance mean parameter
Returns
-------
llr : float
Log likelihood of a pre-specified variance holding the nuisance
parameter constant | _opt_var | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def _ci_limits_var(self, var):
"""
Used to determine the confidence intervals for the variance.
It calls test_var and when called by an optimizer,
finds the value of sig2_0 that is chi2.ppf(significance-level)
Parameters
----------
var_test : float
Hypothesized value of the variance
Returns
-------
diff : float
The difference between the log likelihood ratio at var_test and a
pre-specified value.
"""
return self.test_var(var)[0] - self.r0 | Used to determine the confidence intervals for the variance.
It calls test_var and when called by an optimizer,
finds the value of sig2_0 that is chi2.ppf(significance-level)
Parameters
----------
var_test : float
Hypothesized value of the variance
Returns
-------
diff : float
The difference between the log likelihood ratio at var_test and a
pre-specified value. | _ci_limits_var | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def _opt_skew(self, nuis_params):
"""
Called by test_skew. This function is optimized over
nuisance parameters mu and sigma
Parameters
----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
-------
llr : float
The log likelihood ratio of a pre-specified skewness holding
the nuisance parameters constant.
"""
endog = self.endog
nobs = self.nobs
mu_data = endog - nuis_params[0]
sig_data = ((endog - nuis_params[0]) ** 2) - nuis_params[1]
skew_data = (((endog - nuis_params[0]) ** 3) /
(nuis_params[1] ** 1.5)) - self.skew0
est_vect = np.column_stack((mu_data, sig_data, skew_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1. + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr | Called by test_skew. This function is optimized over
nuisance parameters mu and sigma
Parameters
----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
-------
llr : float
The log likelihood ratio of a pre-specified skewness holding
the nuisance parameters constant. | _opt_skew | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def _opt_kurt(self, nuis_params):
"""
Called by test_kurt. This function is optimized over
nuisance parameters mu and sigma
Parameters
----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
-------
llr : float
            The log likelihood ratio of a pre-specified kurtosis holding the
nuisance parameters constant
"""
endog = self.endog
nobs = self.nobs
mu_data = endog - nuis_params[0]
sig_data = ((endog - nuis_params[0]) ** 2) - nuis_params[1]
kurt_data = ((((endog - nuis_params[0]) ** 4) / \
(nuis_params[1] ** 2)) - 3) - self.kurt0
est_vect = np.column_stack((mu_data, sig_data, kurt_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1 + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr | Called by test_kurt. This function is optimized over
nuisance parameters mu and sigma
Parameters
----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
-------
llr : float
The log likelihood ratio of a pre-speified kurtosis holding the
nuisance parameters constant | _opt_kurt | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def _opt_skew_kurt(self, nuis_params):
"""
Called by test_joint_skew_kurt. This function is optimized over
nuisance parameters mu and sigma
Parameters
----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
------
llr : float
            The log likelihood ratio of a pre-specified skewness and
kurtosis holding the nuisance parameters constant.
"""
endog = self.endog
nobs = self.nobs
mu_data = endog - nuis_params[0]
sig_data = ((endog - nuis_params[0]) ** 2) - nuis_params[1]
skew_data = (((endog - nuis_params[0]) ** 3) / \
(nuis_params[1] ** 1.5)) - self.skew0
kurt_data = ((((endog - nuis_params[0]) ** 4) / \
(nuis_params[1] ** 2)) - 3) - self.kurt0
est_vect = np.column_stack((mu_data, sig_data, skew_data, kurt_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs,
1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1. + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr | Called by test_joint_skew_kurt. This function is optimized over
nuisance parameters mu and sigma
Parameters
----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
-------
llr : float
The log likelihood ratio of a pre-specified skewness and
kurtosis holding the nuisance parameters constant. | _opt_skew_kurt | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
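The joint objective above is minimized over the nuisance pair (mu, sigma^2) by the corresponding public test; a hedged sketch with illustrative data:

```python
import numpy as np
from statsmodels.emplike.descriptive import DescStat

rng = np.random.default_rng(2)
desc = DescStat(rng.standard_normal(200))

# H0: skewness == 0 and excess kurtosis == 0 jointly; the statistic is
# asymptotically chi-squared with 2 degrees of freedom
llr, pval = desc.test_joint_skew_kurt(0, 0)
print(llr, pval)
```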
def _ci_limits_skew(self, skew):
"""
Parameters
----------
        skew : float
Hypothesized value of skewness
Returns
-------
diff : float
The difference between the log likelihood ratio at skew and a
pre-specified value.
"""
return self.test_skew(skew)[0] - self.r0 | Parameters
----------
skew : float
Hypothesized value of skewness
Returns
-------
diff : float
The difference between the log likelihood ratio at skew and a
pre-specified value. | _ci_limits_skew | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
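`_ci_limits_skew` is written to be handed to a scalar root finder: a confidence bound is the skewness value at which the test statistic crosses the chi-squared critical value stored in `r0`. A hedged sketch of the public call that performs this inversion (illustrative data):

```python
import numpy as np
from statsmodels.emplike.descriptive import DescStat

rng = np.random.default_rng(3)
desc = DescStat(rng.standard_normal(150))

# ci_skew sets r0 = chi2.ppf(1 - sig, 1) and then brackets a root of
# _ci_limits_skew on each side of the sample skewness with brentq
lower, upper = desc.ci_skew(sig=0.05)
print(lower, upper)
```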
def _ci_limits_kurt(self, kurt):
"""
Parameters
----------
        kurt : float
Hypothesized value of kurtosis
Returns
-------
diff : float
The difference between the log likelihood ratio at kurt and a
pre-specified value.
"""
return self.test_kurt(kurt)[0] - self.r0 | Parameters
----------
kurt : float
Hypothesized value of kurtosis
Returns
-------
diff : float
The difference between the log likelihood ratio at kurt and a
pre-specified value. | _ci_limits_kurt | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
def _opt_correl(self, nuis_params, corr0, endog, nobs, x0, weights0):
"""
Parameters
----------
nuis_params : 1darray
Array containing two nuisance means and two nuisance variances
Returns
-------
llr : float
The log-likelihood of the correlation coefficient holding nuisance
parameters constant
"""
mu1_data, mu2_data = (endog - nuis_params[::2]).T
sig1_data = mu1_data ** 2 - nuis_params[1]
sig2_data = mu2_data ** 2 - nuis_params[3]
correl_data = ((mu1_data * mu2_data) - corr0 *
(nuis_params[1] * nuis_params[3]) ** .5)
est_vect = np.column_stack((mu1_data, sig1_data,
mu2_data, sig2_data, correl_data))
eta_star = self._modif_newton(x0, est_vect, weights0)
denom = 1. + np.dot(est_vect, eta_star)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr | Parameters
----------
nuis_params : 1darray
Array containing two nuisance means and two nuisance variances
Returns
-------
llr : float
The log-likelihood of the correlation coefficient holding nuisance
parameters constant | _opt_correl | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
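The five moment conditions above (two means, two variances, one correlation constraint) are profiled out through the multivariate class; a hedged usage sketch (illustrative data):

```python
import numpy as np
from statsmodels.emplike.descriptive import DescStat

rng = np.random.default_rng(4)
x = rng.standard_normal((100, 2))  # two columns -> multivariate class
desc = DescStat(x)

# H0: corr(x[:, 0], x[:, 1]) == 0; the four nuisance means/variances are
# optimized away internally, leaving a 1-degree-of-freedom statistic
llr, pval = desc.test_corr(0)
print(llr, pval)
```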
def test_mean(self, mu0, return_weights=False):
"""
        Returns -2 x log-likelihood ratio, p-value and weights
for a hypothesis test of the mean.
Parameters
----------
mu0 : float
Mean value to be tested
return_weights : bool
If return_weights is True the function returns
the weights of the observations under the null hypothesis.
Default is False
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value of mu0
"""
self.mu0 = mu0
endog = self.endog
nobs = self.nobs
eta_min = (1. - (1. / nobs)) / (self.mu0 - max(endog))
eta_max = (1. - (1. / nobs)) / (self.mu0 - min(endog))
eta_star = optimize.brentq(self._find_eta, eta_min, eta_max)
new_weights = (1. / nobs) * 1. / (1. + eta_star * (endog - self.mu0))
llr = -2 * np.sum(np.log(nobs * new_weights))
if return_weights:
return llr, chi2.sf(llr, 1), new_weights
else:
            return llr, chi2.sf(llr, 1) | Returns -2 x log-likelihood ratio, p-value and weights
for a hypothesis test of the mean.
Parameters
----------
mu0 : float
Mean value to be tested
return_weights : bool
If return_weights is True the function returns
the weights of the observations under the null hypothesis.
Default is False
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value of mu0 | test_mean | python | statsmodels/statsmodels | statsmodels/emplike/descriptive.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/emplike/descriptive.py | BSD-3-Clause |
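The bracket [eta_min, eta_max] used above is exactly the range over which every implied weight 1/(nobs * (1 + eta * (x_i - mu0))) stays positive. A self-contained sketch that reproduces the computation with plain numpy/scipy (the sample and mu0 are made up):

```python
import numpy as np
from scipy import optimize
from scipy.stats import chi2

rng = np.random.default_rng(5)
endog = rng.standard_normal(60)
mu0, nobs = 0.1, endog.size

# estimating equation whose root is the Lagrange multiplier eta
def find_eta(eta):
    return np.sum((endog - mu0) / (1. + eta * (endog - mu0)))

# positivity of all weights confines eta to this bracket
eta_min = (1. - 1. / nobs) / (mu0 - endog.max())
eta_max = (1. - 1. / nobs) / (mu0 - endog.min())
eta_star = optimize.brentq(find_eta, eta_min, eta_max)

weights = (1. / nobs) / (1. + eta_star * (endog - mu0))
llr = -2 * np.sum(np.log(nobs * weights))
print(llr, chi2.sf(llr, 1))  # should match DescStat(endog).test_mean(mu0)
```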