code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def plot2d(self,ix=0,iy=1,clf=True):
"""
Generates a 2-dimensional plot of the data set and principal components
using matplotlib.
ix specifies which p-dimension to put on the x-axis of the plot
and iy specifies which to put on the y-axis (0-indexed)
"""
import matplotlib.pyplot as plt
x,y=self.N[:,ix],self.N[:,iy]
if clf:
plt.clf()
plt.scatter(x,y)
vals,evs=self.getEigensystem()
#evx,evy=evs[:,ix],evs[:,iy]
xl,xu=plt.xlim()
yl,yu=plt.ylim()
dx,dy=(xu-xl),(yu-yl)
for val,vec,c in zip(vals,evs.T,self._colors):
plt.arrow(0,0,val*vec[ix],val*vec[iy],head_width=0.05*(dx*dy/4)**0.5,fc=c,ec=c)
#plt.arrow(0,0,vals[ix]*evs[ix,ix],vals[ix]*evs[iy,ix],head_width=0.05*(dx*dy/4)**0.5,fc='g',ec='g')
#plt.arrow(0,0,vals[iy]*evs[ix,iy],vals[iy]*evs[iy,iy],head_width=0.05*(dx*dy/4)**0.5,fc='r',ec='r')
if self.names is not None:
plt.xlabel('$'+self.names[ix]+'/\\sigma$')
plt.ylabel('$'+self.names[iy]+'/\\sigma$') | Generates a 2-dimensional plot of the data set and principal components
using matplotlib.
ix specifies which p-dimension to put on the x-axis of the plot
and iy specifies which to put on the y-axis (0-indexed) | plot2d | python | statsmodels/statsmodels | statsmodels/sandbox/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/pca.py | BSD-3-Clause |
def plot3d(self,ix=0,iy=1,iz=2,clf=True):
"""
Generates a 3-dimensional plot of the data set and principal components
using mayavi.
ix, iy, and iz specify which of the input p-dimensions to place on each of
the x,y,z axes, respectively (0-indexed).
"""
import enthought.mayavi.mlab as M
if clf:
M.clf()
z3=np.zeros(3)
v=(self.getEigenvectors()*self.getEigenvalues())
M.quiver3d(z3,z3,z3,v[ix],v[iy],v[iz],scale_factor=5)
M.points3d(self.N[:,ix],self.N[:,iy],self.N[:,iz],scale_factor=0.3)
if self.names:
M.axes(xlabel=self.names[ix]+'/sigma',ylabel=self.names[iy]+'/sigma',zlabel=self.names[iz]+'/sigma')
else:
M.axes() | Generates a 3-dimensional plot of the data set and principal components
using mayavi.
ix, iy, and iz specify which of the input p-dimensions to place on each of
the x,y,z axes, respectively (0-indexed). | plot3d | python | statsmodels/statsmodels | statsmodels/sandbox/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/pca.py | BSD-3-Clause |
def sigclip(self,sigs):
"""
clips out all data points that are more than a certain number
of standard deviations from the mean.
sigs can be either a single value or a length-p sequence that
specifies the number of standard deviations along each of the
p dimensions.
"""
if np.isscalar(sigs):
sigs=sigs*np.ones(self.N.shape[1])
sigs = sigs*np.std(self.N,axis=1)
n = self.N.shape[0]
m = np.all(np.abs(self.N) < sigs,axis=1)
self.A=self.A[m]
self.__calc()
return n-sum(m) | clips out all data points that are more than a certain number
of standard deviations from the mean.
sigs can be either a single value or a length-p sequence that
specifies the number of standard deviations along each of the
p dimensions. | sigclip | python | statsmodels/statsmodels | statsmodels/sandbox/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/pca.py | BSD-3-Clause |
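The clipping itself reduces to a boolean mask over the (already normalized) data. A minimal standalone NumPy sketch of the same idea, using made-up data rather than the class attributes:

```python
import numpy as np

rng = np.random.default_rng(0)
data = rng.standard_normal((100, 3))            # n observations, p dimensions
sigs = 2.5 * np.std(data, axis=0)               # per-dimension clip thresholds

centered = data - data.mean(axis=0)
keep = np.all(np.abs(centered) < sigs, axis=1)  # rows within all thresholds
print(data.shape[0] - keep.sum(), "points clipped")
```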
def project(self,vals=None,enthresh=None,nPCs=None,cumen=None):
"""
projects the normalized values onto the components
enthresh, nPCs, and cumen determine how many PCs to use
if vals is None, the normalized data vectors are the values to project.
Otherwise, it should be convertible to a p x N array
returns n,p(>threshold) dimension array
"""
nonnones = sum([e is not None for e in (enthresh, nPCs, cumen)])
if nonnones == 0:
m = slice(None)
elif nonnones > 1:
raise ValueError("cannot specify more than one threshold")
else:
if enthresh is not None:
m = self.energies() > enthresh
elif nPCs is not None:
m = slice(None,nPCs)
elif cumen is not None:
m = np.cumsum(self.energies()) < cumen
else:
raise RuntimeError('Should be unreachable')
if vals is None:
vals = self.N.T
else:
vals = np.array(vals,copy=False)
if self.N.T.shape[0] != vals.shape[0]:
raise ValueError("shape for vals does not match")
proj = np.matrix(self.getEigenvectors()).T*vals
return proj[m].T | projects the normalized values onto the components
enthresh, nPCs, and cumen determine how many PCs to use
if vals is None, the normalized data vectors are the values to project.
Otherwise, it should be convertible to a p x N array
returns n,p(>threshold) dimension array | project | python | statsmodels/statsmodels | statsmodels/sandbox/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/pca.py | BSD-3-Clause |
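For orientation, the projection step itself is just a matrix product of the normalized data with the leading eigenvectors. A standalone sketch with assumed toy data (not the class API):

```python
import numpy as np

rng = np.random.default_rng(1)
X = rng.standard_normal((200, 4))
N = (X - X.mean(axis=0)) / X.std(axis=0)        # normalized data, analogous to self.N

vals, vecs = np.linalg.eigh(np.cov(N, rowvar=False))
order = np.argsort(vals)[::-1]                  # eigenpairs sorted, largest first
vecs = vecs[:, order]

nPCs = 2
scores = N @ vecs[:, :nPCs]                     # (nobs, nPCs) array of projections
print(scores.shape)
```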
def deproject(self,A,normed=True):
"""
input is an n X q array, where q <= p
output is p X n
"""
A=np.atleast_2d(A)
n,q = A.shape
p = self.A.shape[1]
if q > p :
raise ValueError("q > p")
evinv=np.linalg.inv(np.matrix(self.getEigenvectors()).T)
zs = np.zeros((n,p))
zs[:,:q]=A
proj = evinv*zs.T
if normed:
return np.array(proj.T).T
else:
mns=np.mean(self.A,axis=0)
sds=np.std(self.M,axis=0)
return (np.array(proj.T)*sds+mns).T | input is an n X q array, where q <= p
output is p X n | deproject | python | statsmodels/statsmodels | statsmodels/sandbox/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/pca.py | BSD-3-Clause |
def subtractPC(self,pc,vals=None):
"""
pc can be a scalar or any sequence of pc indices
if vals is None, the source data is self.A, else whatever is in vals
(which must be p x m)
"""
if vals is None:
vals = self.A
else:
vals = vals.T
if vals.shape[1]!= self.A.shape[1]:
raise ValueError("vals do not have the correct number of components")
pcs=self.project()
zpcs=np.zeros_like(pcs)
zpcs[:,pc]=pcs[:,pc]
upc=self.deproject(zpcs,False)
A = vals.T-upc
B = A.T*np.std(self.M,axis=0)
return B+np.mean(self.A,axis=0) | pc can be a scalar or any sequence of pc indices
if vals is None, the source data is self.A, else whatever is in vals
(which must be p x m) | subtractPC | python | statsmodels/statsmodels | statsmodels/sandbox/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/pca.py | BSD-3-Clause |
def Rp(v):
""" Gradient """
result = 2*(A*v-R(v)*B*v)/dot(v.T,B*v)
#print "Rp: ", result
return result | Gradient | Rp | python | statsmodels/statsmodels | statsmodels/sandbox/mle.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/mle.py | BSD-3-Clause |
def Rpp(v):
""" Hessian """
result = 2*(A-R(v)*B-outer(B*v,Rp(v))-outer(Rp(v),B*v))/dot(v.T,B*v)
#print "Rpp: ", result
return result | Hessian | Rpp | python | statsmodels/statsmodels | statsmodels/sandbox/mle.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/mle.py | BSD-3-Clause |
def _band2array(a, lower=0, symmetric=False, hermitian=False):
"""
Take an upper or lower triangular banded matrix and return a
numpy array.
INPUTS:
a -- a matrix in upper or lower triangular banded matrix
lower -- is the matrix upper or lower triangular?
symmetric -- if True, return the original result plus its transpose
hermitian -- if True (and symmetric False), return the original
result plus its conjugate transposed
"""
n = a.shape[1]
r = a.shape[0]
_a = 0
if not lower:
for j in range(r):
_b = np.diag(a[r-1-j],k=j)[j:(n+j),j:(n+j)]
_a += _b
if symmetric and j > 0:
_a += _b.T
elif hermitian and j > 0:
_a += _b.conjugate().T
else:
for j in range(r):
_b = np.diag(a[j],k=j)[0:n,0:n]
_a += _b
if symmetric and j > 0:
_a += _b.T
elif hermitian and j > 0:
_a += _b.conjugate().T
_a = _a.T
return _a | Take an upper or lower triangular banded matrix and return a
numpy array.
INPUTS:
a -- a matrix in upper or lower triangular banded matrix
lower -- is the matrix upper or lower triangular?
symmetric -- if True, return the original result plus its transpose
hermitian -- if True (and symmetric False), return the original
result plus its conjugate transposed | _band2array | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
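To make the storage convention concrete, the sketch below expands an upper banded symmetric matrix (LAPACK-style storage, the same layout scipy.linalg.solveh_banded expects) into a dense symmetric array; it illustrates the convention rather than reproducing the helper.

```python
import numpy as np

n, u = 5, 2                                      # matrix size, number of superdiagonals
ab = np.arange(1.0, (u + 1) * n + 1).reshape(u + 1, n)  # upper banded storage

dense = np.zeros((n, n))
for k in range(u + 1):                           # k = 0 is the main diagonal
    diag = ab[u - k, k:]                         # row u-k holds the k-th superdiagonal
    dense += np.diag(diag, k)
    if k > 0:
        dense += np.diag(diag, -k)               # mirror to fill the symmetric lower half

print(np.allclose(dense, dense.T))               # True
```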
def _upper2lower(ub):
"""
Convert upper triangular banded matrix to lower banded form.
INPUTS:
ub -- an upper triangular banded matrix
OUTPUTS: lb
lb -- a lower triangular banded matrix with same entries
as ub
"""
lb = np.zeros(ub.shape, ub.dtype)
nrow, ncol = ub.shape
for i in range(ub.shape[0]):
lb[i,0:(ncol-i)] = ub[nrow-1-i,i:ncol]
lb[i,(ncol-i):] = ub[nrow-1-i,0:i]
return lb | Convert upper triangular banded matrix to lower banded form.
INPUTS:
ub -- an upper triangular banded matrix
OUTPUTS: lb
lb -- a lower triangular banded matrix with same entries
as ub | _upper2lower | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
def _lower2upper(lb):
"""
Convert lower triangular banded matrix to upper banded form.
INPUTS:
lb -- a lower triangular banded matrix
OUTPUTS: ub
ub -- an upper triangular banded matrix with same entries
as lb
"""
ub = np.zeros(lb.shape, lb.dtype)
nrow, ncol = lb.shape
for i in range(lb.shape[0]):
ub[nrow-1-i,i:ncol] = lb[i,0:(ncol-i)]
ub[nrow-1-i,0:i] = lb[i,(ncol-i):]
return ub | Convert lower triangular banded matrix to upper banded form.
INPUTS:
lb -- a lower triangular banded matrix
OUTPUTS: ub
ub -- an upper triangular banded matrix with same entries
as lb | _lower2upper | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
def _triangle2unit(tb, lower=0):
"""
Take a banded triangular matrix and return its diagonal and the
unit matrix: the banded triangular matrix with 1's on the diagonal,
i.e. each row is divided by the corresponding entry on the diagonal.
INPUTS:
tb -- a lower triangular banded matrix
lower -- if True, then tb is assumed to be lower triangular banded,
in which case return value is also lower triangular banded.
OUTPUTS: d, b
d -- diagonal entries of tb
b -- unit matrix: if lower is False, b is upper triangular
banded and its rows have been divided by d,
else lower is True, b is lower triangular banded
and its columns have been divided by d.
"""
if lower:
d = tb[0].copy()
else:
d = tb[-1].copy()
if lower:
return d, (tb / d)
else:
lnum = _upper2lower(tb)
return d, _lower2upper(lnum / d) | Take a banded triangular matrix and return its diagonal and the
unit matrix: the banded triangular matrix with 1's on the diagonal,
i.e. each row is divided by the corresponding entry on the diagonal.
INPUTS:
tb -- a lower triangular banded matrix
lower -- if True, then tb is assumed to be lower triangular banded,
in which case return value is also lower triangular banded.
OUTPUTS: d, b
d -- diagonal entries of tb
b -- unit matrix: if lower is False, b is upper triangular
banded and its rows have been divided by d,
else lower is True, b is lower triangular banded
and its columns have been divided by d. | _triangle2unit | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
def _trace_symbanded(a, b, lower=0):
"""
Compute the trace(ab) for two banded real symmetric matrices
stored in either upper or lower form.
INPUTS:
a, b -- two banded real symmetric matrices (either lower or upper)
lower -- if True, a and b are assumed to be the lower half
OUTPUTS: trace
trace -- trace(ab)
"""
if lower:
t = _zero_triband(a * b, lower=1)
return t[0].sum() + 2 * t[1:].sum()
else:
t = _zero_triband(a * b, lower=0)
return t[-1].sum() + 2 * t[:-1].sum() | Compute the trace(ab) for two banded real symmetric matrices
stored in either upper or lower form.
INPUTS:
a, b -- two banded real symmetric matrices (either lower or upper)
lower -- if True, a and b are assumed to be the lower half
OUTPUTS: trace
trace -- trace(ab) | _trace_symbanded | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
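The identity behind this helper is that trace(AB) equals the sum of the elementwise product when A and B are symmetric; in banded storage each off-diagonal band therefore counts twice. A dense check of the identity (illustration only):

```python
import numpy as np

rng = np.random.default_rng(2)
M = rng.standard_normal((4, 4))
A = M + M.T                                      # symmetric A
B = np.diag(rng.standard_normal(4)) + 1.0        # symmetric B

lhs = np.trace(A @ B)
rhs = (A * B).sum()                              # elementwise product, summed
print(np.allclose(lhs, rhs))                     # True for symmetric matrices
```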
def _zero_triband(a, lower=0):
"""
Explicitly zero out unused elements of a real symmetric banded matrix.
INPUTS:
a -- a real symmetric banded matrix (either upper or lower half)
lower -- if True, a is assumed to be the lower half
"""
nrow, ncol = a.shape
if lower:
for i in range(nrow):
a[i, (ncol-i):] = 0.
else:
for i in range(nrow):
a[i, 0:i] = 0.
return a | Explicitly zero out unused elements of a real symmetric banded matrix.
INPUTS:
a -- a real symmetric banded matrix (either upper or lower half)
lower -- if True, a is assumed to be the lower half | _zero_triband | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
def __call__(self, *args):
"""
Evaluate the BSpline at a given point, yielding
a matrix B and return
B * self.coef
INPUTS:
args -- optional arguments. If None, it returns self._basisx,
the BSpline evaluated at the x values passed in __init__.
Otherwise, return the BSpline evaluated at the
first argument args[0].
OUTPUTS: y
y -- value of Bspline at specified x values
BUGS:
If self has no attribute x, an exception will be raised
because self has no attribute _basisx.
"""
if not args:
b = self._basisx.T
else:
x = args[0]
b = np.asarray(self.basis(x)).T
return np.squeeze(np.dot(b, self.coef)) | Evaluate the BSpline at a given point, yielding
a matrix B and return
B * self.coef
INPUTS:
args -- optional arguments. If None, it returns self._basisx,
the BSpline evaluated at the x values passed in __init__.
Otherwise, return the BSpline evaluated at the
first argument args[0].
OUTPUTS: y
y -- value of Bspline at specified x values
BUGS:
If self has no attribute x, an exception will be raised
because self has no attribute _basisx. | __call__ | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
def basis_element(self, x, i, d=0):
"""
Evaluate a particular basis element of the BSpline,
or its derivative.
INPUTS:
x -- x values at which to evaluate the basis element
i -- which element of the BSpline to return
d -- the order of derivative
OUTPUTS: y
y -- value of d-th derivative of the i-th basis element
of the BSpline at specified x values
"""
x = np.asarray(x, np.float64)
_shape = x.shape
if _shape == ():
x.shape = (1,)
x.shape = (np.product(_shape,axis=0),)
if i < self.tau.shape[0] - 1:
# TODO: OWNDATA flags...
v = _hbspline.evaluate(x, self.tau, self.m, d, i, i+1)
else:
return np.zeros(x.shape, np.float64)
if (i == self.tau.shape[0] - self.m):
v = np.where(np.equal(x, self.tau[-1]), 1, v)
v.shape = _shape
return v | Evaluate a particular basis element of the BSpline,
or its derivative.
INPUTS:
x -- x values at which to evaluate the basis element
i -- which element of the BSpline to return
d -- the order of derivative
OUTPUTS: y
y -- value of d-th derivative of the i-th basis element
of the BSpline at specified x values | basis_element | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
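This class evaluates basis elements through its own _hbspline extension. Purely as a point of comparison, scipy.interpolate exposes individual B-spline basis elements built from a local knot sequence; the sketch below is not the class's code path.

```python
import numpy as np
from scipy.interpolate import BSpline

knots = np.array([0.0, 1.0, 2.0, 3.0, 4.0])      # local knots; degree = len(knots) - 2 = 3
b = BSpline.basis_element(knots)

x = np.linspace(0.0, 4.0, 9)
print(b(x))                                      # values of the cubic basis element
print(b.derivative()(x))                         # its first derivative
```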
def basis(self, x, d=0, lower=None, upper=None):
"""
Evaluate the basis of the BSpline or its derivative.
If lower or upper is specified, then only
the [lower:upper] elements of the basis are returned.
INPUTS:
x -- x values at which to evaluate the basis element
i -- which element of the BSpline to return
d -- the order of derivative
lower -- optional lower limit of the set of basis
elements
upper -- optional upper limit of the set of basis
elements
OUTPUTS: y
y -- value of d-th derivative of the basis elements
of the BSpline at specified x values
"""
x = np.asarray(x)
_shape = x.shape
if _shape == ():
x.shape = (1,)
x.shape = (np.product(_shape,axis=0),)
if upper is None:
upper = self.tau.shape[0] - self.m
if lower is None:
lower = 0
upper = min(upper, self.tau.shape[0] - self.m)
lower = max(0, lower)
d = np.asarray(d)
if d.shape == ():
v = _hbspline.evaluate(x, self.tau, self.m, int(d), lower, upper)
else:
if d.shape[0] != 2:
raise ValueError("if d is not an integer, expecting a jx2 \
array with first row indicating order \
of derivative, second row coefficient in front.")
v = 0
for i in range(d.shape[1]):
v += d[1,i] * _hbspline.evaluate(x, self.tau, self.m, d[0,i], lower, upper)
v.shape = (upper-lower,) + _shape
if upper == self.tau.shape[0] - self.m:
v[-1] = np.where(np.equal(x, self.tau[-1]), 1, v[-1])
return v | Evaluate the basis of the BSpline or its derivative.
If lower or upper is specified, then only
the [lower:upper] elements of the basis are returned.
INPUTS:
x -- x values at which to evaluate the basis element
i -- which element of the BSpline to return
d -- the order of derivative
lower -- optional lower limit of the set of basis
elements
upper -- optional upper limit of the set of basis
elements
OUTPUTS: y
y -- value of d-th derivative of the basis elements
of the BSpline at specified x values | basis | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
def gram(self, d=0):
"""
Compute Gram inner product matrix, storing it in lower
triangular banded form.
The (i,j) entry is
G_ij = integral b_i^(d) b_j^(d)
where b_i are the basis elements of the BSpline and (d) is the
d-th derivative.
If d is a matrix then, it is assumed to specify a differential
operator as follows: the first row represents the order of derivative
with the second row the coefficient corresponding to that order.
For instance:
[[2, 3],
[3, 1]]
represents 3 * f^(2) + 1 * f^(3).
INPUTS:
d -- which derivative to apply to each basis element,
if d is a matrix, it is assumed to specify
a differential operator as above
OUTPUTS: gram
gram -- the matrix of inner products of (derivatives)
of the BSpline elements
"""
d = np.squeeze(d)
if np.asarray(d).shape == ():
self.g = _hbspline.gram(self.tau, self.m, int(d), int(d))
else:
d = np.asarray(d)
if d.shape[0] != 2:
raise ValueError("if d is not an integer, expecting a jx2 \
array with first row indicating order \
of derivative, second row coefficient in front.")
if d.shape == (2,):
d.shape = (2,1)
self.g = 0
for i in range(d.shape[1]):
for j in range(d.shape[1]):
self.g += d[1,i]* d[1,j] * _hbspline.gram(self.tau, self.m, int(d[0,i]), int(d[0,j]))
self.g = self.g.T
self.d = d
return np.nan_to_num(self.g) | Compute Gram inner product matrix, storing it in lower
triangular banded form.
The (i,j) entry is
G_ij = integral b_i^(d) b_j^(d)
where b_i are the basis elements of the BSpline and (d) is the
d-th derivative.
If d is a matrix then, it is assumed to specify a differential
operator as follows: the first row represents the order of derivative
with the second row the coefficient corresponding to that order.
For instance:
[[2, 3],
[3, 1]]
represents 3 * f^(2) + 1 * f^(3).
INPUTS:
d -- which derivative to apply to each basis element,
if d is a matrix, it is assumed to specify
a differential operator as above
OUTPUTS: gram
gram -- the matrix of inner products of (derivatives)
of the BSpline elements | gram | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
def fit(self, y, x=None, weights=None, pen=0.):
"""
Fit the smoothing spline to a set of (x,y) pairs.
INPUTS:
y -- response variable
x -- if None, uses self.x
weights -- optional array of weights
pen -- constant in front of Gram matrix
OUTPUTS: None
The smoothing spline is determined by self.coef,
subsequent calls of __call__ will be the smoothing spline.
ALGORITHM:
Formally, this solves a minimization:
fhat = ARGMIN_f SUM_i=1^n (y_i-f(x_i))^2 + pen * int f^(2)^2
int is integral. pen is lambda (from Hastie)
See Chapter 5 of
Hastie, Tibshirani and Friedman (2001). "The Elements of Statistical
Learning." Springer-Verlag. 536 pages.
for more details.
TODO:
Should add arbitrary derivative penalty instead of just
second derivative.
"""
banded = True
if x is None:
x = self._x
bt = self._basisx.copy()
else:
bt = self.basis(x)
if pen == 0.: # cannot use cholesky for singular matrices
banded = False
if x.shape != y.shape:
raise ValueError('x and y shape do not agree, by default x are \
the Bspline\'s internal knots')
if pen >= self.penmax:
pen = self.penmax
if weights is not None:
self.weights = weights
else:
self.weights = 1.
_w = np.sqrt(self.weights)
bt *= _w
# throw out rows with zeros (this happens at boundary points!)
mask = np.flatnonzero(1 - np.all(np.equal(bt, 0), axis=0))
bt = bt[:,mask]
y = y[mask]
self.df_total = y.shape[0]
bty = np.squeeze(np.dot(bt, _w * y))
self.N = y.shape[0]
if not banded:
self.btb = np.dot(bt, bt.T)
_g = _band2array(self.g, lower=1, symmetric=True)
self.coef, _, self.rank = L.lstsq(self.btb + pen*_g, bty)[0:3]
self.rank = min(self.rank, self.btb.shape[0])
del _g
else:
self.btb = np.zeros(self.g.shape, np.float64)
nband, nbasis = self.g.shape
for i in range(nbasis):
for k in range(min(nband, nbasis-i)):
self.btb[k,i] = (bt[i] * bt[i+k]).sum()
bty.shape = (1,bty.shape[0])
self.pen = pen
self.chol, self.coef = solveh_banded(self.btb +
pen*self.g,
bty, lower=1)
self.coef = np.squeeze(self.coef)
self.resid = y * self.weights - np.dot(self.coef, bt)
self.pen = pen
del bty
del mask
del bt | Fit the smoothing spline to a set of (x,y) pairs.
INPUTS:
y -- response variable
x -- if None, uses self.x
weights -- optional array of weights
pen -- constant in front of Gram matrix
OUTPUTS: None
The smoothing spline is determined by self.coef,
subsequent calls of __call__ will be the smoothing spline.
ALGORITHM:
Formally, this solves a minimization:
fhat = ARGMIN_f SUM_i=1^n (y_i-f(x_i))^2 + pen * int f^(2)^2
int is integral. pen is lambda (from Hastie)
See Chapter 5 of
Hastie, Tibshirani and Friedman (2001). "The Elements of Statistical
Learning." Springer-Verlag. 536 pages.
for more details.
TODO:
Should add arbitrary derivative penalty instead of just
second derivative. | fit | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
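In the non-banded branch the fit reduces to penalized normal equations, coef = solve(B'B + pen*G, B'y). A dense sketch of that solve with a made-up polynomial basis and an identity matrix standing in for the Gram penalty:

```python
import numpy as np

rng = np.random.default_rng(3)
x = np.sort(rng.uniform(0, 1, 50))
y = np.sin(2 * np.pi * x) + 0.1 * rng.standard_normal(50)

B = np.vander(x, 6, increasing=True)             # toy basis matrix (stand-in for bt.T)
G = np.eye(6)                                    # stand-in for the Gram/penalty matrix
pen = 1e-3

coef = np.linalg.solve(B.T @ B + pen * G, B.T @ y)
fitted = B @ coef                                # penalized least squares fit
print(np.round(coef, 3))
```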
def gcv(self):
"""
Generalized cross-validation score of current fit.
Craven, P. and Wahba, G. "Smoothing noisy data with spline functions.
Estimating the correct degree of smoothing by
the method of generalized cross-validation."
Numerische Mathematik, 31(4), 377-403.
"""
norm_resid = (self.resid**2).sum()
return norm_resid / (self.df_total - self.trace()) | Generalized cross-validation score of current fit.
Craven, P. and Wahba, G. "Smoothing noisy data with spline functions.
Estimating the correct degree of smoothing by
the method of generalized cross-validation."
Numerische Mathematik, 31(4), 377-403. | gcv | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
def df_resid(self):
"""
Residual degrees of freedom in the fit.
self.N - self.trace()
where self.N is the number of observations of last fit.
"""
return self.N - self.trace() | Residual degrees of freedom in the fit.
self.N - self.trace()
where self.N is the number of observations of last fit. | df_resid | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
def df_fit(self):
"""
How many degrees of freedom used in the fit?
self.trace()
"""
return self.trace() | How many degrees of freedom used in the fit?
self.trace() | df_fit | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
def trace(self):
"""
Trace of the smoothing matrix S(pen)
TODO: add in a reference to Wahba, and whoever else I used.
"""
if self.pen > 0:
_invband = _hbspline.invband(self.chol.copy())
tr = _trace_symbanded(_invband, self.btb, lower=1)
return tr
else:
return self.rank | Trace of the smoothing matrix S(pen)
TODO: add in a reference to Wahba, and whoever else I used. | trace | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
def fit_target_df(self, y, x=None, df=None, weights=None, tol=1.0e-03,
apen=0, bpen=1.0e-03):
"""
Fit smoothing spline with approximately df degrees of freedom
used in the fit, i.e. so that self.trace() is approximately df.
Uses binary search strategy.
In general, df must be greater than the dimension of the null space
of the Gram inner product. For cubic smoothing splines, this means
that df > 2.
INPUTS:
y -- response variable
x -- if None, uses self.x
df -- target degrees of freedom
weights -- optional array of weights
tol -- (relative) tolerance for convergence
apen -- lower bound of penalty for binary search
bpen -- upper bound of penalty for binary search
OUTPUTS: None
The smoothing spline is determined by self.coef,
subsequent calls of __call__ will be the smoothing spline.
"""
df = df or self.target_df
y.shape[0] - self.m
if hasattr(self, "pen"):
self.fit(y, x=x, weights=weights, pen=self.pen)
curdf = self.trace()
if np.fabs(curdf - df) / df < tol:
return
if curdf > df:
apen, bpen = self.pen, 2 * self.pen
else:
apen, bpen = 0., self.pen
while True:
curpen = 0.5 * (apen + bpen)
self.fit(y, x=x, weights=weights, pen=curpen)
curdf = self.trace()
if curdf > df:
apen, bpen = curpen, 2 * curpen
else:
apen, bpen = apen, curpen
if apen >= self.penmax:
raise ValueError("penalty too large, try setting penmax \
higher or decreasing df")
if np.fabs(curdf - df) / df < tol:
break | Fit smoothing spline with approximately df degrees of freedom
used in the fit, i.e. so that self.trace() is approximately df.
Uses binary search strategy.
In general, df must be greater than the dimension of the null space
of the Gram inner product. For cubic smoothing splines, this means
that df > 2.
INPUTS:
y -- response variable
x -- if None, uses self.x
df -- target degrees of freedom
weights -- optional array of weights
tol -- (relative) tolerance for convergence
apen -- lower bound of penalty for binary search
bpen -- upper bound of penalty for binary search
OUTPUTS: None
The smoothing spline is determined by self.coef,
subsequent calls of __call__ will be the smoothing spline. | fit_target_df | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
def fit_optimize_gcv(self, y, x=None, weights=None, tol=1.0e-03,
brack=(-100,20)):
"""
Fit smoothing spline trying to optimize GCV.
Try to find a bracketing interval for scipy.optimize.golden
based on bracket.
It is probably best to use target_df instead, as it is
sometimes difficult to find a bracketing interval.
INPUTS:
y -- response variable
x -- if None, uses self.x
df -- target degrees of freedom
weights -- optional array of weights
tol -- (relative) tolerance for convergence
brack -- an initial guess at the bracketing interval
OUTPUTS: None
The smoothing spline is determined by self.coef,
subsequent calls of __call__ will be the smoothing spline.
"""
def _gcv(pen, y, x):
self.fit(y, x=x, pen=np.exp(pen))
a = self.gcv()
return a
golden(_gcv, args=(y,x), brack=brack, tol=tol) | Fit smoothing spline trying to optimize GCV.
Try to find a bracketing interval for scipy.optimize.golden
based on bracket.
It is probably best to use target_df instead, as it is
sometimes difficult to find a bracketing interval.
INPUTS:
y -- response variable
x -- if None, uses self.x
df -- target degrees of freedom
weights -- optional array of weights
tol -- (relative) tolerance for convergence
brack -- an initial guess at the bracketing interval
OUTPUTS: None
The smoothing spline is determined by self.coef,
subsequent calls of __call__ will be the smoothing spline. | fit_optimize_gcv | python | statsmodels/statsmodels | statsmodels/sandbox/bspline.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/bspline.py | BSD-3-Clause |
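The optimizer used here is scipy.optimize.golden applied to the GCV as a function of the log-penalty. A minimal standalone use of golden with a stand-in objective (not the spline's GCV):

```python
import numpy as np
from scipy.optimize import golden

def objective(logpen, target):
    # stand-in for the GCV curve: smooth with a single minimum in logpen
    return (logpen - target) ** 2

best_logpen = golden(objective, args=(-0.7,), brack=(-10, 5), tol=1e-4)
print(best_logpen, np.exp(best_logpen))          # optimum near -0.7; penalty = exp(best_logpen)
```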
def _make_exog_from_formula(result, focus_var, summaries, values, num_points):
"""
Create dataframes for exploring a fitted model as a function of one variable.
This works for models fit with a formula.
Returns
-------
dexog : data frame
A data frame in which the focus variable varies and the other variables
are fixed at specified or computed values.
fexog : data frame
The data frame `dexog` processed through the model formula.
fvals : ndarray
The values of the focus variable at which the prediction is made.
"""
model = result.model
exog = model.data.frame
if summaries is None:
summaries = {}
if values is None:
values = {}
if exog[focus_var].dtype is np.dtype('O'):
raise ValueError('focus variable may not have object type')
colnames = list(summaries.keys()) + list(values.keys()) + [focus_var]
dtypes = [exog[x].dtype for x in colnames]
# Check for variables whose values are not set either through
# `values` or `summaries`. Since the model data frame can contain
# extra variables not referenced in the formula RHS, this may not
# be a problem, so just warn. There is no obvious way to extract
# from a formula all the variable names that it references.
varl = set(exog.columns.tolist()) - {model.endog_names}
unmatched = varl - set(colnames)
unmatched = list(unmatched)
if len(unmatched) > 0:
warnings.warn("%s in data frame but not in summaries or values."
% ", ".join(["'%s'" % x for x in unmatched]),
ValueWarning)
# Initialize at zero so each column can be converted to any dtype.
ix = range(num_points)
fexog = pd.DataFrame(index=ix, columns=colnames)
for d, x in zip(dtypes, colnames):
fexog[x] = pd.Series(index=ix, dtype=d)
# The values of the 'focus variable' are a sequence of percentiles
pctls = np.linspace(0, 100, num_points).tolist()
fvals = np.percentile(exog[focus_var], pctls)
fvals = np.asarray(fvals)
fexog.loc[:, focus_var] = fvals
# The values of the other variables may be given by summary functions...
for ky in summaries.keys():
fexog.loc[:, ky] = summaries[ky](exog.loc[:, ky])
# or they may be provided as given values.
for ky in values.keys():
fexog[ky] = values[ky]
dexog = FormulaManager().get_matrices(model.data.model_spec, fexog, pandas=True)
return dexog, fexog, fvals | Create dataframes for exploring a fitted model as a function of one variable.
This works for models fit with a formula.
Returns
-------
dexog : data frame
A data frame in which the focus variable varies and the other variables
are fixed at specified or computed values.
fexog : data frame
The data frame `dexog` processed through the model formula.
fvals : ndarray
The values of the focus variable at which the prediction is made. | _make_exog_from_formula | python | statsmodels/statsmodels | statsmodels/sandbox/predict_functional.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/predict_functional.py | BSD-3-Clause |
def _make_exog_from_arrays(result, focus_var, summaries, values, num_points):
"""
Create dataframes for exploring a fitted model as a function of one variable.
This works for models fit without a formula.
Returns
-------
exog : data frame
A data frame in which the focus variable varies and the other variables
are fixed at specified or computed values.
"""
model = result.model
model_exog = model.exog
exog_names = model.exog_names
if summaries is None:
summaries = {}
if values is None:
values = {}
exog = np.zeros((num_points, model_exog.shape[1]))
# Check for variables whose values are not set either through
# `values` or `summaries`.
colnames = list(values.keys()) + list(summaries.keys()) + [focus_var]
unmatched = set(exog_names) - set(colnames)
unmatched = list(unmatched)
if len(unmatched) > 0:
warnings.warn("%s in model but not in `summaries` or `values`."
% ", ".join(["'%s'" % x for x in unmatched]),
ValueWarning)
# The values of the 'focus variable' are a sequence of percentiles
pctls = np.linspace(0, 100, num_points).tolist()
ix = exog_names.index(focus_var)
fvals = np.percentile(model_exog[:, ix], pctls)
exog[:, ix] = fvals
# The values of the other variables may be given by summary functions...
for ky in summaries.keys():
ix = exog_names.index(ky)
exog[:, ix] = summaries[ky](model_exog[:, ix])
# or they may be provided as given values.
for ky in values.keys():
ix = exog_names.index(ky)
exog[:, ix] = values[ky]
return exog, fvals | Create dataframes for exploring a fitted model as a function of one variable.
This works for models fit without a formula.
Returns
-------
exog : data frame
A data frame in which the focus variable varies and the other variables
are fixed at specified or computed values. | _make_exog_from_arrays | python | statsmodels/statsmodels | statsmodels/sandbox/predict_functional.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/predict_functional.py | BSD-3-Clause |
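The grid for the focus variable is simply a set of evenly spaced percentiles of its observed values. The same step in isolation, with a hypothetical data column:

```python
import numpy as np

rng = np.random.default_rng(4)
focus_col = rng.gamma(2.0, size=500)             # hypothetical focus-variable column

num_points = 10
pctls = np.linspace(0, 100, num_points)
fvals = np.percentile(focus_col, pctls)          # grid running from the minimum to the maximum
print(np.round(fvals, 2))
```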
def _glm_basic_scr(result, exog, alpha):
"""
The basic SCR from (Sun et al. Annals of Statistics 2000).
Computes simultaneous confidence regions (SCR).
Parameters
----------
result : results instance
The fitted GLM results instance
exog : array_like
The exog values spanning the interval
alpha : float
`1 - alpha` is the coverage probability.
Returns
-------
An array with two columns, containing the lower and upper
confidence bounds, respectively.
Notes
-----
The rows of `exog` should be a sequence of covariate values
obtained by taking one 'free variable' x and varying it over an
interval. The matrix `exog` is thus the basis functions and any
other covariates evaluated as x varies.
"""
model = result.model
n = model.exog.shape[0]
# Get the Hessian without recomputing.
cov = result.cov_params()
hess = np.linalg.inv(cov)
# Proposition 3.1 of Sun et al.
A = hess / n
B = np.linalg.cholesky(A).T # Upper Cholesky triangle
# The variance and SD of the linear predictor at each row of exog.
sigma2 = (np.dot(exog, cov) * exog).sum(axis=1)
sigma = np.asarray(np.sqrt(sigma2))
# Calculate kappa_0 (formula 42 from Sun et al)
bz = np.linalg.solve(B.T, exog.T).T
bz /= np.sqrt(n)
bz /= sigma[:, None]
bzd = np.diff(bz, 1, axis=0)
bzdn = (bzd**2).sum(1)
kappa_0 = np.sqrt(bzdn).sum()
from scipy.stats.distributions import norm
# The root of this function is the multiplier for the confidence
# band, see Sun et al. equation 35.
def func(c):
return kappa_0 * np.exp(-c**2/2) / np.pi + 2*(1 - norm.cdf(c)) - alpha
from scipy.optimize import brentq
c, rslt = brentq(func, 1, 10, full_output=True)
if not rslt.converged:
raise ValueError("Root finding error in basic SCR")
return sigma, c | The basic SCR from (Sun et al. Annals of Statistics 2000).
Computes simultaneous confidence regions (SCR).
Parameters
----------
result : results instance
The fitted GLM results instance
exog : array_like
The exog values spanning the interval
alpha : float
`1 - alpha` is the coverage probability.
Returns
-------
An array with two columns, containing the lower and upper
confidence bounds, respectively.
Notes
-----
The rows of `exog` should be a sequence of covariate values
obtained by taking one 'free variable' x and varying it over an
interval. The matrix `exog` is thus the basis functions and any
other covariates evaluated as x varies. | _glm_basic_scr | python | statsmodels/statsmodels | statsmodels/sandbox/predict_functional.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/predict_functional.py | BSD-3-Clause |
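The multiplier c solves kappa_0 * exp(-c**2/2)/pi + 2*(1 - Phi(c)) = alpha (Sun et al., eq. 35). The root-finding step in isolation, with an assumed kappa_0 (in the method it is computed from eq. 42):

```python
import numpy as np
from scipy.stats import norm
from scipy.optimize import brentq

kappa_0 = 12.0                                   # assumed value for illustration
alpha = 0.05

def func(c):
    return kappa_0 * np.exp(-c**2 / 2) / np.pi + 2 * (1 - norm.cdf(c)) - alpha

c = brentq(func, 1, 10)
print(c)                                         # simultaneous-band multiplier, larger than 1.96
```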
def __call__(self, exog):
'''expected value ? check new GLM, same as mu for given exog
maybe remove this
'''
return self.linkinversepredict(exog) | expected value ? check new GLM, same as mu for given exog
maybe remove this | __call__ | python | statsmodels/statsmodels | statsmodels/sandbox/gam.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/gam.py | BSD-3-Clause |
def linkinversepredict(self, exog): #TODO what's the name in GLM
'''expected value ? check new GLM, same as mu for given exog
'''
return self.family.link.inverse(self.predict(exog)) | expected value ? check new GLM, same as mu for given exog | linkinversepredict | python | statsmodels/statsmodels | statsmodels/sandbox/gam.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/gam.py | BSD-3-Clause |
def predict(self, exog):
'''predict response, sum of smoothed components
TODO: What's this in the case of GLM, corresponds to X*beta ?
'''
#note: sum is here over axis=0,
#TODO: transpose in smoothed and sum over axis=1
#BUG: there is some inconsistent orientation somewhere
#temporary hack, will not work for 1d
#print dir(self)
#print 'self.nobs, self.k_vars', self.nobs, self.k_vars
exog_smoothed = self.smoothed(exog)
#print 'exog_smoothed.shape', exog_smoothed.shape
if exog_smoothed.shape[0] == self.k_vars:
import warnings
warnings.warn("old orientation, colvars, will go away",
FutureWarning)
return np.sum(self.smoothed(exog), axis=0) + self.alpha
if exog_smoothed.shape[1] == self.k_vars:
return np.sum(exog_smoothed, axis=1) + self.alpha
else:
raise ValueError('shape mismatch in predict') | predict response, sum of smoothed components
TODO: What's this in the case of GLM, corresponds to X*beta ? | predict | python | statsmodels/statsmodels | statsmodels/sandbox/gam.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/gam.py | BSD-3-Clause |
def smoothed(self, exog):
'''get smoothed prediction for each component
'''
#bug: with exog in predict I get a shape error
#print 'smoothed', exog.shape, self.smoothers[0].predict(exog).shape
#there was a mistake exog did not have column index i
return np.array([self.smoothers[i].predict(exog[:,i]) + self.offset[i]
#should not be a mistake because exog[:,i] is attached to smoother, but
#it is for different exog
#return np.array([self.smoothers[i].predict() + self.offset[i]
for i in range(exog.shape[1])]).T | get smoothed prediction for each component | smoothed | python | statsmodels/statsmodels | statsmodels/sandbox/gam.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/gam.py | BSD-3-Clause |
def _iter__(self):
'''initialize iteration ?, should be removed
'''
self.iter = 0
self.dev = np.inf
return self | initialize iteration ?, should be removed | _iter__ | python | statsmodels/statsmodels | statsmodels/sandbox/gam.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/gam.py | BSD-3-Clause |
def next(self):
'''internal calculation for one fit iteration
BUG: I think this does not improve, what is supposed to improve
offset does not seem to be used, neither an old alpha
The smoothers keep coef/params from previous iteration
'''
_results = self.results
Y = self.results.Y
mu = _results.predict(self.exog)
#TODO offset is never used ?
offset = np.zeros(self.exog.shape[1], np.float64)
alpha = (Y * self.weights).sum() / self.weights.sum()
for i in range(self.exog.shape[1]):
tmp = self.smoothers[i].predict()
#TODO: check what smooth needs to do
#smooth (alias for fit, fit given x to new y and attach
#print 'next shape', (Y - alpha - mu + tmp).shape
bad = np.isnan(Y - alpha - mu + tmp).any()
if bad: #temporary assert while debugging
print(Y, alpha, mu, tmp)
raise ValueError("nan encountered")
#self.smoothers[i].smooth(Y - alpha - mu + tmp,
self.smoothers[i].smooth(Y - mu + tmp,
weights=self.weights)
tmp2 = self.smoothers[i].predict() #fittedvalues of previous smooth/fit
self.results.offset[i] = -(tmp2*self.weights).sum() / self.weights.sum()
#self.offset used in smoothed
if DEBUG:
print(self.smoothers[i].params)
mu += tmp2 - tmp
#change setting offset here: tests still pass, offset equal to constant
#in component ??? what's the effect of offset
offset = self.results.offset
#print self.iter
#self.iter += 1 #missing incrementing of iter counter NOT
return Results(Y, alpha, self.exog, self.smoothers, self.family, offset) | internal calculation for one fit iteration
BUG: I think this does not improve, what is supposed to improve
offset does not seem to be used, neither an old alpha
The smoothers keep coef/params from previous iteration | next | python | statsmodels/statsmodels | statsmodels/sandbox/gam.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/gam.py | BSD-3-Clause |
def cont(self):
'''condition to continue iteration loop
Parameters
----------
tol
Returns
-------
cont : bool
If true, then iteration should be continued.
'''
self.iter += 1 #moved here to always count, not necessary
if DEBUG:
print(self.iter, self.results.Y.shape)
print(self.results.predict(self.exog).shape, self.weights.shape)
curdev = (((self.results.Y - self.results.predict(self.exog))**2) * self.weights).sum()
if self.iter > self.maxiter: #kill it, no max iterationoption
return False
if np.fabs((self.dev - curdev) / curdev) < self.rtol:
self.dev = curdev
return False
#self.iter += 1
self.dev = curdev
return True | condition to continue iteration loop
Parameters
----------
tol
Returns
-------
cont : bool
If true, then iteration should be continued. | cont | python | statsmodels/statsmodels | statsmodels/sandbox/gam.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/gam.py | BSD-3-Clause |
def df_resid(self):
'''degrees of freedom of residuals, ddof is sum of all smoothers df
'''
return self.results.Y.shape[0] - np.array([self.smoothers[i].df_fit() for i in range(self.exog.shape[1])]).sum() | degrees of freedom of residuals, ddof is sum of all smoothers df | df_resid | python | statsmodels/statsmodels | statsmodels/sandbox/gam.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/gam.py | BSD-3-Clause |
def estimate_scale(self):
'''estimate standard deviation of residuals
'''
#TODO: remove use of self.results.__call__
return ((self.results.Y - self.results(self.exog))**2).sum() / self.df_resid() | estimate standard deviation of residuals | estimate_scale | python | statsmodels/statsmodels | statsmodels/sandbox/gam.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/gam.py | BSD-3-Clause |
def fit(self, Y, rtol=1.0e-06, maxiter=30):
'''fit the model to a given endogenous variable Y
This needs to change for consistency with statsmodels
'''
self.rtol = rtol
self.maxiter = maxiter
#iter(self) # what does this do? anything?
self._iter__()
mu = 0
alpha = (Y * self.weights).sum() / self.weights.sum()
offset = np.zeros(self.exog.shape[1], np.float64)
for i in range(self.exog.shape[1]):
self.smoothers[i].smooth(Y - alpha - mu,
weights=self.weights)
tmp = self.smoothers[i].predict()
offset[i] = (tmp * self.weights).sum() / self.weights.sum()
tmp -= tmp.sum()
mu += tmp
self.results = Results(Y, alpha, self.exog, self.smoothers, self.family, offset)
while self.cont():
self.results = self.next()
if self.iter >= self.maxiter:
warnings.warn(iteration_limit_doc, IterationLimitWarning)
return self.results | fit the model to a given endogenous variable Y
This needs to change for consistency with statsmodels | fit | python | statsmodels/statsmodels | statsmodels/sandbox/gam.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/gam.py | BSD-3-Clause |
def estimate_scale(self, Y=None):
"""
Return Pearson\'s X^2 estimate of scale.
"""
if Y is None:
Y = self.Y
resid = Y - self.results.mu
return (np.power(resid, 2) / self.family.variance(self.results.mu)).sum() \
/ self.df_resid #TODO check this | Return Pearson\'s X^2 estimate of scale. | estimate_scale | python | statsmodels/statsmodels | statsmodels/sandbox/gam.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/gam.py | BSD-3-Clause |
def norm_lls(y, params):
'''normal loglikelihood given observations and mean mu and variance sigma2
Parameters
----------
y : ndarray, 1d
normally distributed random variable
params : ndarray, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
Returns
-------
lls : ndarray
contribution to loglikelihood for each observation
'''
mu, sigma2 = params.T
lls = -0.5*(np.log(2*np.pi) + np.log(sigma2) + (y-mu)**2/sigma2)
return lls | normal loglikelihood given observations and mean mu and variance sigma2
Parameters
----------
y : ndarray, 1d
normally distributed random variable
params : ndarray, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
Returns
-------
lls : ndarray
contribution to loglikelihood for each observation | norm_lls | python | statsmodels/statsmodels | statsmodels/sandbox/regression/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/tools.py | BSD-3-Clause |
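This is the per-observation normal log-density with observation-specific mean and variance, so it can be checked directly against scipy.stats.norm.logpdf (note that scipy takes the standard deviation, not the variance):

```python
import numpy as np
from scipy.stats import norm

y = np.array([0.5, -1.2, 2.0])
mu = np.array([0.0, 0.5, 1.5])
sigma2 = np.array([1.0, 2.0, 0.5])

lls = -0.5 * (np.log(2 * np.pi) + np.log(sigma2) + (y - mu) ** 2 / sigma2)
print(np.allclose(lls, norm.logpdf(y, loc=mu, scale=np.sqrt(sigma2))))   # True
```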
def norm_lls_grad(y, params):
'''Jacobian of normal loglikelihood wrt mean mu and variance sigma2
Parameters
----------
y : ndarray, 1d
normally distributed random variable
params : ndarray, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
Returns
-------
grad : array (nobs, 2)
derivative of loglikelihood for each observation wrt mean in first
column, and wrt variance in second column
Notes
-----
this is actually the derivative wrt sigma not sigma**2, but evaluated
with parameter sigma2 = sigma**2
'''
mu, sigma2 = params.T
dllsdmu = (y-mu)/sigma2
dllsdsigma2 = ((y-mu)**2/sigma2 - 1)/np.sqrt(sigma2)
return np.column_stack((dllsdmu, dllsdsigma2)) | Jacobian of normal loglikelihood wrt mean mu and variance sigma2
Parameters
----------
y : ndarray, 1d
normally distributed random variable
params : ndarray, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
Returns
-------
grad : array (nobs, 2)
derivative of loglikelihood for each observation wrt mean in first
column, and wrt variance in second column
Notes
-----
this is actually the derivative wrt sigma not sigma**2, but evaluated
with parameter sigma2 = sigma**2 | norm_lls_grad | python | statsmodels/statsmodels | statsmodels/sandbox/regression/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/tools.py | BSD-3-Clause |
def mean_grad(x, beta):
'''gradient/Jacobian for d (x*beta)/ d beta
'''
return x | gradient/Jacobian for d (x*beta)/ d beta | mean_grad | python | statsmodels/statsmodels | statsmodels/sandbox/regression/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/tools.py | BSD-3-Clause |
def normgrad(y, x, params):
'''Jacobian of normal loglikelihood wrt mean mu and variance sigma2
Parameters
----------
y : ndarray, 1d
normally distributed random variable with mean x*beta, and variance sigma2
x : ndarray, 2d
explanatory variables, observation in rows, variables in columns
params : array_like, (nvars + 1)
array of coefficients and variance (beta, sigma2)
Returns
-------
grad : array (nobs, 2)
derivative of loglikelihood for each observation wrt mean in first
column, and wrt scale (sigma) in second column
assume params = (beta, sigma2)
Notes
-----
TODO: for heteroscedasticity need sigma to be a 1d array
'''
beta = params[:-1]
sigma2 = params[-1]*np.ones((len(y),1))
dmudbeta = mean_grad(x, beta)
mu = np.dot(x, beta)
#print(beta, sigma2)
params2 = np.column_stack((mu,sigma2))
dllsdms = norm_lls_grad(y,params2)
grad = np.column_stack((dllsdms[:,:1]*dmudbeta, dllsdms[:,:1]))
return grad | Jacobian of normal loglikelihood wrt mean mu and variance sigma2
Parameters
----------
y : ndarray, 1d
normally distributed random variable with mean x*beta, and variance sigma2
x : ndarray, 2d
explanatory variables, observation in rows, variables in columns
params : array_like, (nvars + 1)
array of coefficients and variance (beta, sigma2)
Returns
-------
grad : array (nobs, 2)
derivative of loglikelihood for each observation wrt mean in first
column, and wrt scale (sigma) in second column
assume params = (beta, sigma2)
Notes
-----
TODO: for heteroscedasticity need sigma to be a 1d array | normgrad | python | statsmodels/statsmodels | statsmodels/sandbox/regression/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/tools.py | BSD-3-Clause |
def tstd_lls(y, params, df):
'''t loglikelihood given observations and mean mu and variance sigma2 = 1
Parameters
----------
y : ndarray, 1d
normally distributed random variable
params : ndarray, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
df : int
degrees of freedom of the t distribution
Returns
-------
lls : ndarray
contribution to loglikelihood for each observation
Notes
-----
parametrized for garch
'''
mu, sigma2 = params.T
df = df*1.0
#lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log((df-2)*np.pi)
#lls -= (df+1)/2. * np.log(1. + (y-mu)**2/(df-2.)/sigma2) + 0.5 * np.log(sigma2)
lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log((df-2)*np.pi)
lls -= (df+1)/2. * np.log(1. + (y-mu)**2/(df-2)/sigma2) + 0.5 * np.log(sigma2)
return lls | t loglikelihood given observations and mean mu and variance sigma2 = 1
Parameters
----------
y : ndarray, 1d
normally distributed random variable
params : ndarray, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
df : int
degrees of freedom of the t distribution
Returns
-------
lls : ndarray
contribution to loglikelihood for each observation
Notes
-----
parametrized for garch | tstd_lls | python | statsmodels/statsmodels | statsmodels/sandbox/regression/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/tools.py | BSD-3-Clause |
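Because this parameterization rescales the t distribution so that sigma2 is the variance, it agrees with scipy.stats.t.logpdf evaluated at scale = sqrt(sigma2*(df-2)/df); a quick numerical check of that claim:

```python
import numpy as np
from scipy.special import gammaln
from scipy.stats import t

df = 8.0
y = np.array([0.3, -1.5, 2.2])
mu, sigma2 = 0.1, 1.7

lls = gammaln((df + 1) / 2.) - gammaln(df / 2.) - 0.5 * np.log((df - 2) * np.pi)
lls -= (df + 1) / 2. * np.log(1. + (y - mu) ** 2 / (df - 2) / sigma2) + 0.5 * np.log(sigma2)

scale = np.sqrt(sigma2 * (df - 2) / df)          # the variance of t(df, scale) is then sigma2
print(np.allclose(lls, t.logpdf(y, df, loc=mu, scale=scale)))   # True
```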
def norm_dlldy(y):
'''derivative of log pdf of standard normal with respect to y
'''
return -y | derivative of log pdf of standard normal with respect to y | norm_dlldy | python | statsmodels/statsmodels | statsmodels/sandbox/regression/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/tools.py | BSD-3-Clause |
def tstd_pdf(x, df):
'''pdf for standardized (not standard) t distribution, variance is one
'''
r = np.array(df*1.0)
Px = np.exp(special.gammaln((r+1)/2.)-special.gammaln(r/2.))/np.sqrt((r-2)*np.pi)
Px /= (1+(x**2)/(r-2))**((r+1)/2.)
return Px | pdf for standardized (not standard) t distribution, variance is one | tstd_pdf | python | statsmodels/statsmodels | statsmodels/sandbox/regression/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/tools.py | BSD-3-Clause |
def ts_lls(y, params, df):
'''t loglikelihood given observations and mean mu and variance sigma2 = 1
Parameters
----------
y : ndarray, 1d
normally distributed random variable
params : ndarray, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
df : int
degrees of freedom of the t distribution
Returns
-------
lls : ndarray
contribution to loglikelihood for each observation
Notes
-----
parametrized for garch
normalized/rescaled so that sigma2 is the variance
>>> df = 10; sigma = 1.
>>> stats.t.stats(df, loc=0., scale=sigma*np.sqrt((df-2.)/df))
(array(0.0), array(1.0))
>>> sigma = np.sqrt(2.)
>>> stats.t.stats(df, loc=0., scale=sigma*np.sqrt((df-2.)/df))
(array(0.0), array(2.0))
'''
print(y, params, df)
mu, sigma2 = params.T
df = df*1.0
#lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log((df-2)*np.pi)
#lls -= (df+1)/2. * np.log(1. + (y-mu)**2/(df-2.)/sigma2) + 0.5 * np.log(sigma2)
lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log((df)*np.pi)
lls -= (df+1.)/2. * np.log(1. + (y-mu)**2/(df)/sigma2) + 0.5 * np.log(sigma2)
return lls | t loglikelihood given observations and mean mu and variance sigma2 = 1
Parameters
----------
y : ndarray, 1d
normally distributed random variable
params : ndarray, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
df : int
degrees of freedom of the t distribution
Returns
-------
lls : ndarray
contribution to loglikelihood for each observation
Notes
-----
parametrized for garch
normalized/rescaled so that sigma2 is the variance
>>> df = 10; sigma = 1.
>>> stats.t.stats(df, loc=0., scale=sigma*np.sqrt((df-2.)/df))
(array(0.0), array(1.0))
>>> sigma = np.sqrt(2.)
>>> stats.t.stats(df, loc=0., scale=sigma*np.sqrt((df-2.)/df))
(array(0.0), array(2.0)) | ts_lls | python | statsmodels/statsmodels | statsmodels/sandbox/regression/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/tools.py | BSD-3-Clause |
def ts_dlldy(y, df):
'''derivative of log pdf of standard t with respect to y
Parameters
----------
y : array_like
data points of random variable at which loglike is evaluated
df : array_like
degrees of freedom,shape parameters of log-likelihood function
of t distribution
Returns
-------
dlldy : ndarray
derivative of loglikelihood wrt random variable y evaluated at the
points given in y
Notes
-----
with mean 0 and scale 1, but variance is df/(df-2)
'''
df = df*1.
#(df+1)/2. / (1 + y**2/(df-2.)) * 2.*y/(df-2.)
#return -(df+1)/(df-2.) / (1 + y**2/(df-2.)) * y
return -(df+1)/(df) / (1 + y**2/(df)) * y | derivative of log pdf of standard t with respect to y
Parameters
----------
y : array_like
data points of random variable at which loglike is evaluated
df : array_like
degrees of freedom,shape parameters of log-likelihood function
of t distribution
Returns
-------
dlldy : ndarray
derivative of loglikelihood wrt random variable y evaluated at the
points given in y
Notes
-----
with mean 0 and scale 1, but variance is df/(df-2) | ts_dlldy | python | statsmodels/statsmodels | statsmodels/sandbox/regression/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/tools.py | BSD-3-Clause |
def tstd_dlldy(y, df):
'''derivative of log pdf of standardized t with respect to y
Parameters
----------
y : array_like
data points of random variable at which loglike is evaluated
df : array_like
degrees of freedom,shape parameters of log-likelihood function
of t distribution
Returns
-------
dlldy : ndarray
derivative of loglikelihood wrt random variable y evaluated at the
points given in y
Notes
-----
parametrized for garch, standardized to variance=1
'''
#(df+1)/2. / (1 + y**2/(df-2.)) * 2.*y/(df-2.)
return -(df+1)/(df-2.) / (1 + y**2/(df-2.)) * y | derivative of log pdf of standardized t with respect to y
Parameters
----------
y : array_like
data points of random variable at which loglike is evaluated
df : array_like
degrees of freedom,shape parameters of log-likelihood function
of t distribution
Returns
-------
dlldy : ndarray
derivative of loglikelihood wrt random variable y evaluated at the
points given in y
Notes
-----
parametrized for garch, standardized to variance=1 | tstd_dlldy | python | statsmodels/statsmodels | statsmodels/sandbox/regression/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/tools.py | BSD-3-Clause |
def locscale_grad(y, loc, scale, dlldy, *args):
'''derivative of log-likelihood with respect to location and scale
Parameters
----------
y : array_like
data points of random variable at which loglike is evaluated
loc : float
location parameter of distribution
scale : float
scale parameter of distribution
dlldy : function
derivative of loglikelihood function wrt. random variable x
args : array_like
shape parameters of log-likelihood function
Returns
-------
dlldloc : ndarray
derivative of loglikelihood wrt location evaluated at the
points given in y
dlldscale : ndarray
derivative of loglikelihood wrt scale evaluated at the
points given in y
'''
yst = (y-loc)/scale #ystandardized
dlldloc = -dlldy(yst, *args) / scale
dlldscale = -1./scale - dlldy(yst, *args) * (y-loc)/scale**2
return dlldloc, dlldscale | derivative of log-likelihood with respect to location and scale
Parameters
----------
y : array_like
data points of random variable at which loglike is evaluated
loc : float
location parameter of distribution
scale : float
scale parameter of distribution
dlldy : function
derivative of loglikelihood function wrt. random variable x
args : array_like
shape parameters of log-likelihood function
Returns
-------
dlldloc : ndarray
derivative of loglikelihood wrt location evaluated at the
points given in y
dlldscale : ndarray
derivative of loglikelihood wrt scale evaluated at the
points given in y | locscale_grad | python | statsmodels/statsmodels | statsmodels/sandbox/regression/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/tools.py | BSD-3-Clause |
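A quick way to sanity-check the chain rule used here is to compare against central finite differences of a log-density; below, the standard-normal score dlldy(y) = -y (norm_dlldy above) is plugged in and checked against scipy.stats.norm.logpdf:

```python
import numpy as np
from scipy.stats import norm

dlldy = lambda z: -z                             # standard-normal score, as in norm_dlldy
y, loc, scale, eps = 1.3, 0.4, 2.0, 1e-6

yst = (y - loc) / scale
dlldloc = -dlldy(yst) / scale
dlldscale = -1.0 / scale - dlldy(yst) * (y - loc) / scale ** 2

# central finite differences of the normal log-density for comparison
num_loc = (norm.logpdf(y, loc + eps, scale) - norm.logpdf(y, loc - eps, scale)) / (2 * eps)
num_scale = (norm.logpdf(y, loc, scale + eps) - norm.logpdf(y, loc, scale - eps)) / (2 * eps)
print(np.allclose([dlldloc, dlldscale], [num_loc, num_scale]))  # True
```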
def xbetas(self, params):
'''these are the V_i
'''
res = np.empty((self.nobs, self.nchoices))
for choiceind in range(self.nchoices):
res[:,choiceind] = np.dot(self.exog_bychoices[choiceind],
params[self.beta_indices[choiceind]])
return res | these are the V_i | xbetas | python | statsmodels/statsmodels | statsmodels/sandbox/regression/runmnl.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/runmnl.py | BSD-3-Clause |
def xbetas(self, params):
'''these are the V_i
'''
res = np.empty((self.nobs, self.nchoices))
for choiceind in range(self.nchoices):
res[:,choiceind] = np.dot(self.exog_bychoices[choiceind],
params[self.beta_indices[choiceind]])
return res | these are the V_i | xbetas | python | statsmodels/statsmodels | statsmodels/sandbox/regression/runmnl.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/runmnl.py | BSD-3-Clause |
def calc_prob(self, tree, keys=None):
'''walking a tree bottom-up based on dictionary
'''
datadict = self.datadict
if isinstance(tree, tuple): #assumes leaves are int for choice index
name, subtree = tree
print(name, datadict[name])
print('subtree', subtree)
keys = []
if testxb:
branchsum = datadict[name]
else:
branchsum = name #0
for b in subtree:
print(b)
#branchsum += branch2(b)
branchsum = branchsum + self.calc_prob(b, keys)
print('branchsum', branchsum, keys)
for k in keys:
self.probs[k] = self.probs[k] + ['*' + name + '-prob']
else:
keys.append(tree)
self.probs[tree] = [tree + '-prob' +
'(%s)' % ', '.join(self.paramsind[tree])]
if testxb:
leavessum = sum(datadict[bi] for bi in tree)
print('final branch with', tree, ''.join(tree), leavessum) #sum(tree)
return leavessum #sum(xb[tree])
else:
return ''.join(tree) #sum(tree)
print('working on branch', tree, branchsum)
return branchsum | walking a tree bottom-up based on dictionary | calc_prob | python | statsmodels/statsmodels | statsmodels/sandbox/regression/runmnl.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/runmnl.py | BSD-3-Clause |
def fitbygroups(self):
'''Fit OLS regression for each group separately.
Returns
-------
results are attached
olsbygroup : dictionary of result instance
the returned regression results for each group
sigmabygroup : array (ngroups,) (this should be called sigma2group ??? check)
mse_resid for each group
weights : array (nobs,)
standard deviation of group extended to the original observations. This can
be used as weights in WLS for group-wise heteroscedasticity.
'''
olsbygroup = {}
sigmabygroup = []
for gi, group in enumerate(self.unique): #np.arange(len(self.unique))):
groupmask = self.groupsint == gi #group index
res = OLS(self.endog[groupmask], self.exog[groupmask]).fit()
olsbygroup[group] = res
sigmabygroup.append(res.mse_resid)
self.olsbygroup = olsbygroup
self.sigmabygroup = np.array(sigmabygroup)
self.weights = np.sqrt(self.sigmabygroup[self.groupsint]) #TODO:chk sqrt | Fit OLS regression for each group separately.
Returns
-------
results are attached
olsbygroup : dictionary of result instance
the returned regression results for each group
sigmabygroup : array (ngroups,) (this should be called sigma2group ??? check)
mse_resid for each group
weights : array (nobs,)
standard deviation of group extended to the original observations. This can
be used as weights in WLS for group-wise heteroscedasticity. | fitbygroups | python | statsmodels/statsmodels | statsmodels/sandbox/regression/onewaygls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/onewaygls.py | BSD-3-Clause |
def fitjoint(self):
'''fit a joint fixed effects model to all observations
The regression results are attached as `lsjoint`.
The contrasts for overall and pairwise tests for equality of coefficients are
attached as a dictionary `contrasts`. This also includes the contrasts for the test
that the coefficients of a level are zero. ::
>>> res.contrasts.keys()
[(0, 1), 1, 'all', 3, (1, 2), 2, (1, 3), (2, 3), (0, 3), (0, 2)]
The keys are based on the original names or labels of the groups.
TODO: keys can be numpy scalars and then the keys cannot be sorted
'''
if not hasattr(self, 'weights'):
self.fitbygroups()
groupdummy = (self.groupsint[:,None] == self.uniqueint).astype(int)
#order of dummy variables by variable - not used
#dummyexog = self.exog[:,:,None]*groupdummy[:,None,1:]
        #order of dummy variables by groups - used
dummyexog = self.exog[:,None,:]*groupdummy[:,1:,None]
exog = np.c_[self.exog, dummyexog.reshape(self.exog.shape[0],-1)] #self.nobs ??
#Notes: I changed to drop first group from dummy
#instead I want one full set dummies
if self.het:
weights = self.weights
res = WLS(self.endog, exog, weights=weights).fit()
else:
res = OLS(self.endog, exog).fit()
self.lsjoint = res
contrasts = {}
nvars = self.exog.shape[1]
nparams = exog.shape[1]
ndummies = nparams - nvars
contrasts['all'] = np.c_[np.zeros((ndummies, nvars)), np.eye(ndummies)]
for groupind, group in enumerate(self.unique[1:]): #need enumerate if groups != groupsint
groupind = groupind + 1
contr = np.zeros((nvars, nparams))
contr[:,nvars*groupind:nvars*(groupind+1)] = np.eye(nvars)
contrasts[group] = contr
#save also for pairs, see next
contrasts[(self.unique[0], group)] = contr
#Note: I'm keeping some duplication for testing
pairs = np.triu_indices(len(self.unique),1)
for ind1,ind2 in zip(*pairs): #replace with group1, group2 in sorted(keys)
if ind1 == 0:
continue # need comparison with benchmark/normalization group separate
g1 = self.unique[ind1]
g2 = self.unique[ind2]
group = (g1, g2)
contr = np.zeros((nvars, nparams))
contr[:,nvars*ind1:nvars*(ind1+1)] = np.eye(nvars)
contr[:,nvars*ind2:nvars*(ind2+1)] = -np.eye(nvars)
contrasts[group] = contr
self.contrasts = contrasts | fit a joint fixed effects model to all observations
The regression results are attached as `lsjoint`.
The contrasts for overall and pairwise tests for equality of coefficients are
attached as a dictionary `contrasts`. This also includes the contrasts for the test
that the coefficients of a level are zero. ::
>>> res.contrasts.keys()
[(0, 1), 1, 'all', 3, (1, 2), 2, (1, 3), (2, 3), (0, 3), (0, 2)]
The keys are based on the original names or labels of the groups.
TODO: keys can be numpy scalars and then the keys cannot be sorted | fitjoint | python | statsmodels/statsmodels | statsmodels/sandbox/regression/onewaygls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/onewaygls.py | BSD-3-Clause |
def fitpooled(self):
'''fit the pooled model, which assumes there are no differences across groups
'''
if self.het:
if not hasattr(self, 'weights'):
self.fitbygroups()
weights = self.weights
res = WLS(self.endog, self.exog, weights=weights).fit()
else:
res = OLS(self.endog, self.exog).fit()
self.lspooled = res | fit the pooled model, which assumes there are no differences across groups | fitpooled | python | statsmodels/statsmodels | statsmodels/sandbox/regression/onewaygls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/onewaygls.py | BSD-3-Clause |
def ftest_summary(self):
'''run all ftests on the joint model
Returns
-------
fres : str
a string that lists the results of all individual f-tests
summarytable : list of tuples
contains (pair, (fvalue, pvalue,df_denom, df_num)) for each f-test
Note
----
        These are the raw results, not formatted for nice printing.
'''
if not hasattr(self, 'lsjoint'):
self.fitjoint()
txt = []
summarytable = []
txt.append('F-test for equality of coefficients across groups')
fres = self.lsjoint.f_test(self.contrasts['all'])
txt.append(fres.__str__())
summarytable.append(('all',(fres.fvalue, fres.pvalue, fres.df_denom, fres.df_num)))
# for group in self.unique[1:]: #replace with group1, group2 in sorted(keys)
# txt.append('F-test for equality of coefficients between group'
# ' %s and group %s' % (group, '0'))
# fres = self.lsjoint.f_test(self.contrasts[group])
# txt.append(fres.__str__())
# summarytable.append((group,(fres.fvalue, fres.pvalue, fres.df_denom, fres.df_num)))
pairs = np.triu_indices(len(self.unique),1)
for ind1,ind2 in zip(*pairs): #replace with group1, group2 in sorted(keys)
g1 = self.unique[ind1]
g2 = self.unique[ind2]
txt.append('F-test for equality of coefficients between group'
' %s and group %s' % (g1, g2))
group = (g1, g2)
fres = self.lsjoint.f_test(self.contrasts[group])
txt.append(fres.__str__())
summarytable.append((group,(fres.fvalue, fres.pvalue, fres.df_denom, fres.df_num)))
self.summarytable = summarytable
return '\n'.join(txt), summarytable | run all ftests on the joint model
Returns
-------
fres : str
a string that lists the results of all individual f-tests
summarytable : list of tuples
contains (pair, (fvalue, pvalue,df_denom, df_num)) for each f-test
Note
----
    These are the raw results, not formatted for nice printing. | ftest_summary | python | statsmodels/statsmodels | statsmodels/sandbox/regression/onewaygls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/onewaygls.py | BSD-3-Clause
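A hedged usage sketch for the class these methods belong to (the class name OneWayLS and the groups keyword are inferred from the attributes used above, not shown in this record; data are simulated):
import numpy as np
from statsmodels.sandbox.regression.onewaygls import OneWayLS  # name assumed

np.random.seed(0)
nobs = 60
groups = np.repeat([0, 1, 2], nobs // 3)
x = np.column_stack([np.ones(nobs), np.random.randn(nobs)])
y = x @ [1.0, 0.5] + 0.4 * (groups == 2) + 0.5 * np.random.randn(nobs)
res = OneWayLS(y, x, groups=groups)   # keyword assumed
txt, table = res.ftest_summary()      # overall and pairwise equality tests
print(txt)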
def print_summary(self, res):
'''printable string of summary
'''
groupind = res.groups
#res.fitjoint() #not really necessary, because called by ftest_summary
if hasattr(res, 'self.summarytable'):
summtable = self.summarytable
else:
_, summtable = res.ftest_summary()
#print ft[0] #skip because table is nicer
templ = \
'''Table of F-tests for overall or pairwise equality of coefficients'
%(tab)s
Notes: p-values are not corrected for many tests
(no Bonferroni correction)
* : reject at 5%% uncorrected confidence level
Null hypothesis: all or pairwise coefficient are the same'
Alternative hypothesis: all coefficients are different'
Comparison with stats.f_oneway
%(statsfow)s
Likelihood Ratio Test
%(lrtest)s
Null model: pooled all coefficients are the same across groups,'
Alternative model: all coefficients are allowed to be different'
not verified but looks close to f-test result'
OLS parameters by group from individual, separate ols regressions'
%(olsbg)s
for group in sorted(res.olsbygroup):
r = res.olsbygroup[group]
print group, r.params
Check for heteroscedasticity, '
variance and standard deviation for individual regressions'
%(grh)s
variance ', res.sigmabygroup
standard dev', np.sqrt(res.sigmabygroup)
'''
from statsmodels.iolib import SimpleTable
resvals = {}
resvals['tab'] = str(SimpleTable([([f'{row[0]!r}']
+ list(row[1])
+ ['*']*(row[1][1]>0.5).item() ) for row in summtable],
headers=['pair', 'F-statistic','p-value','df_denom',
'df_num']))
resvals['statsfow'] = str(stats.f_oneway(*[res.endog[groupind==gr] for gr in
res.unique]))
#resvals['lrtest'] = str(res.lr_test())
resvals['lrtest'] = str(SimpleTable([res.lr_test()],
headers=['likelihood ratio', 'p-value', 'df'] ))
resvals['olsbg'] = str(SimpleTable([[group]
+ res.olsbygroup[group].params.tolist()
for group in sorted(res.olsbygroup)]))
resvals['grh'] = str(SimpleTable(np.vstack([res.sigmabygroup,
np.sqrt(res.sigmabygroup)]),
headers=res.unique.tolist()))
return templ % resvals | printable string of summary | print_summary | python | statsmodels/statsmodels | statsmodels/sandbox/regression/onewaygls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/onewaygls.py | BSD-3-Clause |
def branch(tree):
'''walking a tree bottom-up
'''
if not isinstance(tree[0], int): #assumes leaves are int for choice index
branchsum = 0
for b in tree:
branchsum += branch(b)
else:
print(tree)
print('final branch with', tree, sum(tree))
if testxb:
return sum(xb[tree])
else:
return sum(tree)
print('working on branch', tree, branchsum)
return branchsum | walking a tree bottom-up | branch | python | statsmodels/statsmodels | statsmodels/sandbox/regression/try_treewalker.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/try_treewalker.py | BSD-3-Clause |
def branch2(tree):
'''walking a tree bottom-up based on dictionary
'''
if isinstance(tree, tuple): #assumes leaves are int for choice index
name, subtree = tree
print(name, data2[name])
print('subtree', subtree)
if testxb:
branchsum = data2[name]
else:
branchsum = name #0
for b in subtree:
#branchsum += branch2(b)
branchsum = branchsum + branch2(b)
else:
leavessum = sum(data2[bi] for bi in tree)
print('final branch with', tree, ''.join(tree), leavessum) #sum(tree)
if testxb:
return leavessum #sum(xb[tree])
else:
return ''.join(tree) #sum(tree)
print('working on branch', tree, branchsum)
return branchsum | walking a tree bottom-up based on dictionary | branch2 | python | statsmodels/statsmodels | statsmodels/sandbox/regression/try_treewalker.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/try_treewalker.py | BSD-3-Clause |
def groupstatsbin(factors, values):
'''uses np.bincount, assumes factors/labels are integers
'''
ix,rind = np.unique(factors, return_inverse=1)
gcount = np.bincount(rind)
gmean = np.bincount(rind, weights=values)/ (1.0*gcount)
meanarr = gmean[rind]
withinvar = np.bincount(rind, weights=(values-meanarr)**2) / (1.0*gcount)
withinvararr = withinvar[rind]
return gcount, gmean , meanarr, withinvar, withinvararr | uses np.bincount, assumes factors/labels are integers | groupstatsbin | python | statsmodels/statsmodels | statsmodels/sandbox/regression/try_catdata.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/try_catdata.py | BSD-3-Clause |
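An illustrative sketch of the bincount-based group statistics above (import path taken from this record; the data are made up):
import numpy as np
from statsmodels.sandbox.regression.try_catdata import groupstatsbin

factors = np.array([0, 0, 1, 1, 1, 2])
values = np.array([1., 3., 2., 4., 6., 5.])
gcount, gmean, meanarr, withinvar, withinvararr = groupstatsbin(factors, values)
print(gcount)   # [2 3 1]
print(gmean)    # [2. 4. 5.]
print(meanarr)  # group means broadcast back to the observations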
def convertlabels(ys, indices=None):
'''convert labels based on multiple variables or string labels to unique
index labels 0,1,2,...,nk-1 where nk is the number of distinct labels
'''
if indices is None:
ylabel = ys
else:
idx = np.array(indices)
if idx.size > 1 and ys.ndim == 2:
ylabel = np.array(['@%s@' % ii[:2].tostring() for ii in ys])[:,np.newaxis]
#alternative
## if ys[:,idx].dtype.kind == 'S':
## ylabel = nd.array([' '.join(ii[:2]) for ii in ys])[:,np.newaxis]
else:
# there might be a problem here
ylabel = ys
unil, unilinv = np.unique(ylabel, return_index=False, return_inverse=True)
return unilinv, np.arange(len(unil)), unil | convert labels based on multiple variables or string labels to unique
index labels 0,1,2,...,nk-1 where nk is the number of distinct labels | convertlabels | python | statsmodels/statsmodels | statsmodels/sandbox/regression/try_catdata.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/try_catdata.py | BSD-3-Clause |
def groupsstats_1d(y, x, labelsunique):
'''use ndimage to get fast mean and variance'''
labelmeans = np.array(ndimage.mean(x, labels=y, index=labelsunique))
labelvars = np.array(ndimage.var(x, labels=y, index=labelsunique))
return labelmeans, labelvars | use ndimage to get fast mean and variance | groupsstats_1d | python | statsmodels/statsmodels | statsmodels/sandbox/regression/try_catdata.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/try_catdata.py | BSD-3-Clause |
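The ndimage variant above returns the same group means; a small check (assuming groupsstats_1d from the same module is in scope, and note that the first argument is the label array):
import numpy as np
labels = np.array([0, 0, 1, 1, 1, 2])
vals = np.array([1., 3., 2., 4., 6., 5.])
gmeans, gvars = groupsstats_1d(labels, vals, np.unique(labels))
print(gmeans)   # [2. 4. 5.], matching groupstatsbin above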
def pdf(x, mu, sigma):
"""Return the probability density function as an expression in x"""
#x = sy.sympify(x)
return 1/(sigma*sy.sqrt(2*sy.pi)) * sy.exp(-(x-mu)**2 / (2*sigma**2)) | Return the probability density function as an expression in x | pdf | python | statsmodels/statsmodels | statsmodels/sandbox/regression/sympy_diff.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/sympy_diff.py | BSD-3-Clause |
def cdf(x, mu, sigma):
"""Return the cumulative density function as an expression in x"""
#x = sy.sympify(x)
    return (1+sy.erf((x-mu)/(sigma*sy.sqrt(2))))/2 | Return the cumulative distribution function as an expression in x | cdf | python | statsmodels/statsmodels | statsmodels/sandbox/regression/sympy_diff.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/sympy_diff.py | BSD-3-Clause
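A small symbolic check (a sketch that assumes the pdf and cdf helpers above are in scope): differentiating the cdf should recover the pdf.
import sympy as sy
x, mu = sy.symbols('x mu', real=True)
sigma = sy.symbols('sigma', positive=True)
check = sy.simplify(sy.diff(cdf(x, mu, sigma), x) - pdf(x, mu, sigma))
print(check)   # expected to print 0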
def __init__(self, x, y=None, kernel=kernel_rbf,
scale=0.5, ridgecoeff = 1e-10, **kwds ):
'''
Parameters
----------
x : 2d array (N,K)
data array of explanatory variables, columns represent variables
rows represent observations
y : 2d array (N,1) (optional)
endogenous variable that should be fitted or predicted
can alternatively be specified as parameter to fit method
kernel : function, default: kernel_rbf
            kernel: (x1,x2)->kernel matrix is a function that takes as parameters
            two column arrays and returns the kernel or distance matrix
scale : float (optional)
smoothing parameter for the rbf kernel
ridgecoeff : float (optional)
coefficient that is multiplied with the identity matrix in the
ridge regression
Notes
-----
After initialization, kernel matrix is calculated and if y is given
as parameter then also the linear regression parameter and the
fitted or estimated y values, yest, are calculated. yest is available
as an attribute in this case.
Both scale and the ridge coefficient smooth the fitted curve.
'''
self.x = x
self.kernel = kernel
self.scale = scale
self.ridgecoeff = ridgecoeff
self.distxsample = kernel(x,x,scale=scale)
self.Kinv = np.linalg.inv(self.distxsample +
np.eye(*self.distxsample.shape)*ridgecoeff)
if y is not None:
self.y = y
self.yest = self.fit(y) | Parameters
----------
x : 2d array (N,K)
data array of explanatory variables, columns represent variables
rows represent observations
y : 2d array (N,1) (optional)
endogenous variable that should be fitted or predicted
can alternatively be specified as parameter to fit method
kernel : function, default: kernel_rbf
        kernel: (x1,x2)->kernel matrix is a function that takes as parameters
        two column arrays and returns the kernel or distance matrix
scale : float (optional)
smoothing parameter for the rbf kernel
ridgecoeff : float (optional)
coefficient that is multiplied with the identity matrix in the
ridge regression
Notes
-----
After initialization, kernel matrix is calculated and if y is given
as parameter then also the linear regression parameter and the
fitted or estimated y values, yest, are calculated. yest is available
as an attribute in this case.
Both scale and the ridge coefficient smooth the fitted curve. | __init__ | python | statsmodels/statsmodels | statsmodels/sandbox/regression/kernridgeregress_class.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/kernridgeregress_class.py | BSD-3-Clause |
def fit(self,y):
    '''fit the training explanatory variables to a sample output variable'''
self.parest = np.dot(self.Kinv, y) #self.kernel(y,y,scale=self.scale))
yhat = np.dot(self.distxsample,self.parest)
    return yhat | fit the training explanatory variables to a sample output variable | fit | python | statsmodels/statsmodels | statsmodels/sandbox/regression/kernridgeregress_class.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/kernridgeregress_class.py | BSD-3-Clause
def predict(self,x):
'''predict new y values for a given array of explanatory variables'''
self.xpredict = x
distxpredict = self.kernel(x, self.x, scale=self.scale)
self.ypredict = np.dot(distxpredict, self.parest)
return self.ypredict | predict new y values for a given array of explanatory variables | predict | python | statsmodels/statsmodels | statsmodels/sandbox/regression/kernridgeregress_class.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/kernridgeregress_class.py | BSD-3-Clause |
def plot(self, y, plt=plt ):
'''some basic plots'''
#todo return proper graph handles
plt.figure()
plt.plot(self.x,self.y, 'bo-', self.x, self.yest, 'r.-')
plt.title('sample (training) points')
plt.figure()
plt.plot(self.xpredict,y,'bo-',self.xpredict,self.ypredict,'r.-')
plt.title('all points') | some basic plots | plot | python | statsmodels/statsmodels | statsmodels/sandbox/regression/kernridgeregress_class.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/kernridgeregress_class.py | BSD-3-Clause |
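A hypothetical end-to-end sketch for the smoother whose methods are listed above (the class name GaussProcess is assumed from the module; data are simulated):
import numpy as np
from statsmodels.sandbox.regression.kernridgeregress_class import GaussProcess  # name assumed

np.random.seed(0)
xs = np.linspace(0, 4, 50)[:, None]
ys = np.sin(xs) + 0.2 * np.random.randn(50, 1)
gp = GaussProcess(xs, ys, scale=0.5, ridgecoeff=1e-4)   # yest is attached after init
xnew = np.linspace(0, 4, 200)[:, None]
yhat = gp.predict(xnew)                                  # smoothed values at new points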
def data2dummy(x, returnall=False):
'''convert array of categories to dummy variables
by default drops dummy variable for last category
uses ravel, 1d only'''
x = x.ravel()
groups = np.unique(x)
if returnall:
return (x[:, None] == groups).astype(int)
else:
return (x[:, None] == groups).astype(int)[:,:-1] | convert array of categories to dummy variables
by default drops dummy variable for last category
uses ravel, 1d only | data2dummy | python | statsmodels/statsmodels | statsmodels/sandbox/regression/ols_anova_original.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/ols_anova_original.py | BSD-3-Clause |
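A quick illustration of the dummy coding above (assuming data2dummy is in scope):
import numpy as np
x = np.array(['a', 'b', 'a', 'c'])
print(data2dummy(x))                   # indicators for 'a' and 'b'; last category 'c' dropped
print(data2dummy(x, returnall=True))   # full set of indicator columns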
def data2proddummy(x):
'''creates product dummy variables from 2 columns of 2d array
drops last dummy variable, but not from each category
singular with simple dummy variable but not with constant
quickly written, no safeguards
'''
#brute force, assumes x is 2d
#replace with encoding if possible
groups = np.unique(lmap(tuple, x.tolist()))
#includes singularity with additive factors
return (x==groups[:,None,:]).all(-1).T.astype(int)[:,:-1] | creates product dummy variables from 2 columns of 2d array
drops last dummy variable, but not from each category
singular with simple dummy variable but not with constant
quickly written, no safeguards | data2proddummy | python | statsmodels/statsmodels | statsmodels/sandbox/regression/ols_anova_original.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/ols_anova_original.py | BSD-3-Clause |
def data2groupcont(x1,x2):
'''create dummy continuous variable
Parameters
----------
x1 : 1d array
label or group array
x2 : 1d array (float)
continuous variable
Notes
-----
useful for group specific slope coefficients in regression
'''
if x2.ndim == 1:
x2 = x2[:,None]
dummy = data2dummy(x1, returnall=True)
return dummy * x2 | create dummy continuous variable
Parameters
----------
x1 : 1d array
label or group array
x2 : 1d array (float)
continuous variable
Notes
-----
useful for group specific slope coefficients in regression | data2groupcont | python | statsmodels/statsmodels | statsmodels/sandbox/regression/ols_anova_original.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/ols_anova_original.py | BSD-3-Clause |
def anovadict(res):
'''update regression results dictionary with ANOVA specific statistics
not checked for completeness
'''
ad = {}
ad.update(res.__dict__)
anova_attr = ['df_model', 'df_resid', 'ess', 'ssr','uncentered_tss',
'mse_model', 'mse_resid', 'mse_total', 'fvalue', 'f_pvalue',
'rsquared']
for key in anova_attr:
ad[key] = getattr(res, key)
ad['nobs'] = res.model.nobs
ad['ssmwithmean'] = res.uncentered_tss - res.ssr
return ad | update regression results dictionary with ANOVA specific statistics
not checked for completeness | anovadict | python | statsmodels/statsmodels | statsmodels/sandbox/regression/ols_anova_original.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/ols_anova_original.py | BSD-3-Clause |
def form2design(ss, data):
'''convert string formula to data dictionary
ss : str
* I : add constant
* varname : for simple varnames data is used as is
* F:varname : create dummy variables for factor varname
* P:varname1*varname2 : create product dummy variables for
varnames
* G:varname1*varname2 : create product between factor and
continuous variable
data : dict or structured array
data set, access of variables by name as in dictionaries
Returns
-------
vars : dictionary
dictionary of variables with converted dummy variables
names : list
list of names, product (P:) and grouped continuous
variables (G:) have name by joining individual names
sorted according to input
Examples
--------
>>> xx, n = form2design('I a F:b P:c*d G:c*f', testdata)
>>> xx.keys()
['a', 'b', 'const', 'cf', 'cd']
>>> n
['const', 'a', 'b', 'cd', 'cf']
Notes
-----
with sorted dict, separate name list would not be necessary
'''
vars = {}
names = []
for item in ss.split():
if item == 'I':
vars['const'] = np.ones(data.shape[0])
names.append('const')
elif ':' not in item:
vars[item] = data[item]
names.append(item)
elif item[:2] == 'F:':
v = item.split(':')[1]
vars[v] = data2dummy(data[v])
names.append(v)
elif item[:2] == 'P:':
v = item.split(':')[1].split('*')
vars[''.join(v)] = data2proddummy(np.c_[data[v[0]],data[v[1]]])
names.append(''.join(v))
elif item[:2] == 'G:':
v = item.split(':')[1].split('*')
vars[''.join(v)] = data2groupcont(data[v[0]], data[v[1]])
names.append(''.join(v))
else:
raise ValueError('unknown expression in formula')
return vars, names | convert string formula to data dictionary
ss : str
* I : add constant
* varname : for simple varnames data is used as is
* F:varname : create dummy variables for factor varname
* P:varname1*varname2 : create product dummy variables for
varnames
* G:varname1*varname2 : create product between factor and
continuous variable
data : dict or structured array
data set, access of variables by name as in dictionaries
Returns
-------
vars : dictionary
dictionary of variables with converted dummy variables
names : list
list of names, product (P:) and grouped continuous
variables (G:) have name by joining individual names
sorted according to input
Examples
--------
>>> xx, n = form2design('I a F:b P:c*d G:c*f', testdata)
>>> xx.keys()
['a', 'b', 'const', 'cf', 'cd']
>>> n
['const', 'a', 'b', 'cd', 'cf']
Notes
-----
with sorted dict, separate name list would not be necessary | form2design | python | statsmodels/statsmodels | statsmodels/sandbox/regression/ols_anova_original.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/ols_anova_original.py | BSD-3-Clause |
def dropname(ss, li):
'''drop names from a list of strings,
names to drop are in space delimited list
does not change original list
'''
newli = li[:]
for item in ss.split():
newli.remove(item)
return newli | drop names from a list of strings,
names to drop are in space delimited list
does not change original list | dropname | python | statsmodels/statsmodels | statsmodels/sandbox/regression/ols_anova_original.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/ols_anova_original.py | BSD-3-Clause |
def data2dummy(x, returnall=False):
'''convert array of categories to dummy variables
by default drops dummy variable for last category
uses ravel, 1d only'''
x = x.ravel()
groups = np.unique(x)
if returnall:
return (x[:, None] == groups).astype(int)
else:
return (x[:, None] == groups).astype(int)[:,:-1] | convert array of categories to dummy variables
by default drops dummy variable for last category
uses ravel, 1d only | data2dummy | python | statsmodels/statsmodels | statsmodels/sandbox/regression/try_ols_anova.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/try_ols_anova.py | BSD-3-Clause |
def data2proddummy(x):
'''creates product dummy variables from 2 columns of 2d array
drops last dummy variable, but not from each category
singular with simple dummy variable but not with constant
quickly written, no safeguards
'''
#brute force, assumes x is 2d
#replace with encoding if possible
groups = np.unique(lmap(tuple, x.tolist()))
#includes singularity with additive factors
return (x==groups[:,None,:]).all(-1).T.astype(int)[:,:-1] | creates product dummy variables from 2 columns of 2d array
drops last dummy variable, but not from each category
singular with simple dummy variable but not with constant
quickly written, no safeguards | data2proddummy | python | statsmodels/statsmodels | statsmodels/sandbox/regression/try_ols_anova.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/try_ols_anova.py | BSD-3-Clause |
def data2groupcont(x1,x2):
'''create dummy continuous variable
Parameters
----------
x1 : 1d array
label or group array
x2 : 1d array (float)
continuous variable
Notes
-----
useful for group specific slope coefficients in regression
'''
if x2.ndim == 1:
x2 = x2[:,None]
dummy = data2dummy(x1, returnall=True)
return dummy * x2 | create dummy continuous variable
Parameters
----------
x1 : 1d array
label or group array
x2 : 1d array (float)
continuous variable
Notes
-----
useful for group specific slope coefficients in regression | data2groupcont | python | statsmodels/statsmodels | statsmodels/sandbox/regression/try_ols_anova.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/try_ols_anova.py | BSD-3-Clause |
def anovadict(res):
'''update regression results dictionary with ANOVA specific statistics
not checked for completeness
'''
ad = {}
ad.update(res.__dict__) #dict does not work with cached attributes
anova_attr = ['df_model', 'df_resid', 'ess', 'ssr','uncentered_tss',
'mse_model', 'mse_resid', 'mse_total', 'fvalue', 'f_pvalue',
'rsquared']
for key in anova_attr:
ad[key] = getattr(res, key)
ad['nobs'] = res.model.nobs
ad['ssmwithmean'] = res.uncentered_tss - res.ssr
return ad | update regression results dictionary with ANOVA specific statistics
not checked for completeness | anovadict | python | statsmodels/statsmodels | statsmodels/sandbox/regression/try_ols_anova.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/try_ols_anova.py | BSD-3-Clause |
def form2design(ss, data):
'''convert string formula to data dictionary
ss : str
* I : add constant
* varname : for simple varnames data is used as is
* F:varname : create dummy variables for factor varname
* P:varname1*varname2 : create product dummy variables for
varnames
* G:varname1*varname2 : create product between factor and
continuous variable
data : dict or structured array
data set, access of variables by name as in dictionaries
Returns
-------
vars : dictionary
dictionary of variables with converted dummy variables
names : list
list of names, product (P:) and grouped continuous
variables (G:) have name by joining individual names
sorted according to input
Examples
--------
>>> xx, n = form2design('I a F:b P:c*d G:c*f', testdata)
>>> xx.keys()
['a', 'b', 'const', 'cf', 'cd']
>>> n
['const', 'a', 'b', 'cd', 'cf']
Notes
-----
with sorted dict, separate name list would not be necessary
'''
vars = {}
names = []
for item in ss.split():
if item == 'I':
vars['const'] = np.ones(data.shape[0])
names.append('const')
elif ':' not in item:
vars[item] = data[item]
names.append(item)
elif item[:2] == 'F:':
v = item.split(':')[1]
vars[v] = data2dummy(data[v])
names.append(v)
elif item[:2] == 'P:':
v = item.split(':')[1].split('*')
vars[''.join(v)] = data2proddummy(np.c_[data[v[0]],data[v[1]]])
names.append(''.join(v))
elif item[:2] == 'G:':
v = item.split(':')[1].split('*')
vars[''.join(v)] = data2groupcont(data[v[0]], data[v[1]])
names.append(''.join(v))
else:
raise ValueError('unknown expression in formula')
return vars, names | convert string formula to data dictionary
ss : str
* I : add constant
* varname : for simple varnames data is used as is
* F:varname : create dummy variables for factor varname
* P:varname1*varname2 : create product dummy variables for
varnames
* G:varname1*varname2 : create product between factor and
continuous variable
data : dict or structured array
data set, access of variables by name as in dictionaries
Returns
-------
vars : dictionary
dictionary of variables with converted dummy variables
names : list
list of names, product (P:) and grouped continuous
variables (G:) have name by joining individual names
sorted according to input
Examples
--------
>>> xx, n = form2design('I a F:b P:c*d G:c*f', testdata)
>>> xx.keys()
['a', 'b', 'const', 'cf', 'cd']
>>> n
['const', 'a', 'b', 'cd', 'cf']
Notes
-----
with sorted dict, separate name list would not be necessary | form2design | python | statsmodels/statsmodels | statsmodels/sandbox/regression/try_ols_anova.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/try_ols_anova.py | BSD-3-Clause |
def dropname(ss, li):
'''drop names from a list of strings,
names to drop are in space delimited list
does not change original list
'''
newli = li[:]
for item in ss.split():
newli.remove(item)
return newli | drop names from a list of strings,
names to drop are in space delimited list
does not change original list | dropname | python | statsmodels/statsmodels | statsmodels/sandbox/regression/try_ols_anova.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/try_ols_anova.py | BSD-3-Clause |
def fit(self, pen_weight=1., cov_type='sandwich', use_t=True):
"""Estimate parameters and return results instance
Parameters
----------
pen_weight : float
penalization factor for the restriction, default is 1.
cov_type : str, 'data-prior' or 'sandwich'
'data-prior' assumes that the stochastic restriction reflects a
previous sample. The covariance matrix of the parameter estimate
is in this case the same form as the one of GLS.
The covariance matrix for cov_type='sandwich' treats the stochastic
restriction (R and q) as fixed and has a sandwich form analogously
to M-estimators.
Returns
-------
results : TheilRegressionResults instance
Notes
-----
cov_params for cov_type data-prior, is calculated as
.. math:: \\sigma^2 A^{-1}
cov_params for cov_type sandwich, is calculated as
.. math:: \\sigma^2 A^{-1} (X'X) A^{-1}
        where :math:`A = X' \\Sigma X + \\lambda \\sigma^2 R' \\Sigma_p^{-1} R`
:math:`\\sigma^2` is an estimate of the error variance.
:math:`\\sigma^2` inside A is replaced by the estimate from the initial
GLS estimate. :math:`\\sigma^2` in cov_params is obtained from the
residuals of the final estimate.
The sandwich form of the covariance estimator is not robust to
misspecified heteroscedasticity or autocorrelation.
"""
lambd = pen_weight
#this does duplicate transformation, but I need resid not wresid
res_gls = GLS(self.endog, self.exog, sigma=self.sigma).fit()
self.res_gls = res_gls
sigma2_e = res_gls.mse_resid
r_matrix = self.r_matrix
q_matrix = self.q_matrix
sigma_prior_inv = self.sigma_prior_inv
x = self.wexog
y = self.wendog[:,None]
#why are sigma2_e * lambd multiplied, not ratio?
#larger lambd -> stronger prior (it's not the variance)
# Bayesian: lambd is precision = 1/sigma2_prior
#print('lambd inside fit', lambd
xx = np.dot(x.T, x)
xpx = xx + \
sigma2_e * lambd * np.dot(r_matrix.T, np.dot(sigma_prior_inv, r_matrix))
xpy = np.dot(x.T, y) + \
sigma2_e * lambd * np.dot(r_matrix.T, np.dot(sigma_prior_inv, q_matrix))
#xpy = xpy[:,None]
xpxi = np.linalg.pinv(xpx, rcond=1e-15**2) #to match pinv(x) in OLS case
xpxi_sandwich = xpxi.dot(xx).dot(xpxi)
params = np.dot(xpxi, xpy) #or solve
params = np.squeeze(params)
# normalized_cov_params should have sandwich form xpxi @ xx @ xpxi
if cov_type == 'sandwich':
normalized_cov_params = xpxi_sandwich
elif cov_type == 'data-prior':
normalized_cov_params = xpxi #why attach it to self, i.e. model?
else:
raise ValueError("cov_type has to be 'sandwich' or 'data-prior'")
self.normalized_cov_params = xpxi_sandwich
self.xpxi = xpxi
self.sigma2_e = sigma2_e
lfit = TheilRegressionResults(self, params,
normalized_cov_params=normalized_cov_params, use_t=use_t)
lfit.penalization_factor = lambd
return lfit | Estimate parameters and return results instance
Parameters
----------
pen_weight : float
penalization factor for the restriction, default is 1.
cov_type : str, 'data-prior' or 'sandwich'
'data-prior' assumes that the stochastic restriction reflects a
previous sample. The covariance matrix of the parameter estimate
is in this case the same form as the one of GLS.
The covariance matrix for cov_type='sandwich' treats the stochastic
restriction (R and q) as fixed and has a sandwich form analogously
to M-estimators.
Returns
-------
results : TheilRegressionResults instance
Notes
-----
cov_params for cov_type data-prior, is calculated as
.. math:: \\sigma^2 A^{-1}
cov_params for cov_type sandwich, is calculated as
.. math:: \\sigma^2 A^{-1} (X'X) A^{-1}
        where :math:`A = X' \\Sigma X + \\lambda \\sigma^2 R' \\Sigma_p^{-1} R`
:math:`\\sigma^2` is an estimate of the error variance.
:math:`\\sigma^2` inside A is replaced by the estimate from the initial
GLS estimate. :math:`\\sigma^2` in cov_params is obtained from the
residuals of the final estimate.
The sandwich form of the covariance estimator is not robust to
misspecified heteroscedasticity or autocorrelation. | fit | python | statsmodels/statsmodels | statsmodels/sandbox/regression/penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/penalized.py | BSD-3-Clause |
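A hedged sketch of how the estimator above might be called (the class name TheilGLS and the constructor keywords r_matrix, q_matrix, sigma_prior are inferred from the attributes used in fit, not shown in this record; data are simulated):
import numpy as np
from statsmodels.sandbox.regression.penalized import TheilGLS  # name assumed

np.random.seed(0)
nobs = 50
exog = np.column_stack([np.ones(nobs), np.random.randn(nobs)])
endog = exog @ [1.0, 0.8] + np.random.randn(nobs)
r_matrix = np.array([[0., 1.]])     # restriction picks out the slope
q_matrix = np.array([[1.]])         # prior mean for the slope
sigma_prior = np.array([[1.]])      # prior variance of the restriction
mod = TheilGLS(endog, exog, r_matrix=r_matrix, q_matrix=q_matrix,
               sigma_prior=sigma_prior)           # keywords assumed
res = mod.fit(pen_weight=5., cov_type='sandwich')
print(res.params)                   # slope shrunk towards the prior mean 1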
def select_pen_weight(self, method='aicc', start_params=1., optim_args=None):
"""find penalization factor that minimizes gcv or an information criterion
Parameters
----------
method : str
the name of an attribute of the results class. Currently the following
are available aic, aicc, bic, gc and gcv.
start_params : float
starting values for the minimization to find the penalization factor
            `lambd`. Note: since there can be local minima, it is best to try
different starting values.
optim_args : None or dict
optimization keyword arguments used with `scipy.optimize.fmin`
Returns
-------
min_pen_weight : float
The penalization factor at which the target criterion is (locally)
minimized.
Notes
-----
This uses `scipy.optimize.fmin` as optimizer.
"""
if optim_args is None:
optim_args = {}
#this does not make sense, since number of parameters stays unchanged
# information criteria changes if we use df_model based on trace(hat_matrix)
#need leave-one-out, gcv; or some penalization for weak priors
#added extra penalization for lambd
def get_ic(lambd):
# this can be optimized more
# for pure Ridge we can keep the eigenvector decomposition
return getattr(self.fit(lambd), method)
from scipy import optimize
lambd = optimize.fmin(get_ic, start_params, **optim_args)
return lambd | find penalization factor that minimizes gcv or an information criterion
Parameters
----------
method : str
the name of an attribute of the results class. Currently the following
are available aic, aicc, bic, gc and gcv.
start_params : float
starting values for the minimization to find the penalization factor
        `lambd`. Note: since there can be local minima, it is best to try
different starting values.
optim_args : None or dict
optimization keyword arguments used with `scipy.optimize.fmin`
Returns
-------
min_pen_weight : float
The penalization factor at which the target criterion is (locally)
minimized.
Notes
-----
This uses `scipy.optimize.fmin` as optimizer. | select_pen_weight | python | statsmodels/statsmodels | statsmodels/sandbox/regression/penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/penalized.py | BSD-3-Clause |
def hatmatrix_diag(self):
'''diagonal of hat matrix
diag(X' xpxi X)
where xpxi = (X'X + sigma2_e * lambd * sigma_prior)^{-1}
Notes
-----
uses wexog, so this includes weights or sigma - check this case
not clear whether I need to multiply by sigmahalf, i.e.
(W^{-0.5} X) (X' W X)^{-1} (W^{-0.5} X)' or
(W X) (X' W X)^{-1} (W X)'
projection y_hat = H y or in terms of transformed variables (W^{-0.5} y)
might be wrong for WLS and GLS case
'''
# TODO is this still correct with sandwich normalized_cov_params, I guess not
xpxi = self.model.normalized_cov_params
#something fishy with self.normalized_cov_params in result, does not update
#print(self.model.wexog.shape, np.dot(xpxi, self.model.wexog.T).shape
return (self.model.wexog * np.dot(xpxi, self.model.wexog.T).T).sum(1) | diagonal of hat matrix
diag(X' xpxi X)
where xpxi = (X'X + sigma2_e * lambd * sigma_prior)^{-1}
Notes
-----
uses wexog, so this includes weights or sigma - check this case
not clear whether I need to multiply by sigmahalf, i.e.
(W^{-0.5} X) (X' W X)^{-1} (W^{-0.5} X)' or
(W X) (X' W X)^{-1} (W X)'
projection y_hat = H y or in terms of transformed variables (W^{-0.5} y)
might be wrong for WLS and GLS case | hatmatrix_diag | python | statsmodels/statsmodels | statsmodels/sandbox/regression/penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/penalized.py | BSD-3-Clause |
def hatmatrix_trace(self):
"""trace of hat matrix
"""
return self.hatmatrix_diag.sum() | trace of hat matrix | hatmatrix_trace | python | statsmodels/statsmodels | statsmodels/sandbox/regression/penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/penalized.py | BSD-3-Clause |
def test_compatibility(self):
"""Hypothesis test for the compatibility of prior mean with data
"""
# TODO: should we store the OLS results ? not needed so far, but maybe cache
#params_ols = np.linalg.pinv(self.model.exog).dot(self.model.endog)
#res = self.wald_test(self.model.r_matrix, q_matrix=self.model.q_matrix, use_f=False)
#from scratch
res_ols = OLS(self.model.endog, self.model.exog).fit()
r_mat = self.model.r_matrix
r_diff = self.model.q_matrix - r_mat.dot(res_ols.params)[:,None]
ols_cov_r = res_ols.cov_params(r_matrix=r_mat)
statistic = r_diff.T.dot(np.linalg.solve(ols_cov_r + self.model.sigma_prior, r_diff))
from scipy import stats
df = np.linalg.matrix_rank(self.model.sigma_prior) # same as r_mat.shape[0]
pvalue = stats.chi2.sf(statistic, df)
# TODO: return results class
return statistic, pvalue, df | Hypothesis test for the compatibility of prior mean with data | test_compatibility | python | statsmodels/statsmodels | statsmodels/sandbox/regression/penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/penalized.py | BSD-3-Clause |
def share_data(self):
"""a measure for the fraction of the data in the estimation result
The share of the prior information is `1 - share_data`.
Returns
-------
share : float between 0 and 1
            share of data defined as the ratio between effective degrees of
freedom of the model and the number (TODO should be rank) of the
explanatory variables.
"""
# this is hatmatrix_trace / self.exog.shape[1]
# This needs to use rank of exog and not shape[1],
# since singular exog is allowed
return (self.df_model + 1) / self.model.rank # + 1 is for constant | a measure for the fraction of the data in the estimation result
The share of the prior information is `1 - share_data`.
Returns
-------
share : float between 0 and 1
        share of data defined as the ratio between effective degrees of
freedom of the model and the number (TODO should be rank) of the
explanatory variables. | share_data | python | statsmodels/statsmodels | statsmodels/sandbox/regression/penalized.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/penalized.py | BSD-3-Clause |
def randintw(w, size=1):
    '''generate integer random variables given probabilities
useful because it can be used as index into any array or sequence type
Parameters
----------
w : 1d array_like
sequence of weights, probabilities. The weights are normalized to add
to one.
size : int or tuple of ints
shape of output array
Returns
-------
rvs : array of shape given by size
random variables each distributed according to the same discrete
distribution defined by (normalized) w.
Examples
--------
>>> np.random.seed(0)
>>> randintw([0.4, 0.4, 0.2], size=(2,6))
array([[1, 1, 1, 1, 1, 1],
[1, 2, 2, 0, 1, 1]])
>>> np.bincount(randintw([0.6, 0.4, 0.0], size=3000))/3000.
array([ 0.59566667, 0.40433333])
'''
#from Charles Harris, numpy mailing list
from numpy.random import random
p = np.cumsum(w)/np.sum(w)
rvs = p.searchsorted(random(np.prod(size))).reshape(size)
    return rvs | generate integer random variables given probabilities
useful because it can be used as index into any array or sequence type
Parameters
----------
w : 1d array_like
sequence of weights, probabilities. The weights are normalized to add
to one.
size : int or tuple of ints
shape of output array
Returns
-------
rvs : array of shape given by size
random variables each distributed according to the same discrete
distribution defined by (normalized) w.
Examples
--------
>>> np.random.seed(0)
>>> randintw([0.4, 0.4, 0.2], size=(2,6))
array([[1, 1, 1, 1, 1, 1],
[1, 2, 2, 0, 1, 1]])
>>> np.bincount(randintw([0.6, 0.4, 0.0], size=3000))/3000.
array([ 0.59566667, 0.40433333]) | randintw | python | statsmodels/statsmodels | statsmodels/sandbox/regression/treewalkerclass.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/treewalkerclass.py | BSD-3-Clause |
def getbranches(tree):
'''
walk tree to get list of branches
Parameters
----------
tree : list of tuples
tree as defined for RU2NMNL
Returns
-------
branch : list
list of all branch names
'''
if isinstance(tree, tuple):
name, subtree = tree
a = [name]
for st in subtree:
a.extend(getbranches(st))
return a
return [] | walk tree to get list of branches
Parameters
----------
tree : list of tuples
tree as defined for RU2NMNL
Returns
-------
branch : list
list of all branch names | getbranches | python | statsmodels/statsmodels | statsmodels/sandbox/regression/treewalkerclass.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/treewalkerclass.py | BSD-3-Clause |
def getnodes(tree):
'''
walk tree to get list of branches and list of leaves
Parameters
----------
tree : list of tuples
tree as defined for RU2NMNL
Returns
-------
branch : list
list of all branch names
leaves : list
list of all leaves names
'''
if isinstance(tree, tuple):
name, subtree = tree
ab = [name]
al = []
#degenerate branches
if len(subtree) == 1:
adeg = [name]
else:
adeg = []
for st in subtree:
b_val, l_val, d_val = getnodes(st)
ab.extend(b_val)
al.extend(l_val)
adeg.extend(d_val)
return ab, al, adeg
return [], [tree], [] | walk tree to get list of branches and list of leaves
Parameters
----------
tree : list of tuples
tree as defined for RU2NMNL
Returns
-------
branch : list
list of all branch names
leaves : list
list of all leaves names | getnodes | python | statsmodels/statsmodels | statsmodels/sandbox/regression/treewalkerclass.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/treewalkerclass.py | BSD-3-Clause |
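A small illustration of the two tree walkers above (assuming they are in scope) on a nested tree like the ones used by RU2NMNL:
tree = ('top', [('B1', ['a', 'b']), ('B2', ['c', ('B21', ['d'])])])
print(getbranches(tree))               # ['top', 'B1', 'B2', 'B21']
branches, leaves, degenerate = getnodes(tree)
print(leaves)                          # ['a', 'b', 'c', 'd']
print(degenerate)                      # ['B21'], the single-child branch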
def get_probs(self, params):
'''
obtain the probability array given an array of parameters
This is the function that can be called by loglike or other methods
that need the probabilities as function of the params.
Parameters
----------
params : 1d array, (nparams,)
coefficients and tau that parameterize the model. The required
length can be obtained by nparams. (and will depend on the number
of degenerate leaves - not yet)
Returns
-------
probs : ndarray, (nobs, nchoices)
probabilities for all choices for each observation. The order
is available by attribute leaves. See note in docstring of class
'''
self.recursionparams = params
self.calc_prob(self.tree)
probs_array = np.array([self.probs[leaf] for leaf in self.leaves])
return probs_array | obtain the probability array given an array of parameters
This is the function that can be called by loglike or other methods
that need the probabilities as function of the params.
Parameters
----------
params : 1d array, (nparams,)
coefficients and tau that parameterize the model. The required
length can be obtained by nparams. (and will depend on the number
of degenerate leaves - not yet)
Returns
-------
probs : ndarray, (nobs, nchoices)
probabilities for all choices for each observation. The order
is available by attribute leaves. See note in docstring of class | get_probs | python | statsmodels/statsmodels | statsmodels/sandbox/regression/treewalkerclass.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/treewalkerclass.py | BSD-3-Clause |
def calc_prob(self, tree, parent=None):
'''walking a tree bottom-up based on dictionary
'''
#0.5#2 #placeholder for now
#should be tau=self.taus[name] but as part of params for optimization
datadict = self.datadict
branchsum = self.branchsum
if isinstance(tree, tuple): #assumes leaves are int for choice index
name, subtree = tree
self.branchleaves[name] = [] #register branch in dictionary
tau = self.recursionparams[self.paramsidx['tau_'+name]]
if DEBUG:
print('----------- starting next branch-----------')
print(name, datadict[name], 'tau=', tau)
print('subtree', subtree)
branchvalue = []
if testxb == 2:
branchsum = 0
elif testxb == 1:
branchsum = datadict[name]
else:
branchsum = name
for b in subtree:
if DEBUG:
print(b)
bv = self.calc_prob(b, name)
bv = np.exp(bv/tau) #this should not be here, when adding branch data
branchvalue.append(bv)
branchsum = branchsum + bv
self.branchvalues[name] = branchvalue #keep track what was returned
if DEBUG:
print('----------- returning to branch-----------')
print(name)
print('branchsum in branch', name, branchsum)
if parent:
if DEBUG:
print('parent', parent)
self.branchleaves[parent].extend(self.branchleaves[name])
if 0: #not name == 'top': # not used anymore !!! ???
#if not name == 'top':
#TODO: do I need this only on the lowest branches ?
tmpsum = 0
for k in self.branchleaves[name]:
#similar to this is now also in return branch values
#depends on what will be returned
tmpsum += self.probs[k]
np.log(tmpsum)
for k in self.branchleaves[name]:
self.probstxt[k] = self.probstxt[k] + ['*' + name + '-prob' +
'(%s)' % ', '.join(self.paramsind[name])]
#TODO: does this use the denominator twice now
self.probs[k] = self.probs[k] / tmpsum
if np.size(self.datadict[name])>0:
#not used yet, might have to move one indentation level
#self.probs[k] = self.probs[k] / tmpsum
## np.exp(-self.datadict[name] *
## np.sum(self.recursionparams[self.parinddict[name]]))
if DEBUG:
print('self.datadict[name], self.probs[k]')
print(self.datadict[name], self.probs[k])
#if not name == 'top':
# self.probs[k] = self.probs[k] * np.exp( iv)
#walk one level down again to add branch probs to instance.probs
self.bprobs[name] = []
for bidx, b in enumerate(subtree):
if DEBUG:
print('repr(b)', repr(b), bidx)
#if len(b) == 1: #TODO: skip leaves, check this
if not isinstance(b, tuple): # isinstance(b, str):
#TODO: replace this with a check for branch (tuple) instead
#this implies name is a bottom branch,
#possible to add special things here
self.bprobs[name].append(self.probs[b])
#TODO: need tau possibly here
self.probs[b] = self.probs[b] / branchsum
if DEBUG:
print('*********** branchsum at bottom branch', branchsum)
#self.bprobs[name].append(self.probs[b])
else:
bname = b[0]
branchsum2 = sum(self.branchvalues[name])
assert np.abs(branchsum - branchsum2).sum() < 1e-8
bprob = branchvalue[bidx]/branchsum
self.bprobs[name].append(bprob)
for k in self.branchleaves[bname]:
if DEBUG:
print('branchprob', bname, k, bprob, branchsum)
#temporary hack with maximum to avoid zeros
self.probs[k] = self.probs[k] * np.maximum(bprob, 1e-4)
if DEBUG:
print('working on branch', tree, branchsum)
if testxb<2:
return branchsum
else: #this is the relevant part
self.branchsums[name] = branchsum
if np.size(self.datadict[name])>0:
branchxb = np.sum(self.datadict[name] *
self.recursionparams[self.parinddict[name]])
else:
branchxb = 0
if not name=='top':
tau = self.recursionparams[self.paramsidx['tau_'+name]]
else:
tau = 1
# Unused result commented out
# branchxb + tau * branchsum #which tau: name or parent???
return branchxb + tau * np.log(branchsum) #iv
#branchsum is now IV, TODO: add effect of branch variables
else:
tau = self.recursionparams[self.paramsidx['tau_'+parent]]
if DEBUG:
print('parent', parent)
self.branchleaves[parent].append(tree) # register leave with parent
self.probstxt[tree] = [tree + '-prob' +
'(%s)' % ', '.join(self.paramsind[tree])]
#this is not yet a prob, not normalized to 1, it is exp(x*b)
leafprob = np.exp(np.sum(self.datadict[tree] *
self.recursionparams[self.parinddict[tree]])
/ tau) # fake tau for now, wrong spot ???
#it seems I get the same answer with and without tau here
self.probs[tree] = leafprob #= 1 #try initialization only
#TODO: where should I add tau in the leaves
if testxb == 2:
return np.log(leafprob)
elif testxb == 1:
leavessum = np.array(datadict[tree]) # sum((datadict[bi] for bi in datadict[tree]))
if DEBUG:
print('final branch with', tree, ''.join(tree), leavessum) #sum(tree)
return leavessum #sum(xb[tree])
elif testxb == 0:
return ''.join(tree) #sum(tree) | walking a tree bottom-up based on dictionary | calc_prob | python | statsmodels/statsmodels | statsmodels/sandbox/regression/treewalkerclass.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/treewalkerclass.py | BSD-3-Clause |
def maxabs(x):
'''just a shortcut to np.abs(x).max()
'''
return np.abs(x).max() | just a shortcut to np.abs(x).max() | maxabs | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause |
def whiten(self, X):
"""Not implemented"""
pass | Not implemented | whiten | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause |
def fit(self):
'''estimate model using 2SLS IV regression
Returns
-------
results : instance of RegressionResults
regression result
Notes
-----
This returns a generic RegressioResults instance as defined for the
linear models.
Parameter estimates and covariance are correct, but other results
have not been tested yet, to see whether they apply without changes.
'''
#Greene 5th edt., p.78 section 5.4
#move this maybe
y,x,z = self.endog, self.exog, self.instrument
# TODO: this uses "textbook" calculation, improve linalg
ztz = np.dot(z.T, z)
ztx = np.dot(z.T, x)
self.xhatparams = xhatparams = np.linalg.solve(ztz, ztx)
#print 'x.T.shape, xhatparams.shape', x.shape, xhatparams.shape
F = xhat = np.dot(z, xhatparams)
FtF = np.dot(F.T, F)
self.xhatprod = FtF #store for Housman specification test
Ftx = np.dot(F.T, x)
Fty = np.dot(F.T, y)
params = np.linalg.solve(FtF, Fty)
Ftxinv = np.linalg.inv(Ftx)
self.normalized_cov_params = np.dot(Ftxinv.T, np.dot(FtF, Ftxinv))
lfit = IVRegressionResults(self, params,
normalized_cov_params=self.normalized_cov_params)
lfit.exog_hat_params = xhatparams
lfit.exog_hat = xhat # TODO: do we want to store this, might be large
self._results_ols2nd = OLS(y, xhat).fit()
return RegressionResultsWrapper(lfit) | estimate model using 2SLS IV regression
Returns
-------
results : instance of RegressionResults
regression result
Notes
-----
    This returns a generic RegressionResults instance as defined for the
linear models.
Parameter estimates and covariance are correct, but other results
have not been tested yet, to see whether they apply without changes. | fit | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause |
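A hedged usage sketch with simulated data (one endogenous regressor and one excluded instrument; the argument order endog, exog, instrument follows the attributes used above):
import numpy as np
from statsmodels.sandbox.regression.gmm import IV2SLS

np.random.seed(12345)
nobs = 500
z = np.random.randn(nobs)                         # excluded instrument
u = np.random.randn(nobs)                         # structural error
x_endog = 0.8 * z + 0.5 * u + np.random.randn(nobs)
y = 1.0 + 2.0 * x_endog + u
exog = np.column_stack([np.ones(nobs), x_endog])
instrument = np.column_stack([np.ones(nobs), z])
res_iv = IV2SLS(y, exog, instrument).fit()
print(res_iv.params)                              # roughly [1., 2.]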
def predict(self, params, exog=None):
"""
Return linear predicted values from a design matrix.
Parameters
----------
exog : array_like
Design / exogenous data
params : array_like, optional after fit has been called
Parameters of a linear model
Returns
-------
An array of fitted values
Notes
-----
        If the model has not yet been fit, params is not optional.
"""
if exog is None:
exog = self.exog
return np.dot(exog, params) | Return linear predicted values from a design matrix.
Parameters
----------
exog : array_like
Design / exogenous data
params : array_like, optional after fit has been called
Parameters of a linear model
Returns
-------
An array of fitted values
Notes
-----
    If the model has not yet been fit, params is not optional. | predict | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause
def spec_hausman(self, dof=None):
'''Hausman's specification test
See Also
--------
spec_hausman : generic function for Hausman's specification test
'''
#use normalized cov_params for OLS
endog, exog = self.model.endog, self.model.exog
resols = OLS(endog, exog).fit()
normalized_cov_params_ols = resols.model.normalized_cov_params
# Stata `ivendog` does not use df correction for se
#se2 = resols.mse_resid #* resols.df_resid * 1. / len(endog)
se2 = resols.ssr / len(endog)
params_diff = self.params - resols.params
cov_diff = np.linalg.pinv(self.model.xhatprod) - normalized_cov_params_ols
#TODO: the following is very inefficient, solves problem (svd) twice
#use linalg.lstsq or svd directly
#cov_diff will very often be in-definite (singular)
if not dof:
dof = np.linalg.matrix_rank(cov_diff)
cov_diffpinv = np.linalg.pinv(cov_diff)
H = np.dot(params_diff, np.dot(cov_diffpinv, params_diff))/se2
pval = stats.chi2.sf(H, dof)
return H, pval, dof | Hausman's specification test
See Also
--------
spec_hausman : generic function for Hausman's specification test | spec_hausman | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause |
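Continuing the hedged 2SLS sketch above, the Hausman specification test is then available on the results instance:
H, pval, dof = res_iv.spec_hausman()
print(H, pval, dof)   # a small p-value points to endogeneity of x_endog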
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
            Default is `var_##` for each of the p regressors
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
#TODO: import where we need it (for now), add as cached attributes
from statsmodels.stats.stattools import (
durbin_watson,
jarque_bera,
omni_normtest,
)
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
#TODO: reuse condno from somewhere else ?
#condno = np.linalg.cond(np.dot(self.wexog.T, self.wexog))
wexog = self.model.wexog
eigvals = np.linalg.eigvalsh(np.dot(wexog.T, wexog))
eigvals = np.sort(eigvals) #in increasing order
condno = np.sqrt(eigvals[-1]/eigvals[0])
# TODO: check what is valid.
        # box-pierce, breusch-pagan, durbin's h are not valid with endogenous regressors on the rhs
# use Cumby Huizinga 1992 instead
self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,
omni=omni, omnipv=omnipv, condno=condno,
mineigval=eigvals[0])
#TODO not used yet
#diagn_left_header = ['Models stats']
#diagn_right_header = ['Residual stats']
#TODO: requiring list/iterable is a bit annoying
#need more control over formatting
#TODO: default do not work if it's not identically spelled
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Two Stage']),
('', ['Least Squares']),
('Date:', None),
('Time:', None),
('No. Observations:', None),
('Df Residuals:', None), #[self.df_resid]), #TODO: spelling
('Df Model:', None), #[self.df_model])
]
top_right = [('R-squared:', ["%#8.3f" % self.rsquared]),
('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]),
('F-statistic:', ["%#8.4g" % self.fvalue] ),
('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
#('Log-Likelihood:', None), #["%#6.4g" % self.llf]),
#('AIC:', ["%#8.4g" % self.aic]),
#('BIC:', ["%#8.4g" % self.bic])
]
diagn_left = [('Omnibus:', ["%#6.3f" % omni]),
('Prob(Omnibus):', ["%#6.3f" % omnipv]),
('Skew:', ["%#6.3f" % skew]),
('Kurtosis:', ["%#6.3f" % kurtosis])
]
diagn_right = [('Durbin-Watson:', ["%#8.3f" % durbin_watson(self.wresid)]),
('Jarque-Bera (JB):', ["%#8.3f" % jb]),
('Prob(JB):', ["%#8.3g" % jbpv]),
('Cond. No.', ["%#8.3g" % condno])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
#create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=True)
smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
yname=yname, xname=xname,
title="")
return smry | Summarize the Regression Results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
Default is `var_##` for each of the p regressors
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results | summary | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause |
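A short, hedged usage sketch of the summary tables on the same results object; the variable labels are illustrative. Note that the parameter table is built with the t distribution (use_t=True in the code above).
print(res.summary(yname="y", xname=["const", "x_endog"], alpha=0.05))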
def __init__(self, endog, exog, instrument, k_moms=None, k_params=None,
missing='none', **kwds):
'''
maybe drop and use mixin instead
TODO: GMM does not really care about the data, just the moment conditions
'''
instrument = self._check_inputs(instrument, endog) # attaches if needed
super().__init__(endog, exog, missing=missing,
instrument=instrument)
# self.endog = endog
# self.exog = exog
# self.instrument = instrument
self.nobs = endog.shape[0]
if k_moms is not None:
self.nmoms = k_moms
elif instrument is not None:
self.nmoms = instrument.shape[1]
else:
self.nmoms = np.nan
if k_params is not None:
self.k_params = k_params
elif instrument is not None:
self.k_params = exog.shape[1]
else:
self.k_params = np.nan
self.__dict__.update(kwds)
self.epsilon_iter = 1e-6 | maybe drop and use mixin instead
TODO: GMM does not really care about the data, just the moment conditions | __init__ | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause |
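To show how this constructor is typically reached, here is a hedged sketch of subclassing GMM with instrument-orthogonality moment conditions; the momcond interface and the fit(start_params=..., maxiter=...) options are assumed from the rest of this module and are not shown in this excerpt. It reuses y, exog and instrument from the earlier sketch.
from statsmodels.sandbox.regression.gmm import GMM

class OrthogonalityGMM(GMM):
    # moment conditions E[z_i * (y_i - x_i' beta)] = 0, one column per instrument
    def momcond(self, params):
        resid = self.endog - self.exog @ params
        return self.instrument * resid[:, None]   # shape (nobs, k_moms)

mod_gmm = OrthogonalityGMM(y, exog, instrument,
                           k_moms=instrument.shape[1],
                           k_params=exog.shape[1])
res_gmm = mod_gmm.fit(start_params=np.zeros(exog.shape[1]), maxiter=2)  # two-step GMM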
def set_param_names(self, param_names, k_params=None):
"""set the parameter names in the model
Parameters
----------
param_names : list[str]
param_names should have the same length as the number of params
k_params : None or int
            If k_params is None, then the existing k_params attribute is used
            (provided it is not None itself).
If k_params is not None, then it will also set the k_params
attribute.
"""
if k_params is not None:
self.k_params = k_params
else:
k_params = self.k_params
if k_params == len(param_names):
self.data.xnames = param_names
else:
raise ValueError('param_names has the wrong length') | set the parameter names in the model
Parameters
----------
param_names : list[str]
param_names should have the same length as the number of params
k_params : None or int
If k_params is None, then the existing k_params attribute is used
(provided it is not None itself).
If k_params is not None, then it will also set the k_params
attribute. | set_param_names | python | statsmodels/statsmodels | statsmodels/sandbox/regression/gmm.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/regression/gmm.py | BSD-3-Clause |
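A one-line, hedged usage sketch of set_param_names on the GMM model from the previous sketch, so that summary tables show readable labels instead of the defaults; the names themselves are illustrative.
mod_gmm.set_param_names(["const", "beta_x"], k_params=2)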