Dataset columns: code, docstring, func_name, language, repo, path, url, license. Each code snippet below is followed by one metadata line in the format: func_name | language | repo | path | url | license.
def whiten(self, x):
"""
whiten the data by linear transformation
Parameters
----------
X : array_like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
np.dot(x, self.cholcovinv.T)
Notes
-----
This only does rescaling, it does not subtract the mean, use standardize
for this instead
See Also
--------
standardize : subtract mean and rescale to standardized random variable.
"""
x = np.asarray(x)
if np.any(self.cov):
#return np.dot(self.cholcovinv, x)
return np.dot(x, self.cholcovinv.T)
else:
        return x

whiten | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
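A minimal usage sketch for `whiten` (assuming the class exposing it is this module's `MVNormal(mean, sigma)` and that `cholcovinv` is the inverse Cholesky factor of the covariance); after subtracting the mean, whitened data should have roughly identity covariance:

import numpy as np
from statsmodels.sandbox.distributions.mv_normal import MVNormal

mean = np.array([0.0, 1.0])
cov = np.array([[2.0, 0.5], [0.5, 1.0]])
mv = MVNormal(mean, cov)  # assumed constructor, per the source module above

rng = np.random.default_rng(0)
x = rng.multivariate_normal(mean, cov, size=10000)
z = mv.whiten(x - mean)  # whiten only rescales, so subtract the mean first
print(np.cov(z, rowvar=False).round(2))  # approximately the identity matrix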
def rvs(self, size=1):
'''random variable
Parameters
----------
size : int or tuple
the number and shape of random variables to draw.
Returns
-------
rvs : ndarray
the returned random variables with shape given by size and the
dimension of the multivariate random vector as additional last
dimension
Notes
-----
uses numpy.random.multivariate_normal directly
'''
    return np.random.multivariate_normal(self.mean, self.cov, size=size)

rvs | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
def pdf(self, x):
'''probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
pdf : float or array
probability density value of each random vector
'''
    return np.exp(self.logpdf(x))

pdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
def logpdf(self, x):
    '''logarithm of probability density function

    Parameters
    ----------
    x : array_like
        can be 1d or 2d; if 2d, then each row is taken as an independent
        multivariate random vector

    Returns
    -------
    logpdf : float or array
        log probability density value of each random vector

    Notes
    -----
    TODO: this should be made to work with 2d x, with a multivariate
    normal vector in each row and iid across rows; it does not work now
    because of the dot in whiten.
    '''
x = np.asarray(x)
x_whitened = self.whiten(x - self.mean)
SSR = np.sum(x_whitened**2, -1)
llf = -SSR
llf -= self.nvars * np.log(2. * np.pi)
llf -= self.logdetcov
llf *= 0.5
    return llf

logpdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
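The formula in `logpdf` can be sanity-checked by writing the same computation out directly and comparing with scipy's multivariate normal:

import numpy as np
from scipy.stats import multivariate_normal

mean = np.array([0.0, 1.0])
cov = np.array([[2.0, 0.5], [0.5, 1.0]])
x = np.array([[0.5, 0.5], [1.0, 2.0]])

# same steps as logpdf above: whiten, sum of squares, normalizing terms
cholcovinv = np.linalg.inv(np.linalg.cholesky(cov))
x_whitened = np.dot(x - mean, cholcovinv.T)
ssr = np.sum(x_whitened**2, -1)
llf = 0.5 * (-ssr - len(mean) * np.log(2. * np.pi) - np.log(np.linalg.det(cov)))

print(np.allclose(llf, multivariate_normal(mean, cov).logpdf(x)))  # True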
def rvs(self, size=1):
'''random variable
Parameters
----------
size : int or tuple
the number and shape of random variables to draw.
Returns
-------
rvs : ndarray
the returned random variables with shape given by size and the
dimension of the multivariate random vector as additional last
dimension
Notes
-----
uses numpy.random.multivariate_normal directly
'''
    return np.random.multivariate_normal(self.mean, self.sigma, size=size)

rvs | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
def logpdf(self, x):
    '''logarithm of probability density function

    Parameters
    ----------
    x : array_like
        can be 1d or 2d; if 2d, then each row is taken as an independent
        multivariate random vector

    Returns
    -------
    logpdf : float or array
        log probability density value of each random vector

    Notes
    -----
    TODO: this should be made to work with 2d x, with a multivariate
    normal vector in each row and iid across rows; it does not work now
    because of the dot in whiten.
    '''
x = np.asarray(x)
x_whitened = self.whiten(x - self.mean)
SSR = np.sum(x_whitened**2, -1)
llf = -SSR
llf -= self.nvars * np.log(2. * np.pi)
llf -= self.logdetsigma
llf *= 0.5
    return llf

logpdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
def cdf(self, x, **kwds):
'''cumulative distribution function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
kwds : dict
contains options for the numerical calculation of the cdf
Returns
-------
    cdf : float or array
        cumulative probability value of each random vector
'''
#lower = -np.inf * np.ones_like(x)
#return mvstdnormcdf(lower, self.standardize(x), self.corr, **kwds)
return mvnormcdf(x, self.mean, self.cov, **kwds) | cumulative distribution function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
kwds : dict
contains options for the numerical calculation of the cdf
Returns
-------
cdf : float or array
probability density value of each random vector | cdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause |
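A quick check of the CDF: for independent components the joint CDF factorizes into univariate normal CDFs (a sketch; I believe `mvnormcdf` lives in statsmodels.sandbox.distributions.extras, but treat the import as an assumption):

import numpy as np
from scipy import stats
from statsmodels.sandbox.distributions.extras import mvnormcdf  # assumed location

p_joint = mvnormcdf(np.array([0.5, 1.0]), np.zeros(2), np.eye(2))
p_factor = stats.norm.cdf(0.5) * stats.norm.cdf(1.0)
print(p_joint, p_factor)  # should agree to numerical-integration accuracy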
def cov(self):
'''covariance matrix'''
    return self.sigma

cov | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
def affine_transformed(self, shift, scale_matrix):
    '''return distribution of an affine transform

    For full rank scale_matrix only.

    Parameters
    ----------
    shift : array_like
        shift of mean
    scale_matrix : array_like
        linear transformation matrix

    Returns
    -------
    mvt : instance of MVNormal
        instance of multivariate normal distribution given by affine
        transformation

    Notes
    -----
    The affine transformation is defined by

        y = a + B x

    where a is shift and B is a scale matrix for the linear transformation.

    This should also work to select marginal distributions, but is not
    tested for that case yet; currently only tested because it's called
    by standardized.
    '''
B = scale_matrix #tmp variable
mean_new = np.dot(B, self.mean) + shift
sigma_new = np.dot(np.dot(B, self.sigma), B.T)
    return MVNormal(mean_new, sigma_new)

affine_transformed | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
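Under y = a + B x the mean maps to a + B mu and the covariance to B Sigma B'. A sketch verifying this, again assuming the `MVNormal(mean, sigma)` constructor used in the return statement:

import numpy as np
from statsmodels.sandbox.distributions.mv_normal import MVNormal

mv = MVNormal(np.array([1.0, 2.0]), np.array([[2.0, 0.5], [0.5, 1.0]]))
a = np.array([0.5, -0.5])
B = np.array([[1.0, 1.0], [0.0, 2.0]])

mv2 = mv.affine_transformed(a, B)
print(mv2.mean)  # equals a + B @ mv.mean
print(np.allclose(mv2.sigma, B @ mv.sigma @ B.T))  # True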
def __init__(self, mean, sigma, df):
    '''initialize instance

    Parameters
    ----------
    mean : array_like
        parameter mu (might be renamed), for symmetric distributions this
        is the mean
    sigma : array_like, 2d
        dispersion matrix, covariance matrix in normal distribution, but
        only proportional to covariance matrix in t distribution
    df : int or float
        degrees of freedom of the t distribution
    '''
super().__init__(mean, sigma)
self.extra_args = ['df'] #overwrites extra_args of super
    self.df = df

__init__ | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
def rvs(self, size=1):
'''random variables with Student T distribution
Parameters
----------
size : int or tuple
the number and shape of random variables to draw.
Returns
-------
rvs : ndarray
the returned random variables with shape given by size and the
dimension of the multivariate random vector as additional last
dimension
- TODO: Not sure if this works for size tuples with len>1.
Notes
-----
generated as a chi-square mixture of multivariate normal random
variables.
does this require df>2 ?
'''
from .multivariate import multivariate_t_rvs
    return multivariate_t_rvs(self.mean, self.sigma, df=self.df, n=size)

rvs | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
def logpdf(self, x):
'''logarithm of probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
logpdf : float or array
        log probability density value of each random vector
'''
x = np.asarray(x)
df = self.df
nvars = self.nvars
x_whitened = self.whiten(x - self.mean) #should be float
llf = - nvars * np_log(df * np_pi)
llf -= self.logdetsigma
llf -= (df + nvars) * np_log(1 + np.sum(x_whitened**2,-1) / df)
llf *= 0.5
llf += sps_gamln((df + nvars) / 2.) - sps_gamln(df / 2.)
    return llf

logpdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
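For scipy >= 1.6 this can be cross-checked against `scipy.stats.multivariate_t` (a sketch, assuming the surrounding class is this module's `MVT(mean, sigma, df)`):

import numpy as np
from scipy.stats import multivariate_t
from statsmodels.sandbox.distributions.mv_normal import MVT

mean = np.zeros(2)
sigma = np.array([[1.0, 0.3], [0.3, 1.0]])
x = np.array([[0.1, -0.2], [1.0, 1.5]])

mvt = MVT(mean, sigma, df=5)
print(np.allclose(mvt.logpdf(x), multivariate_t(mean, sigma, df=5).logpdf(x)))  # True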
def cdf(self, x, **kwds):
'''cumulative distribution function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
kwds : dict
contains options for the numerical calculation of the cdf
Returns
-------
    cdf : float or array
        cumulative probability value of each random vector
'''
lower = -np.inf * np.ones_like(x)
#std_sigma = np.sqrt(np.diag(self.sigma))
upper = (x - self.mean)/self.std_sigma
    return mvstdtprob(lower, upper, self.corr, self.df, **kwds)

cdf | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
def cov(self):
'''covariance matrix
The covariance matrix for the t distribution does not exist for df<=2,
and is equal to sigma * df/(df-2) for df>2
'''
if self.df <= 2:
return np.nan * np.ones_like(self.sigma)
else:
        return self.df / (self.df - 2.) * self.sigma

cov | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
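A quick numeric illustration of the df/(df-2) scaling (a sketch assuming `MVT` as above and that `cov` is exposed as a property):

import numpy as np
from statsmodels.sandbox.distributions.mv_normal import MVT

sigma = np.array([[1.0, 0.3], [0.3, 2.0]])
mvt = MVT(np.zeros(2), sigma, df=5)
print(mvt.cov / sigma)  # every entry equals df / (df - 2) = 5/3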
def affine_transformed(self, shift, scale_matrix):
    '''return distribution of a full rank affine transform

    For full rank scale_matrix only.

    Parameters
    ----------
    shift : array_like
        shift of mean
    scale_matrix : array_like
        linear transformation matrix

    Returns
    -------
    mvt : instance of MVT
        instance of multivariate t distribution given by affine
        transformation

    Notes
    -----
    The affine transformation is defined by

        y = a + B x

    where a is shift and B is a full rank scale matrix with the same
    dimension as sigma.

    This checks for eigvals <= 0, so there are possible problems for cases
    with positive eigenvalues close to zero.
    see: http://www.statlect.com/mcdstu1.htm

    I'm not sure about the general case; non-full-rank transformations are
    not multivariate t distributed.
    '''
    #full rank method could also be in elliptical and called with super
    #after the rank check
    B = scale_matrix  #tmp variable as shorthand
    if B.shape != (self.nvars, self.nvars):
        raise ValueError('scale_matrix needs to be square with ndim=nvars')
    if (np.linalg.eigvals(B) <= 0).any():
        raise ValueError('affine transform has to be full rank')
mean_new = np.dot(B, self.mean) + shift
sigma_new = np.dot(np.dot(B, self.sigma), B.T)
    return MVT(mean_new, sigma_new, self.df)

affine_transformed | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/mv_normal.py | BSD-3-Clause
def mc2mvsk(args):
'''convert central moments to mean, variance, skew, kurtosis
'''
mc, mc2, mc3, mc4 = args
skew = np.divide(mc3, mc2**1.5)
kurt = np.divide(mc4, mc2**2.0) - 3.0
    return (mc, mc2, skew, kurt)

mc2mvsk | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/tests/check_moments.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/tests/check_moments.py | BSD-3-Clause
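A worked check: for a standard normal the central moments are (0, 1, 0, 3), so the skew and excess kurtosis should both come out as zero:

import numpy as np  # mc2mvsk above uses np.divide

print(mc2mvsk((0.0, 1.0, 0.0, 3.0)))  # -> (0.0, 1.0, 0.0, 0.0)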
def mnc2mvsk(args):
    '''convert non-central moments to mean, variance, skew, kurtosis
    '''
#convert four non-central moments to central moments
mnc, mnc2, mnc3, mnc4 = args
mc = mnc
mc2 = mnc2 - mnc*mnc
mc3 = mnc3 - (3*mc*mc2+mc**3) # 3rd central moment
mc4 = mnc4 - (4*mc*mc3+6*mc*mc*mc2+mc**4)
    return mc2mvsk((mc, mc2, mc3, mc4))

mnc2mvsk | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/tests/check_moments.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/tests/check_moments.py | BSD-3-Clause
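A worked check with nonzero mean: for N(mu, 1) the non-central moments are mu, mu**2 + 1, mu**3 + 3*mu, mu**4 + 6*mu**2 + 3, and the conversion should recover mean mu, variance 1, and zero skew and excess kurtosis:

import numpy as np  # used inside the converters above

mu = 2.0
mnc = (mu, mu**2 + 1, mu**3 + 3*mu, mu**4 + 6*mu**2 + 3)
print(mnc2mvsk(mnc))  # -> (2.0, 1.0, 0.0, 0.0)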
def nct_kurt_bug():
    '''test for incorrect kurtosis of nct

    D. Hogben, R. S. Pinkham, M. B. Wilk: The Moments of the Non-Central
    t-Distribution. Biometrika, Vol. 48, No. 3/4 (Dec., 1961),
    pp. 465-468
    '''
from numpy.testing import assert_almost_equal
mvsk_10_1 = (1.08372, 1.325546, 0.39993, 1.2499424941142943)
assert_almost_equal(stats.nct.stats(10, 1, moments='mvsk'), mvsk_10_1, decimal=6)
    c1 = np.array([1.08372])
    c2 = np.array([.0755460, 1.25000])
    c3 = np.array([.0297802, .580566])
    c4 = np.array([0.0425458, 1.17491, 6.25])
    #calculation for df=10, for arbitrary nc
    nc = 1
    mc1 = c1.item()
    mc2 = (c2 * nc**np.array([2, 0])).sum()
    mc3 = (c3 * nc**np.array([3, 1])).sum()
    # assumption: c4 enters with powers [4, 2, 0], by the pattern of c2 and c3
    mc4 = (c4 * nc**np.array([4, 2, 0])).sum()
    mc2mvsk((mc1, mc2, mc3, mc4))

nct_kurt_bug | python | statsmodels/statsmodels | statsmodels/sandbox/distributions/tests/check_moments.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/distributions/tests/check_moments.py | BSD-3-Clause
def calc_factors(self, x=None, keepdim=0, addconst=True):
'''get factor decomposition of exogenous variables
This uses principal component analysis to obtain the factors. The number
of factors kept is the maximum that will be considered in the regression.
'''
if x is None:
x = self.exog
else:
x = np.asarray(x)
xred, fact, evals, evecs = pca(x, keepdim=keepdim, normalize=1)
self.exog_reduced = xred
#self.factors = fact
if addconst:
self.factors = sm.add_constant(fact, prepend=True)
self.hasconst = 1 #needs to be int
else:
self.factors = fact
self.hasconst = 0 #needs to be int
self.evals = evals
    self.evecs = evecs

calc_factors | python | statsmodels/statsmodels | statsmodels/sandbox/datarich/factormodels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/datarich/factormodels.py | BSD-3-Clause
def fit_find_nfact(self, maxfact=None, skip_crossval=True, cv_iter=None):
    '''estimate the model and selection criteria for up to maxfact factors

    The selection criteria that are calculated are AIC, BIC, R2_adj and,
    if `skip_crossval` is false, the cross-validation prediction error sum
    of squares. Cross-validation is not used by default because it can be
    time consuming to calculate.

    By default the cross-validation method is leave-one-out on the full
    dataset. A different cross-validation sample can be specified as an
    argument to `cv_iter`.

    Results are attached in `results_find_nfact`.
    '''
#print 'OLS on Factors'
if not hasattr(self, 'factors'):
self.calc_factors()
hasconst = self.hasconst
if maxfact is None:
maxfact = self.factors.shape[1] - hasconst
if (maxfact+hasconst) < 1:
raise ValueError('nothing to do, number of factors (incl. constant) should ' +
'be at least 1')
#temporary safety
maxfact = min(maxfact, 10)
y0 = self.endog
results = []
#xred, fact, eva, eve = pca(x0, keepdim=0, normalize=1)
    for k in range(1, maxfact+hasconst): #k includes now the constant
#xred, fact, eva, eve = pca(x0, keepdim=k, normalize=1)
# this is faster and same result
fact = self.factors[:,:k]
res = sm.OLS(y0, fact).fit()
## print 'k =', k
## print res.params
## print 'aic: ', res.aic
## print 'bic: ', res.bic
## print 'llf: ', res.llf
## print 'R2 ', res.rsquared
## print 'R2 adj', res.rsquared_adj
if not skip_crossval:
if cv_iter is None:
cv_iter = LeaveOneOut(len(y0))
prederr2 = 0.
for inidx, outidx in cv_iter:
res_l1o = sm.OLS(y0[inidx], fact[inidx,:]).fit()
#print data.endog[outidx], res.model.predict(data.exog[outidx,:]),
prederr2 += (y0[outidx] -
res_l1o.model.predict(res_l1o.params, fact[outidx,:]))**2.
else:
prederr2 = np.nan
results.append([k, res.aic, res.bic, res.rsquared_adj, prederr2])
self.results_find_nfact = results = np.array(results)
self.best_nfact = np.r_[(np.argmin(results[:,1:3],0), np.argmax(results[:,3],0),
                                np.argmin(results[:,-1], 0))]

fit_find_nfact | python | statsmodels/statsmodels | statsmodels/sandbox/datarich/factormodels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/datarich/factormodels.py | BSD-3-Clause
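A usage sketch on simulated data; I believe the surrounding class is `FactorModelUnivariate(endog, exog)` from this module, but treat the class name as an assumption:

import numpy as np
from statsmodels.sandbox.datarich.factormodels import FactorModelUnivariate  # assumed name

rng = np.random.default_rng(123)
nobs = 200
f = rng.standard_normal((nobs, 2))  # two true factors
exog = np.column_stack([f @ rng.standard_normal(2)
                        + 0.1 * rng.standard_normal(nobs) for _ in range(4)])
endog = f @ np.array([1.0, -0.5]) + 0.2 * rng.standard_normal(nobs)

mod = FactorModelUnivariate(endog, exog)
mod.fit_find_nfact(maxfact=3)
print(mod.results_find_nfact)  # rows of [k, aic, bic, r2_adj, L1O]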
def summary_find_nfact(self):
'''provides a summary for the selection of the number of factors
Returns
-------
sumstr : str
summary of the results for selecting the number of factors
'''
if not hasattr(self, 'results_find_nfact'):
self.fit_find_nfact()
results = self.results_find_nfact
sumstr = ''
sumstr += '\n' + 'Best result for k, by AIC, BIC, R2_adj, L1O'
# best = np.r_[(np.argmin(results[:,1:3],0), np.argmax(results[:,3],0),
# np.argmin(results[:,-1],0))]
sumstr += '\n' + ' '*19 + '%5d %4d %6d %5d' % tuple(self.best_nfact)
from statsmodels.iolib.table import SimpleTable
headers = 'k, AIC, BIC, R2_adj, L1O'.split(', ')
numformat = ['%6d'] + ['%10.3f']*4 #'%10.4f'
txt_fmt1 = dict(data_fmts = numformat)
tabl = SimpleTable(results, headers, None, txt_fmt=txt_fmt1)
sumstr += '\n' + "PCA regression on simulated data,"
sumstr += '\n' + "DGP: 2 factors and 4 explanatory variables"
sumstr += '\n' + tabl.__str__()
sumstr += '\n' + "Notes: k is number of components of PCA,"
sumstr += '\n' + " constant is added additionally"
sumstr += '\n' + " k=0 means regression on constant only"
sumstr += '\n' + " L1O: sum of squared prediction errors for leave-one-out"
    return sumstr

summary_find_nfact | python | statsmodels/statsmodels | statsmodels/sandbox/datarich/factormodels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/datarich/factormodels.py | BSD-3-Clause
def __repr__(self):
    """Provide something sane to print."""
    rpr = "Single Index Model \n"
    rpr += "Number of variables: K = " + str(self.K) + "\n"
    rpr += "Number of samples:   nobs = " + str(self.nobs) + "\n"
    rpr += "Variable types:      " + self.var_type + "\n"
    rpr += "BW selection method: cv_ls" + "\n"
    rpr += "Estimator type: local constant" + "\n"
    return rpr

__repr__ | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernel_extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernel_extras.py | BSD-3-Clause
def _est_b_bw(self):
"""
Computes the (beta) coefficients and the bandwidths.
Minimizes ``cv_loo`` with respect to ``b`` and ``bw``.
"""
params0 = np.random.uniform(size=(self.k_linear + self.K, ))
b_bw = optimize.fmin(self.cv_loo, params0, disp=0)
b = b_bw[0 : self.k_linear]
bw = b_bw[self.k_linear:]
#bw = self._set_bw_bounds(np.asarray(bw))
    return b, bw

_est_b_bw | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernel_extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernel_extras.py | BSD-3-Clause
def cv_loo(self, params):
"""
Similar to the cross validation leave-one-out estimator.
Modified to reflect the linear components.
Parameters
----------
params : array_like
Vector consisting of the coefficients (b) and the bandwidths (bw).
The first ``k_linear`` elements are the coefficients.
Returns
-------
L : float
The value of the objective function
References
----------
See p.254 in [1]
"""
params = np.asarray(params)
b = params[0 : self.k_linear]
bw = params[self.k_linear:]
LOO_X = LeaveOneOut(self.exog)
LOO_Y = LeaveOneOut(self.endog).__iter__()
LOO_Z = LeaveOneOut(self.exog_nonparametric).__iter__()
Xb = np.dot(self.exog, b)[:,None]
L = 0
for ii, X_not_i in enumerate(LOO_X):
Y = next(LOO_Y)
Z = next(LOO_Z)
Xb_j = np.dot(X_not_i, b)[:,None]
Yx = Y - Xb_j
G = self.func(bw, endog=Yx, exog=-Z,
data_predict=-self.exog_nonparametric[ii, :])[0]
lt = Xb[ii, :] #.sum() # linear term
L += (self.endog[ii] - lt - G) ** 2
    return L

cv_loo | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernel_extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernel_extras.py | BSD-3-Clause
def fit(self, exog_predict=None, exog_nonparametric_predict=None):
"""Computes fitted values and marginal effects"""
if exog_predict is None:
exog_predict = self.exog
else:
exog_predict = _adjust_shape(exog_predict, self.k_linear)
if exog_nonparametric_predict is None:
exog_nonparametric_predict = self.exog_nonparametric
else:
exog_nonparametric_predict = _adjust_shape(exog_nonparametric_predict, self.K)
N_data_predict = np.shape(exog_nonparametric_predict)[0]
mean = np.empty((N_data_predict,))
mfx = np.empty((N_data_predict, self.K))
Y = self.endog - np.dot(exog_predict, self.b)[:,None]
for i in range(N_data_predict):
mean_mfx = self.func(self.bw, Y, self.exog_nonparametric,
data_predict=exog_nonparametric_predict[i, :])
mean[i] = mean_mfx[0]
mfx_c = np.squeeze(mean_mfx[1])
mfx[i, :] = mfx_c
    return mean, mfx

fit | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernel_extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernel_extras.py | BSD-3-Clause
def __repr__(self):
    """Provide something sane to print."""
    rpr = "Semiparametric Partially Linear Model \n"
    rpr += "Number of variables: K = " + str(self.K) + "\n"
    rpr += "Number of samples:   N = " + str(self.nobs) + "\n"
    rpr += "Variable types:      " + self.var_type + "\n"
    rpr += "BW selection method: cv_ls" + "\n"
    rpr += "Estimator type: local constant" + "\n"
    return rpr

__repr__ | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernel_extras.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernel_extras.py | BSD-3-Clause
def inner_cont(polys, lower, upper, weight=None):
'''inner product of continuous function (with weight=1)
Parameters
----------
polys : list of callables
polynomial instances
lower : float
lower integration limit
upper : float
upper integration limit
weight : callable or None
weighting function
Returns
-------
innp : ndarray
symmetric 2d square array with innerproduct of all function pairs
err : ndarray
numerical error estimate from scipy.integrate.quad, same dimension as innp
Examples
--------
>>> from scipy.special import chebyt
>>> polys = [chebyt(i) for i in range(4)]
>>> r, e = inner_cont(polys, -1, 1)
>>> r
array([[ 2. , 0. , -0.66666667, 0. ],
[ 0. , 0.66666667, 0. , -0.4 ],
[-0.66666667, 0. , 0.93333333, 0. ],
[ 0. , -0.4 , 0. , 0.97142857]])
'''
n_polys = len(polys)
innerprod = np.empty((n_polys, n_polys))
innerprod.fill(np.nan)
interr = np.zeros((n_polys, n_polys))
for i in range(n_polys):
for j in range(i+1):
p1 = polys[i]
p2 = polys[j]
if weight is not None:
innp, err = integrate.quad(lambda x: p1(x)*p2(x)*weight(x),
lower, upper)
else:
innp, err = integrate.quad(lambda x: p1(x)*p2(x), lower, upper)
innerprod[i,j] = innp
interr[i,j] = err
if not i == j:
innerprod[j,i] = innp
interr[j,i] = err
    return innerprod, interr

inner_cont | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/densityorthopoly.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/densityorthopoly.py | BSD-3-Clause
def is_orthonormal_cont(polys, lower, upper, rtol=0, atol=1e-08):
'''check whether functions are orthonormal
Parameters
----------
polys : list of polynomials or function
Returns
-------
is_orthonormal : bool
is False if the innerproducts are not close to 0 or 1
Notes
-----
this stops as soon as the first deviation from orthonormality is found.
Examples
--------
>>> from scipy.special import chebyt
>>> polys = [chebyt(i) for i in range(4)]
>>> r, e = inner_cont(polys, -1, 1)
>>> r
array([[ 2. , 0. , -0.66666667, 0. ],
[ 0. , 0.66666667, 0. , -0.4 ],
[-0.66666667, 0. , 0.93333333, 0. ],
[ 0. , -0.4 , 0. , 0.97142857]])
>>> is_orthonormal_cont(polys, -1, 1, atol=1e-6)
False
>>> polys = [ChebyTPoly(i) for i in range(4)]
>>> r, e = inner_cont(polys, -1, 1)
>>> r
array([[ 1.00000000e+00, 0.00000000e+00, -9.31270888e-14,
0.00000000e+00],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00,
-9.47850712e-15],
[ -9.31270888e-14, 0.00000000e+00, 1.00000000e+00,
0.00000000e+00],
[ 0.00000000e+00, -9.47850712e-15, 0.00000000e+00,
1.00000000e+00]])
>>> is_orthonormal_cont(polys, -1, 1, atol=1e-6)
True
'''
for i in range(len(polys)):
for j in range(i+1):
p1 = polys[i]
p2 = polys[j]
innerprod = integrate.quad(lambda x: p1(x)*p2(x), lower, upper)[0]
#print i,j, innerprod
if not np.allclose(innerprod, i==j, rtol=rtol, atol=atol):
return False
    return True

is_orthonormal_cont | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/densityorthopoly.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/densityorthopoly.py | BSD-3-Clause
def fit(self, x, polybase=None, order=5, limits=None):
'''estimate the orthogonal polynomial approximation to the density
'''
if polybase is None:
polys = self.polys[:order]
else:
self.polybase = polybase
self.polys = polys = [polybase(i) for i in range(order)]
#move to init ?
if not hasattr(self, 'offsetfac'):
self.offsetfac = polys[0].offsetfactor
xmin, xmax = x.min(), x.max()
if limits is None:
self.offset = offset = (xmax - xmin) * self.offsetfac
limits = self.limits = (xmin - offset, xmax + offset)
interval_length = limits[1] - limits[0]
xinterval = xmax - xmin
    # need to cover (half-)open intervals
self.shrink = 1. / interval_length #xinterval/interval_length
offset = (interval_length - xinterval ) / 2.
self.shift = xmin - offset
self.x = x = self._transform(x)
coeffs = [(p(x)).mean() for p in polys]
self.coeffs = coeffs
self.polys = polys
self._verify() #verify that it is a proper density
    return self  #coeffs, polys

fit | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/densityorthopoly.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/densityorthopoly.py | BSD-3-Clause
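A usage sketch for the estimator; `DensityOrthoPoly` and the `ChebyTPoly` basis referenced in the doctests above are assumed to be the relevant names in this module:

import numpy as np
from statsmodels.sandbox.nonparametric.densityorthopoly import (
    DensityOrthoPoly, ChebyTPoly)

rng = np.random.default_rng(0)
x = rng.beta(2.0, 5.0, size=2000)

dop = DensityOrthoPoly().fit(x, polybase=ChebyTPoly, order=6)
grid = np.linspace(0.05, 0.95, 5)
print(dop(grid))  # estimated density values on the grid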
def __call__(self, xeval):
'''alias for evaluate, except no order argument'''
    return self.evaluate(xeval)

__call__ | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/densityorthopoly.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/densityorthopoly.py | BSD-3-Clause
def _verify(self):
    '''check for bona fide density correction

    currently only checks that the density integrates to 1;
    non-negativity is not implemented yet
    '''
#watch out for circular/recursive usage
#evaluate uses domain of data, we stay offset away from bounds
intdomain = self.limits #self.polys[0].intdomain
self._corfactor = 1./integrate.quad(self.evaluate, *intdomain)[0]
#self._corshift = 0
#self._corfactor
    return self._corfactor

_verify | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/densityorthopoly.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/densityorthopoly.py | BSD-3-Clause
def _correction(self, x):
'''bona fide density correction
affine shift of density to make it into a proper density
'''
if self._corfactor != 1:
x *= self._corfactor
if self._corshift != 0:
x += self._corshift
    return x

_correction | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/densityorthopoly.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/densityorthopoly.py | BSD-3-Clause
def _transform(self, x): # limits=None):
    '''transform observations to the domain of the density

    uses the shrink and shift attributes which are set in fit
    '''
#use domain from first instance
#class does not have domain self.polybase.domain[0] AttributeError
domain = self.polys[0].domain
ilen = (domain[1] - domain[0])
shift = self.shift - domain[0]/self.shrink/ilen
shrink = self.shrink * ilen
    return (x - shift) * shrink

_transform | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/densityorthopoly.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/densityorthopoly.py | BSD-3-Clause
def getH(self):
"""Getter for kernel bandwidth, H"""
    return self._H

getH | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def setH(self, value):
"""Setter for kernel bandwidth, H"""
    self._H = value

setH | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def _kernweight(self, x ):
"""returns the kernel weight for the independent multivariate kernel"""
if isinstance( self._kernels, CustomKernel ):
## Radial case
#d = x.T * x
#x is matrix, 2d, element wise sqrt looks wrong
#d = np.sqrt( x.T * x )
x = np.asarray(x)
#d = np.sqrt( (x * x).sum(-1) )
d = (x * x).sum(-1)
        return self._kernels( np.asarray(d) )

_kernweight | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def __call__(self, x):
"""
This simply returns the value of the kernel function at x
Does the same as weight if the function is normalised
"""
    return self._kernweight(x)

__call__ | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def __init__(self, shape, h = 1.0, domain = None, norm = None):
"""
shape should be a function taking and returning numeric type.
For sanity it should always return positive or zero but this is not
enforced in case you want to do weird things. Bear in mind that the
statistical tests etc. may not be valid for non-positive kernels.
The bandwidth of the kernel is supplied as h.
You may specify a domain as a list of 2 values [min, max], in which case
kernel will be treated as zero outside these values. This will speed up
calculation.
You may also specify the normalisation constant for the supplied Kernel.
If you do this number will be stored and used as the normalisation
without calculation. It is recommended you do this if you know the
constant, to speed up calculation. In particular if the shape function
provided is already normalised you should provide norm = 1.0.
Warning: I think several calculations assume that the kernel is
normalized. No tests for non-normalized kernel.
"""
self._normconst = norm # a value or None, if None, then calculate
self.domain = domain
self.weights = None
if callable(shape):
self._shape = shape
else:
raise TypeError("shape must be a callable object/function")
self._h = h
self._L2Norm = None
self._kernel_var = None
self._normal_reference_constant = None
    self._order = None

__init__ | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
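Following the constructor's contract, a sketch defining an Epanechnikov kernel as a CustomKernel: the shape 0.75*(1 - x**2) on [-1, 1] is already normalised, so norm=1.0 skips the numerical integration (norm_const and kernel_var are read as properties here, as the module appears to define them):

from statsmodels.sandbox.nonparametric.kernels import CustomKernel

epan = CustomKernel(lambda x: 0.75 * (1.0 - x**2), h=0.5,
                    domain=[-1.0, 1.0], norm=1.0)
print(epan.norm_const)  # 1.0, taken from the norm argument
print(epan.kernel_var)  # second moment; 1/5 for the Epanechnikov kernel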
def geth(self):
"""Getter for kernel bandwidth, h"""
    return self._h

geth | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def seth(self, value):
"""Setter for kernel bandwidth, h"""
    self._h = value

seth | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def isInDomain(xy):
"""Used for filter to check if point is in the domain"""
u = (xy[0]-x)/self.h
    return np.all((u >= self.domain[0]) & (u <= self.domain[1]))

in_domain.isInDomain | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def in_domain(self, xs, ys, x):
"""
Returns the filtered (xs, ys) based on the Kernel domain centred on x
"""
# Disable black-list functions: filter used for speed instead of
# list-comprehension
# pylint: disable-msg=W0141
def isInDomain(xy):
"""Used for filter to check if point is in the domain"""
u = (xy[0]-x)/self.h
return np.all((u >= self.domain[0]) & (u <= self.domain[1]))
if self.domain is None:
return (xs, ys)
else:
filtered = lfilter(isInDomain, lzip(xs, ys))
if len(filtered) > 0:
xs, ys = lzip(*filtered)
return (xs, ys)
else:
            return ([], [])

in_domain | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def density(self, xs, x):
"""Returns the kernel density estimate for point x based on x-values
xs
"""
xs = np.asarray(xs)
n = len(xs) # before in_domain?
if self.weights is not None:
xs, weights = self.in_domain( xs, self.weights, x )
else:
xs = self.in_domain( xs, xs, x )[0]
xs = np.asarray(xs)
#print 'len(xs)', len(xs), x
if xs.ndim == 1:
xs = xs[:,None]
if len(xs)>0:
h = self.h
if self.weights is not None:
w = 1 / h * np.sum(self((xs-x)/h).T * weights, axis=1)
else:
w = 1. / (h * n) * np.sum(self((xs-x)/h), axis=0)
return w
else:
        return np.nan

density | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def density_var(self, density, nobs):
"""approximate pointwise variance for kernel density
not verified
Parameters
----------
    density : array_like
pdf of the kernel density
nobs : int
number of observations used in the KDE estimation
Returns
-------
kde_var : ndarray
estimated variance of the density estimate
Notes
-----
This uses the asymptotic normal approximation to the distribution of
the density estimate.
"""
    return np.asarray(density) * self.L2Norm / self.h / nobs

density_var | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def density_confint(self, density, nobs, alpha=0.05):
"""approximate pointwise confidence interval for kernel density
The confidence interval is centered at the estimated density and
ignores the bias of the density estimate.
not verified
Parameters
----------
    density : array_like
        pdf of the kernel density
    nobs : int
        number of observations used in the KDE estimation
    alpha : float
        significance level for the two-sided confidence interval
Returns
-------
conf_int : ndarray
estimated confidence interval of the density estimate, lower bound
in first column and upper bound in second column
Notes
-----
This uses the asymptotic normal approximation to the distribution of
the density estimate. The lower bound can be negative for density
values close to zero.
"""
from scipy import stats
crit = stats.norm.isf(alpha / 2.)
density = np.asarray(density)
half_width = crit * np.sqrt(self.density_var(density, nobs))
conf_int = np.column_stack((density - half_width, density + half_width))
    return conf_int

density_confint | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
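A sketch putting `density` and `density_confint` together, assuming the module's Gaussian kernel class (constructed as `Gaussian(h=...)`):

import numpy as np
from statsmodels.sandbox.nonparametric.kernels import Gaussian  # assumed class name

rng = np.random.default_rng(1)
xs = rng.standard_normal(500)

kern = Gaussian(h=0.4)
grid = np.linspace(-2, 2, 5)
dens = np.array([kern.density(xs, x0) for x0 in grid])
ci = kern.density_confint(dens, nobs=len(xs), alpha=0.05)
print(np.column_stack((grid, dens, ci)))  # x, pdf estimate, lower, upper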
def smooth(self, xs, ys, x):
"""Returns the kernel smoothing estimate for point x based on x-values
xs and y-values ys.
Not expected to be called by the user.
"""
xs, ys = self.in_domain(xs, ys, x)
if len(xs)>0:
w = np.sum(self((xs-x)/self.h))
#TODO: change the below to broadcasting when shape is sorted
v = np.sum([yy*self((xx-x)/self.h) for xx, yy in zip(xs, ys)])
return v / w
else:
        return np.nan

smooth | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def smoothvar(self, xs, ys, x):
"""Returns the kernel smoothing estimate of the variance at point x.
"""
xs, ys = self.in_domain(xs, ys, x)
if len(xs) > 0:
fittedvals = np.array([self.smooth(xs, ys, xx) for xx in xs])
sqresid = square( subtract(ys, fittedvals) )
w = np.sum(self((xs-x)/self.h))
v = np.sum([rr*self((xx-x)/self.h) for xx, rr in zip(xs, sqresid)])
return v / w
else:
        return np.nan

smoothvar | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def smoothconf(self, xs, ys, x, alpha=0.05):
"""Returns the kernel smoothing estimate with confidence 1sigma bounds
"""
xs, ys = self.in_domain(xs, ys, x)
if len(xs) > 0:
fittedvals = np.array([self.smooth(xs, ys, xx) for xx in xs])
#fittedvals = self.smooth(xs, ys, x) # x or xs in Haerdle
sqresid = square(
subtract(ys, fittedvals)
)
w = np.sum(self((xs-x)/self.h))
#var = sqresid.sum() / (len(sqresid) - 0) # nonlocal var ? JP just trying
v = np.sum([rr*self((xx-x)/self.h) for xx, rr in zip(xs, sqresid)])
var = v / w
sd = np.sqrt(var)
K = self.L2Norm
yhat = self.smooth(xs, ys, x)
from scipy import stats
crit = stats.norm.isf(alpha / 2)
err = crit * sd * np.sqrt(K) / np.sqrt(w * self.h * self.norm_const)
return (yhat - err, yhat, yhat + err)
else:
        return (np.nan, np.nan, np.nan)

smoothconf | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def L2Norm(self):
"""Returns the integral of the square of the kernal from -inf to inf"""
if self._L2Norm is None:
def L2Func(x):
return (self.norm_const * self._shape(x)) ** 2
if self.domain is None:
self._L2Norm = scipy.integrate.quad(L2Func, -inf, inf)[0]
else:
self._L2Norm = scipy.integrate.quad(L2Func, self.domain[0],
self.domain[1])[0]
    return self._L2Norm

L2Norm | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def norm_const(self):
"""
Normalising constant for kernel (integral from -inf to inf)
"""
if self._normconst is None:
if self.domain is None:
quadres = scipy.integrate.quad(self._shape, -inf, inf)
else:
quadres = scipy.integrate.quad(self._shape, self.domain[0],
self.domain[1])
self._normconst = 1.0/(quadres[0])
    return self._normconst

norm_const | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def kernel_var(self):
"""Returns the second moment of the kernel"""
if self._kernel_var is None:
def func(x):
return x ** 2 * self.norm_const * self._shape(x)
if self.domain is None:
self._kernel_var = scipy.integrate.quad(func, -inf, inf)[0]
else:
self._kernel_var = scipy.integrate.quad(func, self.domain[0],
self.domain[1])[0]
    return self._kernel_var

kernel_var | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def normal_reference_constant(self):
"""
    Constant used for the Silverman normal reference asymptotic bandwidth
    calculation.
C = 2((pi^(1/2)*(nu!)^3 R(k))/(2nu(2nu)!kap_nu(k)^2))^(1/(2nu+1))
nu = kernel order
kap_nu = nu'th moment of kernel
R = kernel roughness (square of L^2 norm)
Note: L2Norm property returns square of norm.
"""
nu = self._order
if not nu == 2:
msg = "Only implemented for second order kernels"
raise NotImplementedError(msg)
if self._normal_reference_constant is None:
C = np.pi**(.5) * factorial(nu)**3 * self.L2Norm
C /= (2 * nu * factorial(2 * nu) * self.moments(nu)**2)
C = 2*C**(1.0/(2*nu+1))
self._normal_reference_constant = C
    return self._normal_reference_constant

normal_reference_constant | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
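For the Gaussian kernel (nu=2, second moment 1, roughness R = 1/(2*sqrt(pi))) the formula yields the familiar constant of roughly 1.059 in Silverman's rule h = C * sigma * n**(-1/5); a direct numeric check:

import numpy as np
from scipy.special import factorial

nu = 2
R = 1.0 / (2.0 * np.sqrt(np.pi))  # L2 roughness of the Gaussian kernel
kap = 1.0                         # nu'th moment of the Gaussian kernel
C = np.pi**0.5 * factorial(nu)**3 * R
C /= 2 * nu * factorial(2 * nu) * kap**2
C = 2 * C**(1.0 / (2 * nu + 1))
print(C)  # about 1.0592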
def weight(self, x):
"""This returns the normalised weight at distance x"""
    return self.norm_const*self._shape(x)

weight | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def __call__(self, x):
"""
This simply returns the value of the kernel function at x
Does the same as weight if the function is normalised
"""
    return self._shape(x)

__call__ | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def smooth(self, xs, ys, x):
"""Returns the kernel smoothing estimate for point x based on x-values
xs and y-values ys.
Not expected to be called by the user.
Special implementation optimized for Biweight.
"""
xs, ys = self.in_domain(xs, ys, x)
if len(xs) > 0:
w = np.sum(square(subtract(1, square(divide(subtract(xs, x),
self.h)))))
v = np.sum(multiply(ys, square(subtract(1, square(divide(
subtract(xs, x), self.h))))))
return v / w
else:
        return np.nan

smooth | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def smoothvar(self, xs, ys, x):
"""
Returns the kernel smoothing estimate of the variance at point x.
"""
xs, ys = self.in_domain(xs, ys, x)
if len(xs) > 0:
fittedvals = np.array([self.smooth(xs, ys, xx) for xx in xs])
rs = square(subtract(ys, fittedvals))
w = np.sum(square(subtract(1.0, square(divide(subtract(xs, x),
self.h)))))
v = np.sum(multiply(rs, square(subtract(1, square(divide(
subtract(xs, x), self.h))))))
return v / w
else:
        return np.nan

smoothvar | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def smoothconf_(self, xs, ys, x):
"""Returns the kernel smoothing estimate with confidence 1sigma bounds
"""
xs, ys = self.in_domain(xs, ys, x)
if len(xs) > 0:
fittedvals = np.array([self.smooth(xs, ys, xx) for xx in xs])
rs = square(subtract(ys, fittedvals))
w = np.sum(square(subtract(1.0, square(divide(subtract(xs, x),
self.h)))))
v = np.sum(multiply(rs, square(subtract(1, square(divide(
subtract(xs, x), self.h))))))
var = v / w
sd = np.sqrt(var)
K = self.L2Norm
yhat = self.smooth(xs, ys, x)
err = sd * K / np.sqrt(0.9375 * w * self.h)
return (yhat - err, yhat, yhat + err)
else:
        return (np.nan, np.nan, np.nan)

smoothconf_ | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def smooth(self, xs, ys, x):
"""Returns the kernel smoothing estimate for point x based on x-values
xs and y-values ys.
Not expected to be called by the user.
Special implementation optimized for Gaussian.
"""
w = np.sum(exp(multiply(square(divide(subtract(xs, x),
self.h)),-0.5)))
v = np.sum(multiply(ys, exp(multiply(square(divide(subtract(xs, x),
self.h)), -0.5))))
    return v/w

smooth | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kernels.py | BSD-3-Clause
def _compute_covariance_(self):
'''not used'''
self.inv_cov = np.linalg.inv(self.covariance)
self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) * self.n | not used | _compute_covariance_ | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/kdecovclass.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/kdecovclass.py | BSD-3-Clause |
def predict(self, x):
"""
Returns the kernel smoothed prediction at x
If x is a real number then a single value is returned.
Otherwise an attempt is made to cast x to numpy.ndarray and an array of
corresponding y-points is returned.
"""
if np.size(x) == 1: # if isinstance(x, numbers.Real):
return self.Kernel.smooth(self.x, self.y, x)
else:
return np.array([self.Kernel.smooth(self.x, self.y, xx) for xx
in np.array(x)]) | Returns the kernel smoothed prediction at x
If x is a real number then a single value is returned.
Otherwise an attempt is made to cast x to numpy.ndarray and an array of
corresponding y-points is returned. | predict | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/smoothers.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/smoothers.py | BSD-3-Clause |
def conf(self, x):
"""
Returns the fitted curve and 1-sigma upper and lower point-wise
confidence.
These bounds are based on variance only, and do not include the bias.
If the bandwidth is much larger than the curvature of the underlying
function then the bias could be large.
x is the points on which you want to evaluate the fit and the errors.
Alternatively if x is specified as a positive integer, then the fit and
confidence bands points will be returned after every
xth sample point - so they are closer together where the data
is denser.
"""
if isinstance(x, int):
sorted_x = np.sort(np.array(self.x))
confx = sorted_x[::x]
conffit = self.conf(confx)
return (confx, conffit)
else:
return np.array([self.Kernel.smoothconf(self.x, self.y, xx)
for xx in x]) | Returns the fitted curve and 1-sigma upper and lower point-wise
confidence.
These bounds are based on variance only, and do not include the bias.
If the bandwidth is much larger than the curvature of the underlying
function then the bias could be large.
x is the points on which you want to evaluate the fit and the errors.
Alternatively if x is specified as a positive integer, then the fit and
confidence bands points will be returned after every
xth sample point - so they are closer together where the data
is denser. | conf | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/smoothers.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/smoothers.py | BSD-3-Clause |
def df_fit(self):
'''alias of df_model for backwards compatibility
'''
return self.df_model() | alias of df_model for backwards compatibility | df_fit | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/smoothers.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/smoothers.py | BSD-3-Clause |
def df_model(self):
"""
Degrees of freedom used in the fit.
"""
return self.order + 1 | Degrees of freedom used in the fit. | df_model | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/smoothers.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/smoothers.py | BSD-3-Clause |
def smooth(self,*args, **kwds):
'''alias for fit, for backwards compatibility,
do we need it with different behavior than fit?
'''
return self.fit(*args, **kwds) | alias for fit, for backwards compatibility,
do we need it with different behavior than fit? | smooth | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/smoothers.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/smoothers.py | BSD-3-Clause |
def df_resid(self):
"""
Residual degrees of freedom from last fit.
"""
return self.N - self.order - 1 | Residual degrees of freedom from last fit. | df_resid | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/smoothers.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/smoothers.py | BSD-3-Clause |
def fg1(x):
'''Fan and Gijbels example function 1
'''
return x + 2 * np.exp(-16 * x**2) | Fan and Gijbels example function 1 | fg1 | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/dgp_examples.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/dgp_examples.py | BSD-3-Clause |
def fg1eu(x):
'''Eubank similar to Fan and Gijbels example function 1
'''
return x + 0.5 * np.exp(-50 * (x - 0.5)**2) | Eubank similar to Fan and Gijbels example function 1 | fg1eu | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/dgp_examples.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/dgp_examples.py | BSD-3-Clause |
def fg2(x):
'''Fan and Gijbels example function 2
'''
return np.sin(2 * x) + 2 * np.exp(-16 * x**2) | Fan and Gijbels example function 2 | fg2 | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/dgp_examples.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/dgp_examples.py | BSD-3-Clause |
def func1(x):
'''made up example with sin, square
'''
return np.sin(x * 5) / x + 2. * x - 1. * x**2 | made up example with sin, square | func1 | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/dgp_examples.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/dgp_examples.py | BSD-3-Clause |
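The four mean functions above can be compared on a common grid; a small sketch, assuming the functions defined above are in the same namespace, and avoiding x = 0 where func1 has a removable singularity (sin(5x)/x):

import numpy as np

x = np.linspace(0.1, 2.0, 5)
for f in (fg1, fg1eu, fg2, func1):
    print(f.__name__, np.round(f(x), 3))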
def plot(self, scatter=True, ax=None):
'''plot the mean function and optionally the scatter of the sample
Parameters
----------
scatter : bool
If true, then add scatterpoints of sample to plot.
ax : None or matplotlib axis instance
If None, then a matplotlib.pyplot figure is created, otherwise
the given axis, ax, is used.
Returns
-------
Figure
This is either the created figure instance or the one associated
with ax if ax is given.
'''
if ax is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
if scatter:
ax.plot(self.x, self.y, 'o', alpha=0.5)
xx = np.linspace(self.x.min(), self.x.max(), 100)
ax.plot(xx, self.func(xx), lw=2, color='b', label='dgp mean')
return ax.figure | plot the mean function and optionally the scatter of the sample
Parameters
----------
scatter : bool
If true, then add scatterpoints of sample to plot.
ax : None or matplotlib axis instance
If None, then a matplotlib.pyplot figure is created, otherwise
the given axis, ax, is used.
Returns
-------
Figure
This is either the created figure instance or the one associated
with ax if ax is given. | plot | python | statsmodels/statsmodels | statsmodels/sandbox/nonparametric/dgp_examples.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/nonparametric/dgp_examples.py | BSD-3-Clause |
def mcarma22(niter=10, nsample=1000, ar=None, ma=None, sig=0.5):
'''run Monte Carlo for ARMA(2,2)
    Default DGP parameters and the sample size `nsample` are hard coded.
    An earlier version was not a self-contained function and used instances
    from the outer scope; this is now corrected.
'''
#nsample = 1000
#ar = [1.0, 0, 0]
if ar is None:
ar = [1.0, -0.55, -0.1]
#ma = [1.0, 0, 0]
if ma is None:
ma = [1.0, 0.3, 0.2]
results = []
results_bse = []
for _ in range(niter):
y2 = arma_generate_sample(ar,ma,nsample+1000, sig)[-nsample:]
y2 -= y2.mean()
arest2 = Arma(y2)
rhohat2a, cov_x2a, infodict, mesg, ier = arest2.fit((2,2))
results.append(rhohat2a)
err2a = arest2.geterrors(rhohat2a)
sige2a = np.sqrt(np.dot(err2a,err2a)/nsample)
#print('sige2a', sige2a,
#print('cov_x2a.shape', cov_x2a.shape
#results_bse.append(sige2a * np.sqrt(np.diag(cov_x2a)))
if cov_x2a is not None:
results_bse.append(sige2a * np.sqrt(np.diag(cov_x2a)))
else:
results_bse.append(np.nan + np.zeros_like(rhohat2a))
return np.r_[ar[1:], ma[1:]], np.array(results), np.array(results_bse) | run Monte Carlo for ARMA(2,2)
Default DGP parameters and the sample size `nsample` are hard coded.
An earlier version was not a self-contained function and used instances
from the outer scope; this is now corrected. | mcarma22 | python | statsmodels/statsmodels | statsmodels/sandbox/mcevaluate/arma.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/mcevaluate/arma.py | BSD-3-Clause
def __init__(self, n):
"""
Leave-One-Out cross validation iterator:
Provides train/test indexes to split data in train test sets
Parameters
----------
n: int
Total number of elements
Examples
--------
>>> from scikits.learn import cross_val
>>> X = [[1, 2], [3, 4]]
>>> y = [1, 2]
>>> loo = cross_val.LeaveOneOut(2)
>>> for train_index, test_index in loo:
... print "TRAIN:", train_index, "TEST:", test_index
... X_train, X_test, y_train, y_test = cross_val.split(train_index, test_index, X, y)
... print X_train, X_test, y_train, y_test
TRAIN: [False True] TEST: [ True False]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [ True False] TEST: [False True]
[[1 2]] [[3 4]] [1] [2]
"""
self.n = n | Leave-One-Out cross validation iterator:
Provides train/test indexes to split data in train test sets
Parameters
----------
n: int
Total number of elements
Examples
--------
>>> from scikits.learn import cross_val
>>> X = [[1, 2], [3, 4]]
>>> y = [1, 2]
>>> loo = cross_val.LeaveOneOut(2)
>>> for train_index, test_index in loo:
... print "TRAIN:", train_index, "TEST:", test_index
... X_train, X_test, y_train, y_test = cross_val.split(train_index, test_index, X, y)
... print X_train, X_test, y_train, y_test
TRAIN: [False True] TEST: [ True False]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [ True False] TEST: [False True]
[[1 2]] [[3 4]] [1] [2] | __init__ | python | statsmodels/statsmodels | statsmodels/sandbox/tools/cross_val.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tools/cross_val.py | BSD-3-Clause |
def __init__(self, n, p):
"""
Leave-P-Out cross validation iterator:
Provides train/test indexes to split data in train test sets
Parameters
----------
n: int
Total number of elements
p: int
Size test sets
Examples
--------
>>> from scikits.learn import cross_val
>>> X = [[1, 2], [3, 4], [5, 6], [7, 8]]
>>> y = [1, 2, 3, 4]
>>> lpo = cross_val.LeavePOut(4, 2)
>>> for train_index, test_index in lpo:
... print "TRAIN:", train_index, "TEST:", test_index
... X_train, X_test, y_train, y_test = cross_val.split(train_index, test_index, X, y)
TRAIN: [False False True True] TEST: [ True True False False]
TRAIN: [False True False True] TEST: [ True False True False]
TRAIN: [False True True False] TEST: [ True False False True]
TRAIN: [ True False False True] TEST: [False True True False]
TRAIN: [ True False True False] TEST: [False True False True]
TRAIN: [ True True False False] TEST: [False False True True]
"""
self.n = n
self.p = p | Leave-P-Out cross validation iterator:
Provides train/test indexes to split data in train test sets
Parameters
----------
n: int
Total number of elements
p: int
Size test sets
Examples
--------
>>> from scikits.learn import cross_val
>>> X = [[1, 2], [3, 4], [5, 6], [7, 8]]
>>> y = [1, 2, 3, 4]
>>> lpo = cross_val.LeavePOut(4, 2)
>>> for train_index, test_index in lpo:
... print "TRAIN:", train_index, "TEST:", test_index
... X_train, X_test, y_train, y_test = cross_val.split(train_index, test_index, X, y)
TRAIN: [False False True True] TEST: [ True True False False]
TRAIN: [False True False True] TEST: [ True False True False]
TRAIN: [False True True False] TEST: [ True False False True]
TRAIN: [ True False False True] TEST: [False True True False]
TRAIN: [ True False True False] TEST: [False True False True]
TRAIN: [ True True False False] TEST: [False False True True] | __init__ | python | statsmodels/statsmodels | statsmodels/sandbox/tools/cross_val.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tools/cross_val.py | BSD-3-Clause |
def __init__(self, n, k):
"""
K-Folds cross validation iterator:
Provides train/test indexes to split data in train test sets
Parameters
----------
n: int
Total number of elements
k: int
number of folds
Examples
--------
>>> from scikits.learn import cross_val
>>> X = [[1, 2], [3, 4], [1, 2], [3, 4]]
>>> y = [1, 2, 3, 4]
>>> kf = cross_val.KFold(4, k=2)
>>> for train_index, test_index in kf:
... print "TRAIN:", train_index, "TEST:", test_index
... X_train, X_test, y_train, y_test = cross_val.split(train_index, test_index, X, y)
TRAIN: [False False True True] TEST: [ True True False False]
TRAIN: [ True True False False] TEST: [False False True True]
Notes
-----
All the folds have size trunc(n/k), the last one has the complementary
"""
        if k <= 0:
            raise ValueError('cannot have k below 1')
        if k >= n:
            raise ValueError('cannot have k=%d greater than or equal to n=%d' % (k, n))
self.n = n
self.k = k | K-Folds cross validation iterator:
Provides train/test indexes to split data in train test sets
Parameters
----------
n: int
Total number of elements
k: int
number of folds
Examples
--------
>>> from scikits.learn import cross_val
>>> X = [[1, 2], [3, 4], [1, 2], [3, 4]]
>>> y = [1, 2, 3, 4]
>>> kf = cross_val.KFold(4, k=2)
>>> for train_index, test_index in kf:
... print "TRAIN:", train_index, "TEST:", test_index
... X_train, X_test, y_train, y_test = cross_val.split(train_index, test_index, X, y)
TRAIN: [False False True True] TEST: [ True True False False]
TRAIN: [ True True False False] TEST: [False False True True]
Notes
-----
All the folds have size trunc(n/k), the last one has the complementary | __init__ | python | statsmodels/statsmodels | statsmodels/sandbox/tools/cross_val.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tools/cross_val.py | BSD-3-Clause |
def __init__(self, labels):
"""
Leave-One-Label_Out cross validation:
Provides train/test indexes to split data in train test sets
Parameters
----------
labels : list
List of labels
Examples
--------
>>> from scikits.learn import cross_val
>>> X = [[1, 2], [3, 4], [5, 6], [7, 8]]
>>> y = [1, 2, 1, 2]
>>> labels = [1, 1, 2, 2]
>>> lol = cross_val.LeaveOneLabelOut(labels)
>>> for train_index, test_index in lol:
... print "TRAIN:", train_index, "TEST:", test_index
... X_train, X_test, y_train, y_test = cross_val.split(train_index, \
test_index, X, y)
... print X_train, X_test, y_train, y_test
TRAIN: [False False True True] TEST: [ True True False False]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [ True True False False] TEST: [False False True True]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
self.labels = labels | Leave-One-Label_Out cross validation:
Provides train/test indexes to split data in train test sets
Parameters
----------
labels : list
List of labels
Examples
--------
>>> from scikits.learn import cross_val
>>> X = [[1, 2], [3, 4], [5, 6], [7, 8]]
>>> y = [1, 2, 1, 2]
>>> labels = [1, 1, 2, 2]
>>> lol = cross_val.LeaveOneLabelOut(labels)
>>> for train_index, test_index in lol:
... print "TRAIN:", train_index, "TEST:", test_index
... X_train, X_test, y_train, y_test = cross_val.split(train_index, \
test_index, X, y)
... print X_train, X_test, y_train, y_test
TRAIN: [False False True True] TEST: [ True True False False]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [ True True False False] TEST: [False False True True]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2] | __init__ | python | statsmodels/statsmodels | statsmodels/sandbox/tools/cross_val.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tools/cross_val.py | BSD-3-Clause |
def split(train_indexes, test_indexes, *args):
"""
For each arg return a train and test subsets defined by indexes provided
in train_indexes and test_indexes
"""
ret = []
for arg in args:
arg = np.asanyarray(arg)
arg_train = arg[train_indexes]
arg_test = arg[test_indexes]
ret.append(arg_train)
ret.append(arg_test)
return ret | For each arg return a train and test subsets defined by indexes provided
in train_indexes and test_indexes | split | python | statsmodels/statsmodels | statsmodels/sandbox/tools/cross_val.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tools/cross_val.py | BSD-3-Clause |
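A small usage sketch for `split`, assuming the function defined above is importable; boolean masks work because numpy supports boolean indexing:

import numpy as np

X = np.arange(10).reshape(5, 2)
y = np.arange(5)
train_index = np.array([True, True, True, False, False])
test_index = ~train_index
X_train, X_test, y_train, y_test = split(train_index, test_index, X, y)
print(X_train.shape, X_test.shape)  # (3, 2) (2, 2)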
def __init__(self, n, k=1, start=None, kall=True, return_slice=True):
"""
KStepAhead cross validation iterator:
Provides train/test indexes to split data in train test sets
Parameters
----------
n: int
Total number of elements
k : int
number of steps ahead
start : int
initial size of data for fitting
kall : bool
            if true, all values for up to k-step ahead are included in the test index.
            If false, then only the k-th step ahead value is returned.
Notes
-----
I do not think this is really useful, because it can be done with
a very simple loop instead.
Useful as a plugin, but it could return slices instead for faster array access.
Examples
--------
>>> from scikits.learn import cross_val
>>> X = [[1, 2], [3, 4]]
>>> y = [1, 2]
>>> loo = cross_val.LeaveOneOut(2)
>>> for train_index, test_index in loo:
... print "TRAIN:", train_index, "TEST:", test_index
... X_train, X_test, y_train, y_test = cross_val.split(train_index, test_index, X, y)
... print X_train, X_test, y_train, y_test
TRAIN: [False True] TEST: [ True False]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [ True False] TEST: [False True]
[[1 2]] [[3 4]] [1] [2]
"""
self.n = n
self.k = k
if start is None:
start = int(np.trunc(n*0.25)) # pick something arbitrary
self.start = start
self.kall = kall
self.return_slice = return_slice | KStepAhead cross validation iterator:
Provides train/test indexes to split data in train test sets
Parameters
----------
n: int
Total number of elements
k : int
number of steps ahead
start : int
initial size of data for fitting
kall : bool
if true, all values for up to k-step ahead are included in the test index.
If false, then only the k-th step ahead value is returned.
Notes
-----
I do not think this is really useful, because it can be done with
a very simple loop instead.
Useful as a plugin, but it could return slices instead for faster array access.
Examples
--------
>>> from scikits.learn import cross_val
>>> X = [[1, 2], [3, 4]]
>>> y = [1, 2]
>>> loo = cross_val.LeaveOneOut(2)
>>> for train_index, test_index in loo:
... print "TRAIN:", train_index, "TEST:", test_index
... X_train, X_test, y_train, y_test = cross_val.split(train_index, test_index, X, y)
... print X_train, X_test, y_train, y_test
TRAIN: [False True] TEST: [ True False]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [ True False] TEST: [False True]
[[1 2]] [[3 4]] [1] [2] | __init__ | python | statsmodels/statsmodels | statsmodels/sandbox/tools/cross_val.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tools/cross_val.py | BSD-3-Clause |
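Only the constructor of KStepAhead is shown above; as its Notes say, the same rolling-origin split can be written as a plain loop. A sketch under that reading, with illustrative variable names:

n, k, start = 10, 2, 5
for t in range(start, n - k + 1):
    train = slice(0, t)       # fit on observations 0 .. t-1
    test = slice(t, t + k)    # evaluate on the next k observations
    print(train, test)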
def pca(data, keepdim=0, normalize=0, demean=True):
'''principal components with eigenvector decomposition
similar to princomp in matlab
Parameters
----------
data : ndarray, 2d
data with observations by rows and variables in columns
keepdim : int
number of eigenvectors to keep
if keepdim is zero, then all eigenvectors are included
normalize : bool
if true, then eigenvectors are normalized by sqrt of eigenvalues
demean : bool
if true, then the column mean is subtracted from the data
Returns
-------
xreduced : ndarray, 2d, (nobs, nvars)
projection of the data x on the kept eigenvectors
factors : ndarray, 2d, (nobs, nfactors)
factor matrix, given by np.dot(x, evecs)
    evals : ndarray, 1d, (nfactors,)
        eigenvalues
    evecs : ndarray, 2d, (nvars, nfactors)
        eigenvectors, normalized if normalize is true
See Also
--------
pcasvd : principal component analysis using svd
'''
x = np.array(data)
#make copy so original does not change, maybe not necessary anymore
if demean:
m = x.mean(0)
else:
m = np.zeros(x.shape[1])
x -= m
# Covariance matrix
xcov = np.cov(x, rowvar=0)
# Compute eigenvalues and sort into descending order
evals, evecs = np.linalg.eig(xcov)
indices = np.argsort(evals)
indices = indices[::-1]
evecs = evecs[:,indices]
evals = evals[indices]
if keepdim > 0 and keepdim < x.shape[1]:
evecs = evecs[:,:keepdim]
evals = evals[:keepdim]
if normalize:
#for i in range(shape(evecs)[1]):
# evecs[:,i] / linalg.norm(evecs[:,i]) * sqrt(evals[i])
evecs = evecs/np.sqrt(evals) #np.sqrt(np.dot(evecs.T, evecs) * evals)
# get factor matrix
#x = np.dot(evecs.T, x.T)
factors = np.dot(x, evecs)
# get original data from reduced number of components
#xreduced = np.dot(evecs.T, factors) + m
#print x.shape, factors.shape, evecs.shape, m.shape
xreduced = np.dot(factors, evecs.T) + m
return xreduced, factors, evals, evecs | principal components with eigenvector decomposition
similar to princomp in matlab
Parameters
----------
data : ndarray, 2d
data with observations by rows and variables in columns
keepdim : int
number of eigenvectors to keep
if keepdim is zero, then all eigenvectors are included
normalize : bool
if true, then eigenvectors are normalized by sqrt of eigenvalues
demean : bool
if true, then the column mean is subtracted from the data
Returns
-------
xreduced : ndarray, 2d, (nobs, nvars)
projection of the data x on the kept eigenvectors
factors : ndarray, 2d, (nobs, nfactors)
factor matrix, given by np.dot(x, evecs)
evals : ndarray, 1d, (nfactors,)
    eigenvalues
evecs : ndarray, 2d, (nvars, nfactors)
    eigenvectors, normalized if normalize is true
See Also
--------
pcasvd : principal component analysis using svd | pca | python | statsmodels/statsmodels | statsmodels/sandbox/tools/tools_pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tools/tools_pca.py | BSD-3-Clause |
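A usage sketch for `pca`, assuming the function above is importable; with keepdim=2 the factors have two columns and two eigenvalues are returned:

import numpy as np

rng = np.random.default_rng(0)
data = rng.standard_normal((100, 4))
xreduced, factors, evals, evecs = pca(data, keepdim=2)
print(factors.shape, evals.shape, evecs.shape)  # (100, 2) (2,) (4, 2)
print(xreduced.shape)                           # (100, 4), rank-2 reconstruction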
def pcasvd(data, keepdim=0, demean=True):
'''principal components with svd
Parameters
----------
data : ndarray, 2d
data with observations by rows and variables in columns
keepdim : int
number of eigenvectors to keep
if keepdim is zero, then all eigenvectors are included
demean : bool
if true, then the column mean is subtracted from the data
Returns
-------
xreduced : ndarray, 2d, (nobs, nvars)
projection of the data x on the kept eigenvectors
factors : ndarray, 2d, (nobs, nfactors)
factor matrix, given by np.dot(x, evecs)
    evals : ndarray, 1d, (nfactors,)
        eigenvalues
    evecs : ndarray, 2d, (nvars, nfactors)
        eigenvectors (left singular vectors of the demeaned data)
See Also
--------
pca : principal component analysis using eigenvector decomposition
Notes
-----
    This does not yet have the normalize option of pca.
'''
nobs, nvars = data.shape
#print nobs, nvars, keepdim
x = np.array(data)
#make copy so original does not change
if demean:
m = x.mean(0)
else:
m = 0
## if keepdim == 0:
## keepdim = nvars
## "print reassigning keepdim to max", keepdim
x -= m
U, s, v = np.linalg.svd(x.T, full_matrices=1)
factors = np.dot(U.T, x.T).T #princomps
if keepdim:
xreduced = np.dot(factors[:,:keepdim], U[:,:keepdim].T) + m
else:
xreduced = data
keepdim = nvars
"print reassigning keepdim to max", keepdim
    # s = evals, U = evecs
    # the n - 1 denominator matches the ddof=1 default of np.cov used in pca
evals = s**2/(x.shape[0]-1)
#print keepdim
return xreduced, factors[:,:keepdim], evals[:keepdim], U[:,:keepdim] #, v | principal components with svd
Parameters
----------
data : ndarray, 2d
data with observations by rows and variables in columns
keepdim : int
number of eigenvectors to keep
if keepdim is zero, then all eigenvectors are included
demean : bool
if true, then the column mean is subtracted from the data
Returns
-------
xreduced : ndarray, 2d, (nobs, nvars)
projection of the data x on the kept eigenvectors
factors : ndarray, 2d, (nobs, nfactors)
factor matrix, given by np.dot(x, evecs)
evals : ndarray, 1d, (nfactors,)
    eigenvalues
evecs : ndarray, 2d, (nvars, nfactors)
    eigenvectors (left singular vectors of the demeaned data)
See Also
--------
pca : principal component analysis using eigenvector decomposition
Notes
-----
This does not yet have the normalize option of pca. | pcasvd | python | statsmodels/statsmodels | statsmodels/sandbox/tools/tools_pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tools/tools_pca.py | BSD-3-Clause
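Since `pca` and `pcasvd` both demean by default and both divide by n - 1, their eigenvalues should agree and their factors should agree up to column signs; a quick consistency check, assuming both functions above are importable:

import numpy as np

rng = np.random.default_rng(0)
data = rng.standard_normal((50, 3))
_, f1, ev1, _ = pca(data)
_, f2, ev2, _ = pcasvd(data, keepdim=3)
print(np.allclose(ev1, ev2))                  # eigenvalues agree
print(np.allclose(np.abs(f1), np.abs(f2)))    # factors agree up to sign flips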
def run(self, nrepl, statindices=None, dgpargs=[], statsargs=[]):
'''run the actual Monte Carlo and save results
Parameters
----------
nrepl : int
number of Monte Carlo repetitions
statindices : None or list of integers
determines which values of the return of the statistic
functions are stored in the Monte Carlo. Default None
means the entire return. If statindices is a list of
integers, then it will be used as index into the return.
dgpargs : tuple
optional parameters for the DGP
statsargs : tuple
optional parameters for the statistics function
Returns
-------
None, all results are attached
'''
self.nrepl = nrepl
self.statindices = statindices
self.dgpargs = dgpargs
self.statsargs = statsargs
dgp = self.dgp
statfun = self.statistic # name ?
#introspect len of return of statfun,
#possible problems with ndim>1, check ValueError
mcres0 = statfun(dgp(*dgpargs), *statsargs)
self.nreturn = nreturns = len(np.ravel(mcres0))
#single return statistic
if statindices is None:
#self.nreturn = nreturns = 1
mcres = np.zeros(nrepl)
mcres[0] = mcres0
            for ii in range(1, nrepl):
x = dgp(*dgpargs) #(1e-4+np.random.randn(nobs)).cumsum()
#should I ravel?
mcres[ii] = statfun(x, *statsargs)
#more than one return statistic
else:
self.nreturn = nreturns = len(statindices)
self.mcres = mcres = np.zeros((nrepl, nreturns))
mcres[0] = [mcres0[i] for i in statindices]
            for ii in range(1, nrepl):
x = dgp(*dgpargs) #(1e-4+np.random.randn(nobs)).cumsum()
ret = statfun(x, *statsargs)
mcres[ii] = [ret[i] for i in statindices]
self.mcres = mcres | run the actual Monte Carlo and save results
Parameters
----------
nrepl : int
number of Monte Carlo repetitions
statindices : None or list of integers
determines which values of the return of the statistic
functions are stored in the Monte Carlo. Default None
means the entire return. If statindices is a list of
integers, then it will be used as index into the return.
dgpargs : tuple
optional parameters for the DGP
statsargs : tuple
optional parameters for the statistics function
Returns
-------
None, all results are attached | run | python | statsmodels/statsmodels | statsmodels/sandbox/tools/mctools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tools/mctools.py | BSD-3-Clause |
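A sketch of how the runner above is wired up; the class name StatTestMC and the constructor signature (dgp, statistic) are assumptions taken from the surrounding sandbox module, since only the run method is shown here.

import numpy as np
from scipy import stats

def dgp():
    return np.random.randn(100)

def statistic(x):
    return stats.skewtest(x)   # returns (statistic, pvalue)

mc = StatTestMC(dgp, statistic)     # assumed constructor
mc.run(1000, statindices=[0, 1])    # store both return values per replication
print(mc.mcres.shape)               # (1000, 2)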
def histogram(self, idx=None, critval=None):
'''calculate histogram values
does not do any plotting
I do not remember what I wanted here, looks similar to the new cdf
method, but this also does a binned pdf (self.histo)
'''
if self.mcres.ndim == 2:
if idx is not None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
if critval is None:
histo = np.histogram(mcres, bins=10)
else:
            bins = np.asarray(critval, dtype=float)
            if bins[0] != -np.inf:
                bins = np.r_[-np.inf, bins]
            if bins[-1] != np.inf:
                bins = np.r_[bins, np.inf]
            histo = np.histogram(mcres, bins=bins)
self.histo = histo
self.cumhisto = np.cumsum(histo[0])*1./self.nrepl
self.cumhistoreversed = np.cumsum(histo[0][::-1])[::-1]*1./self.nrepl
return histo, self.cumhisto, self.cumhistoreversed | calculate histogram values
does not do any plotting
I do not remember what I wanted here, looks similar to the new cdf
method, but this also does a binned pdf (self.histo) | histogram | python | statsmodels/statsmodels | statsmodels/sandbox/tools/mctools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tools/mctools.py | BSD-3-Clause |
def quantiles(self, idx=None, frac=[0.01, 0.025, 0.05, 0.1, 0.975]):
'''calculate quantiles of Monte Carlo results
similar to ppf
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
frac : array_like, float
Defines which quantiles should be calculated. For example a frac
of 0.1 finds the 10% quantile, x such that cdf(x)=0.1
Returns
-------
frac : ndarray
same values as input, TODO: I should drop this again ?
quantiles : ndarray, (len(frac), len(idx))
the quantiles with frac in rows and idx variables in columns
Notes
-----
rename to ppf ? make frac required
change sequence idx, frac
'''
        if self.mcres.ndim == 2 and idx is None:
            raise ValueError('currently only 1 statistic at a time')
self.frac = frac = np.asarray(frac)
mc_sorted = self.get_mc_sorted()[:,idx]
return frac, mc_sorted[(self.nrepl*frac).astype(int)] | calculate quantiles of Monte Carlo results
similar to ppf
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
frac : array_like, float
Defines which quantiles should be calculated. For example a frac
of 0.1 finds the 10% quantile, x such that cdf(x)=0.1
Returns
-------
frac : ndarray
same values as input, TODO: I should drop this again ?
quantiles : ndarray, (len(frac), len(idx))
the quantiles with frac in rows and idx variables in columns
Notes
-----
rename to ppf ? make frac required
change sequence idx, frac | quantiles | python | statsmodels/statsmodels | statsmodels/sandbox/tools/mctools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tools/mctools.py | BSD-3-Clause |
def cdf(self, x, idx=None):
'''calculate cumulative probabilities of Monte Carlo results
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
frac : array_like, float
Defines which quantiles should be calculated. For example a frac
of 0.1 finds the 10% quantile, x such that cdf(x)=0.1
Returns
-------
x : ndarray
same as input, TODO: I should drop this again ?
probs : ndarray, (len(x), len(idx))
the quantiles with frac in rows and idx variables in columns
'''
idx = np.atleast_1d(idx).tolist() #assure iterable, use list ?
# if self.mcres.ndim == 2:
# if not idx is None:
# mcres = self.mcres[:,idx]
# else:
# raise ValueError('currently only 1 statistic at a time')
# else:
# mcres = self.mcres
mc_sorted = self.get_mc_sorted()
x = np.asarray(x)
#TODO:autodetect or explicit option ?
if x.ndim > 1 and x.shape[1]==len(idx):
use_xi = True
else:
use_xi = False
x_ = x #alias
probs = []
for i,ix in enumerate(idx):
if use_xi:
x_ = x[:,i]
probs.append(np.searchsorted(mc_sorted[:,ix], x_)/float(self.nrepl))
probs = np.asarray(probs).T
return x, probs | calculate cumulative probabilities of Monte Carlo results
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
frac : array_like, float
Defines which quantiles should be calculated. For example a frac
of 0.1 finds the 10% quantile, x such that cdf(x)=0.1
Returns
-------
x : ndarray
same as input, TODO: I should drop this again ?
probs : ndarray, (len(x), len(idx))
the quantiles with frac in rows and idx variables in columns | cdf | python | statsmodels/statsmodels | statsmodels/sandbox/tools/mctools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tools/mctools.py | BSD-3-Clause |
def plot_hist(self, idx, distpdf=None, bins=50, ax=None, kwds=None):
'''plot the histogram against a reference distribution
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
distpdf : callable
probability density function of reference distribution
bins : {int, array_like}
used unchanged for matplotlibs hist call
ax : TODO: not implemented yet
kwds : None or tuple of dicts
extra keyword options to the calls to the matplotlib functions,
            first dictionary is for hist, second dictionary for plot of the
reference distribution
Returns
-------
None
'''
if kwds is None:
kwds = ({},{})
if self.mcres.ndim == 2:
if idx is not None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
lsp = np.linspace(mcres.min(), mcres.max(), 100)
import matplotlib.pyplot as plt
#I do not want to figure this out now
# if ax=None:
# fig = plt.figure()
# ax = fig.addaxis()
plt.figure()
        plt.hist(mcres, bins=bins, density=True, **kwds[0])
plt.plot(lsp, distpdf(lsp), 'r', **kwds[1]) | plot the histogram against a reference distribution
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
distpdf : callable
probability density function of reference distribution
bins : {int, array_like}
used unchanged for matplotlibs hist call
ax : TODO: not implemented yet
kwds : None or tuple of dicts
extra keyword options to the calls to the matplotlib functions,
first dictionary is for hist, second dictionary for plot of the
reference distribution
Returns
-------
None | plot_hist | python | statsmodels/statsmodels | statsmodels/sandbox/tools/mctools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tools/mctools.py | BSD-3-Clause |
def summary_quantiles(self, idx, distppf, frac=[0.01, 0.025, 0.05, 0.1, 0.975],
varnames=None, title=None):
'''summary table for quantiles (critical values)
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
distppf : callable
probability density function of reference distribution
TODO: use `crit` values instead or additional, see summary_cdf
frac : array_like, float
            probabilities for which the quantiles (critical values) are computed
varnames : None, or list of strings
optional list of variable names, same length as idx
Returns
-------
table : instance of SimpleTable
            use `print(table)` to see results
'''
idx = np.atleast_1d(idx) #assure iterable, use list ?
quant, mcq = self.quantiles(idx, frac=frac)
#not sure whether this will work with single quantile
#crit = stats.chi2([2,4]).ppf(np.atleast_2d(quant).T)
crit = distppf(np.atleast_2d(quant).T)
mml=[]
for i, ix in enumerate(idx): #TODO: hardcoded 2 ?
mml.extend([mcq[:,i], crit[:,i]])
#mmlar = np.column_stack(mml)
mmlar = np.column_stack([quant] + mml)
#print(mmlar.shape
if title:
title = title +' Quantiles (critical values)'
else:
title='Quantiles (critical values)'
#TODO use stub instead
if varnames is None:
varnames = ['var%d' % i for i in range(mmlar.shape[1]//2)]
headers = ['\nprob'] + [f'{i}\n{t}' for i in varnames for t in ['mc', 'dist']]
return SimpleTable(mmlar,
txt_fmt={'data_fmts': ["%#6.3f"]+["%#10.4f"]*(mmlar.shape[1]-1)},
title=title,
headers=headers) | summary table for quantiles (critical values)
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
distppf : callable
probability density function of reference distribution
TODO: use `crit` values instead or additional, see summary_cdf
frac : array_like, float
probabilities for which the quantiles (critical values) are computed
varnames : None, or list of strings
optional list of variable names, same length as idx
Returns
-------
table : instance of SimpleTable
use `print(table)` to see results | summary_quantiles | python | statsmodels/statsmodels | statsmodels/sandbox/tools/mctools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tools/mctools.py | BSD-3-Clause
def summary_cdf(self, idx, frac, crit, varnames=None, title=None):
'''summary table for cumulative density function
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
frac : array_like, float
            probabilities used as the row labels of the table
crit : array_like
values for which cdf is calculated
varnames : None, or list of strings
optional list of variable names, same length as idx
Returns
-------
table : instance of SimpleTable
            use `print(table)` to see results
'''
idx = np.atleast_1d(idx) #assure iterable, use list ?
mml=[]
#TODO:need broadcasting in cdf
for i in range(len(idx)):
#print(i, mc1.cdf(crit[:,i], [idx[i]])[1].ravel()
mml.append(self.cdf(crit[:,i], [idx[i]])[1].ravel())
#mml = self.cdf(crit, idx)[1]
#mmlar = np.column_stack(mml)
#print(mml[0].shape, np.shape(frac)
mmlar = np.column_stack([frac] + mml)
#print(mmlar.shape
if title:
            title = title + ' Probabilities'
else:
title='Probabilities'
#TODO use stub instead
#headers = ['\nprob'] + ['var%d\n%s' % (i, t) for i in range(mmlar.shape[1]-1) for t in ['mc']]
if varnames is None:
varnames = ['var%d' % i for i in range(mmlar.shape[1]-1)]
headers = ['prob'] + varnames
return SimpleTable(mmlar,
                           txt_fmt={'data_fmts': ["%#6.3f"]+["%#10.4f"]*(mmlar.shape[1]-1)},
title=title,
headers=headers) | summary table for cumulative density function
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
frac : array_like, float
probabilities used as the row labels of the table
crit : array_like
values for which cdf is calculated
varnames : None, or list of strings
optional list of variable names, same length as idx
Returns
-------
table : instance of SimpleTable
use `print(table)` to see results | summary_cdf | python | statsmodels/statsmodels | statsmodels/sandbox/tools/mctools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/tools/mctools.py | BSD-3-Clause
def contrast_allpairs(nm):
'''contrast or restriction matrix for all pairs of nm variables
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm*(nm-1)/2, nm)
contrast matrix for all pairwise comparisons
'''
contr = []
for i in range(nm):
for j in range(i+1, nm):
contr_row = np.zeros(nm)
contr_row[i] = 1
contr_row[j] = -1
contr.append(contr_row)
return np.array(contr) | contrast or restriction matrix for all pairs of nm variables
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm*(nm-1)/2, nm)
contrast matrix for all pairwise comparisons | contrast_allpairs | python | statsmodels/statsmodels | statsmodels/sandbox/stats/contrast_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/contrast_tools.py | BSD-3-Clause |
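For nm = 3 the pairwise contrast matrix has one row per pair; a quick check, assuming contrast_allpairs above is importable:

print(contrast_allpairs(3))
# [[ 1. -1.  0.]
#  [ 1.  0. -1.]
#  [ 0.  1. -1.]]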
def contrast_all_one(nm):
'''contrast or restriction matrix for all against first comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against first comparisons
'''
contr = np.column_stack((np.ones(nm-1), -np.eye(nm-1)))
return contr | contrast or restriction matrix for all against first comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against first comparisons | contrast_all_one | python | statsmodels/statsmodels | statsmodels/sandbox/stats/contrast_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/contrast_tools.py | BSD-3-Clause |
def contrast_diff_mean(nm):
'''contrast or restriction matrix for all against mean comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against mean comparisons
'''
return np.eye(nm) - np.ones((nm,nm))/nm | contrast or restriction matrix for all against mean comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against mean comparisons | contrast_diff_mean | python | statsmodels/statsmodels | statsmodels/sandbox/stats/contrast_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/contrast_tools.py | BSD-3-Clause |
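The contrast helpers can be compared side by side for nm = 3; a sketch assuming the functions above are importable:

import numpy as np

print(contrast_all_one(3))
# [[ 1. -1.  0.]
#  [ 1.  0. -1.]]
print(np.round(contrast_diff_mean(3), 3))
# [[ 0.667 -0.333 -0.333]
#  [-0.333  0.667 -0.333]
#  [-0.333 -0.333  0.667]]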
def contrast_product(names1, names2, intgroup1=None, intgroup2=None, pairs=False):
'''build contrast matrices for products of two categorical variables
this is an experimental script and should be converted to a class
Parameters
----------
names1, names2 : lists of strings
contains the list of level labels for each categorical variable
intgroup1, intgroup2 : ndarrays TODO: this part not tested, finished yet
categorical variable
Notes
-----
This creates a full rank matrix. It does not do all pairwise comparisons,
parameterization is using contrast_all_one to get differences with first
level.
? does contrast_all_pairs work as a plugin to get all pairs ?
'''
n1 = len(names1)
n2 = len(names2)
names_prod = [f'{i}_{j}' for i in names1 for j in names2]
ee1 = np.zeros((1,n1))
ee1[0,0] = 1
if not pairs:
dd = np.r_[ee1, -contrast_all_one(n1)]
else:
dd = np.r_[ee1, -contrast_allpairs(n1)]
contrast_prod = np.kron(dd[1:], np.eye(n2))
contrast_labels(contrast_prod, names_prod, reverse=True)
names_contrast_prod = [''.join([f'{signstr(c, noplus=True)}{v}'
                           for c, v in list(zip(row, names_prod))[::-1] if c != 0])
for row in contrast_prod]
ee2 = np.zeros((1,n2))
ee2[0,0] = 1
#dd2 = np.r_[ee2, -contrast_all_one(n2)]
if not pairs:
dd2 = np.r_[ee2, -contrast_all_one(n2)]
else:
dd2 = np.r_[ee2, -contrast_allpairs(n2)]
contrast_prod2 = np.kron(np.eye(n1), dd2[1:])
names_contrast_prod2 = [''.join([f'{signstr(c, noplus=True)}{v}'
                            for c, v in list(zip(row, names_prod))[::-1] if c != 0])
for row in contrast_prod2]
    if (intgroup1 is not None) and (intgroup2 is not None):
d1, _ = dummy_1d(intgroup1)
d2, _ = dummy_1d(intgroup2)
dummy = dummy_product(d1, d2)
else:
dummy = None
return (names_prod, contrast_prod, names_contrast_prod,
contrast_prod2, names_contrast_prod2, dummy) | build contrast matrices for products of two categorical variables
this is an experimental script and should be converted to a class
Parameters
----------
names1, names2 : lists of strings
contains the list of level labels for each categorical variable
intgroup1, intgroup2 : ndarrays TODO: this part not tested, finished yet
categorical variable
Notes
-----
This creates a full rank matrix. It does not do all pairwise comparisons,
parameterization is using contrast_all_one to get differences with first
level.
? does contrast_all_pairs work as a plugin to get all pairs ? | contrast_product | python | statsmodels/statsmodels | statsmodels/sandbox/stats/contrast_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/contrast_tools.py | BSD-3-Clause |
def dummy_1d(x, varname=None):
'''dummy variable for id integer groups
Parameters
----------
x : ndarray, 1d
categorical variable, requires integers if varname is None
varname : str
name of the variable used in labels for category levels
Returns
-------
dummy : ndarray, 2d
array of dummy variables, one column for each level of the
category (full set)
labels : list[str]
labels for the columns, i.e. levels of each category
Notes
-----
    use tools.categorical instead for more options
See Also
--------
statsmodels.tools.categorical
Examples
--------
>>> x = np.array(['F', 'F', 'M', 'M', 'F', 'F', 'M', 'M', 'F', 'F', 'M', 'M'],
dtype='|S1')
>>> dummy_1d(x, varname='gender')
(array([[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]), ['gender_F', 'gender_M'])
'''
if varname is None: #assumes integer
labels = ['level_%d' % i for i in range(x.max() + 1)]
return (x[:,None]==np.arange(x.max()+1)).astype(int), labels
else:
grouplabels = np.unique(x)
labels = [varname + '_%s' % str(i) for i in grouplabels]
return (x[:,None]==grouplabels).astype(int), labels | dummy variable for id integer groups
Parameters
----------
x : ndarray, 1d
categorical variable, requires integers if varname is None
varname : str
name of the variable used in labels for category levels
Returns
-------
dummy : ndarray, 2d
array of dummy variables, one column for each level of the
category (full set)
labels : list[str]
labels for the columns, i.e. levels of each category
Notes
-----
use tools.categorical instead for more options
See Also
--------
statsmodels.tools.categorical
Examples
--------
>>> x = np.array(['F', 'F', 'M', 'M', 'F', 'F', 'M', 'M', 'F', 'F', 'M', 'M'],
dtype='|S1')
>>> dummy_1d(x, varname='gender')
(array([[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]), ['gender_F', 'gender_M']) | dummy_1d | python | statsmodels/statsmodels | statsmodels/sandbox/stats/contrast_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/contrast_tools.py | BSD-3-Clause |
def dummy_product(d1, d2, method='full'):
'''dummy variable from product of two dummy variables
Parameters
----------
d1, d2 : ndarray
two dummy variables, assumes full set for methods 'drop-last'
and 'drop-first'
method : {'full', 'drop-last', 'drop-first'}
'full' returns the full product, encoding of intersection of
categories.
The drop methods provide a difference dummy encoding:
(constant, main effects, interaction effects). The first or last columns
of the dummy variable (i.e. levels) are dropped to get full rank
dummy matrix.
Returns
-------
dummy : ndarray
dummy variable for product, see method
'''
if method == 'full':
dd = (d1[:,:,None]*d2[:,None,:]).reshape(d1.shape[0],-1)
elif method == 'drop-last': #same as SAS transreg
d12rl = dummy_product(d1[:,:-1], d2[:,:-1])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,:-1], d2[:,:-1],d12rl))
#Note: dtype int should preserve dtype of d1 and d2
elif method == 'drop-first':
d12r = dummy_product(d1[:,1:], d2[:,1:])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,1:], d2[:,1:],d12r))
else:
raise ValueError('method not recognized')
return dd | dummy variable from product of two dummy variables
Parameters
----------
d1, d2 : ndarray
two dummy variables, assumes full set for methods 'drop-last'
and 'drop-first'
method : {'full', 'drop-last', 'drop-first'}
'full' returns the full product, encoding of intersection of
categories.
The drop methods provide a difference dummy encoding:
(constant, main effects, interaction effects). The first or last columns
of the dummy variable (i.e. levels) are dropped to get full rank
dummy matrix.
Returns
-------
dummy : ndarray
dummy variable for product, see method | dummy_product | python | statsmodels/statsmodels | statsmodels/sandbox/stats/contrast_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/contrast_tools.py | BSD-3-Clause |
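A small worked example of the full product coding: with two binary factors the product dummies pick out the four intersection cells, one per row. Assumes dummy_1d and dummy_product above are importable:

import numpy as np

d1, _ = dummy_1d(np.array([0, 0, 1, 1]))
d2, _ = dummy_1d(np.array([0, 1, 0, 1]))
print(dummy_product(d1, d2))
# [[1 0 0 0]
#  [0 1 0 0]
#  [0 0 1 0]
#  [0 0 0 1]]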
def dummy_limits(d):
'''start and endpoints of groups in a sorted dummy variable array
helper function for nested categories
Examples
--------
>>> d1 = np.array([[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1]])
>>> dummy_limits(d1)
(array([0, 4, 8]), array([ 4, 8, 12]))
get group slices from an array
>>> [np.arange(d1.shape[0])[b:e] for b,e in zip(*dummy_limits(d1))]
[array([0, 1, 2, 3]), array([4, 5, 6, 7]), array([ 8, 9, 10, 11])]
>>> [np.arange(d1.shape[0])[b:e] for b,e in zip(*dummy_limits(d1))]
[array([0, 1, 2, 3]), array([4, 5, 6, 7]), array([ 8, 9, 10, 11])]
'''
nobs, nvars = d.shape
start1, col1 = np.nonzero(np.diff(d,axis=0)==1)
end1, col1_ = np.nonzero(np.diff(d,axis=0)==-1)
cc = np.arange(nvars)
#print(cc, np.r_[[0], col1], np.r_[col1_, [nvars-1]]
if ((not (np.r_[[0], col1] == cc).all())
or (not (np.r_[col1_, [nvars-1]] == cc).all())):
raise ValueError('dummy variable is not sorted')
start = np.r_[[0], start1+1]
end = np.r_[end1+1, [nobs]]
return start, end | start and endpoints of groups in a sorted dummy variable array
helper function for nested categories
Examples
--------
>>> d1 = np.array([[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1]])
>>> dummy_limits(d1)
(array([0, 4, 8]), array([ 4, 8, 12]))
get group slices from an array
>>> [np.arange(d1.shape[0])[b:e] for b,e in zip(*dummy_limits(d1))]
[array([0, 1, 2, 3]), array([4, 5, 6, 7]), array([ 8, 9, 10, 11])] | dummy_limits | python | statsmodels/statsmodels | statsmodels/sandbox/stats/contrast_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/contrast_tools.py | BSD-3-Clause
def dummy_nested(d1, d2, method='full'):
    '''unfinished and incomplete, mainly a copy-paste of dummy_product
dummy variable from product of two dummy variables
Parameters
----------
d1, d2 : ndarray
two dummy variables, d2 is assumed to be nested in d1
Assumes full set for methods 'drop-last' and 'drop-first'.
method : {'full', 'drop-last', 'drop-first'}
'full' returns the full product, which in this case is d2.
The drop methods provide an effects encoding:
(constant, main effects, subgroup effects). The first or last columns
of the dummy variable (i.e. levels) are dropped to get full rank
encoding.
Returns
-------
dummy : ndarray
dummy variable for product, see method
'''
if method == 'full':
return d2
start1, end1 = dummy_limits(d1)
start2, end2 = dummy_limits(d2)
    first = np.isin(start2, start1)
    last = np.isin(end2, end1)
equal = (first == last)
col_dropf = ~first*~equal
col_dropl = ~last*~equal
if method == 'drop-last':
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,:-1], d2[:,col_dropl]))
#Note: dtype int should preserve dtype of d1 and d2
elif method == 'drop-first':
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,1:], d2[:,col_dropf]))
else:
raise ValueError('method not recognized')
return dd, col_dropf, col_dropl | unfinished and incomplete mainly copy past dummy_product
dummy variable from product of two dummy variables
Parameters
----------
d1, d2 : ndarray
two dummy variables, d2 is assumed to be nested in d1
Assumes full set for methods 'drop-last' and 'drop-first'.
method : {'full', 'drop-last', 'drop-first'}
'full' returns the full product, which in this case is d2.
The drop methods provide an effects encoding:
(constant, main effects, subgroup effects). The first or last columns
of the dummy variable (i.e. levels) are dropped to get full rank
encoding.
Returns
-------
dummy : ndarray
dummy variable for product, see method | dummy_nested | python | statsmodels/statsmodels | statsmodels/sandbox/stats/contrast_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/contrast_tools.py | BSD-3-Clause |
def __init__(self, d1, d2):
'''C such that d1 C = d2, with d1 = X, d2 = Z
should be (x, z) in arguments ?
'''
self.transf_matrix = np.linalg.lstsq(d1, d2, rcond=-1)[0]
self.invtransf_matrix = np.linalg.lstsq(d2, d1, rcond=-1)[0] | C such that d1 C = d2, with d1 = X, d2 = Z
should be (x, z) in arguments ? | __init__ | python | statsmodels/statsmodels | statsmodels/sandbox/stats/contrast_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/contrast_tools.py | BSD-3-Clause |
def dot_left(self, a):
''' b = C a
'''
return np.dot(self.transf_matrix, a) | b = C a | dot_left | python | statsmodels/statsmodels | statsmodels/sandbox/stats/contrast_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/contrast_tools.py | BSD-3-Clause |
def dot_right(self, x):
''' z = x C
'''
return np.dot(x, self.transf_matrix) | z = x C | dot_right | python | statsmodels/statsmodels | statsmodels/sandbox/stats/contrast_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/contrast_tools.py | BSD-3-Clause |
def inv_dot_left(self, b):
''' a = C^{-1} b
'''
return np.dot(self.invtransf_matrix, b) | a = C^{-1} b | inv_dot_left | python | statsmodels/statsmodels | statsmodels/sandbox/stats/contrast_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/contrast_tools.py | BSD-3-Clause |
def inv_dot_right(self, z):
''' x = z C^{-1}
'''
return np.dot(z, self.invtransf_matrix) | x = z C^{-1} | inv_dot_right | python | statsmodels/statsmodels | statsmodels/sandbox/stats/contrast_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/contrast_tools.py | BSD-3-Clause |
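A round-trip sketch for the transform class; the class name DummyTransform is an assumption taken from the surrounding contrast_tools module, since only its methods are shown above. With d2 inside the column space of d1, the least-squares solution is exact and the 2x2 transform matrix is invertible:

import numpy as np

d1, _ = dummy_1d(np.array([0, 0, 1, 1]))
d2 = np.column_stack((np.ones(4, int), d1[:, 1:]))   # constant plus drop-first coding
tr = DummyTransform(d1, d2)                          # assumed class name
b = np.array([1.0, 2.0])
print(np.allclose(tr.dot_left(tr.inv_dot_left(b)), b))   # True: C (C^{-1} b) = b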
def groupmean_d(x, d):
'''groupmeans using dummy variables
Parameters
----------
x : array_like, ndim
data array, tested for 1,2 and 3 dimensions
    d : ndarray, 2d
        dummy variable (full set of indicator columns), needs to have
        the same length as x in axis 0.
Returns
-------
groupmeans : ndarray, ndim-1
means for each group along axis 0, the levels
of the groups are the last axis
Notes
-----
This will be memory intensive if there are many levels
in the categorical variable, i.e. many columns in the
dummy variable. In this case it is recommended to use
a more efficient version.
'''
x = np.asarray(x)
## if x.ndim == 1:
## nvars = 1
## else:
nvars = x.ndim + 1
    sli = (slice(None),) + (None,) * (nvars - 2) + (slice(None),)
    return (x[..., None] * d[sli]).sum(0) * 1. / d.sum(0) | groupmeans using dummy variables
Parameters
----------
x : array_like, ndim
data array, tested for 1,2 and 3 dimensions
d : ndarray, 2d
    dummy variable (full set of indicator columns), needs to have
    the same length as x in axis 0.
Returns
-------
groupmeans : ndarray, ndim-1
means for each group along axis 0, the levels
of the groups are the last axis
Notes
-----
This will be memory intensive if there are many levels
in the categorical variable, i.e. many columns in the
dummy variable. In this case it is recommended to use
a more efficient version. | groupmean_d | python | statsmodels/statsmodels | statsmodels/sandbox/stats/contrast_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/stats/contrast_tools.py | BSD-3-Clause |
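A worked example for groupmean_d with a full dummy set, assuming the functions above are importable; the observations [1, 2] and [3, 5] fall into the two groups:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 5.0])
d, _ = dummy_1d(np.array([0, 0, 1, 1]))
print(groupmean_d(x, d))   # [1.5 4. ]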