code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def kernel_pdf_lognorm_(x, sample, bw):
"""Log-normal kernel for density, pdf, estimation, explicit formula.
Jin, Kawczak 2003
"""
term = 8 * np.log(1 + bw) # this is 2 * variance in normal pdf
pdf = (1 / np.sqrt(term * np.pi) / sample *
np.exp(- (np.log(x) - np.log(sample))**2 / term))
return pdf.mean(-1) | Log-normal kernel for density (pdf) estimation, explicit formula.
Jin, Kawczak 2003 | kernel_pdf_lognorm_ | python | statsmodels/statsmodels | statsmodels/nonparametric/kernels_asymmetric.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernels_asymmetric.py | BSD-3-Clause |
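As a quick cross-check of the explicit formula, the estimate above is the sample mean of a log-normal pdf with log-mean ``np.log(x)`` and log-variance ``4 * np.log(1 + bw)``, so it can be reproduced with ``scipy.stats.lognorm``. A minimal sketch (illustrative only, not part of the statsmodels API):

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
sample = rng.lognormal(size=200)
x, bw = 1.5, 0.1

# explicit formula, as in kernel_pdf_lognorm_
term = 8 * np.log(1 + bw)
direct = (1 / np.sqrt(term * np.pi) / sample
          * np.exp(-(np.log(x) - np.log(sample)) ** 2 / term)).mean(-1)

# same estimate via scipy: log-normal pdf with sigma = 2 * sqrt(log(1 + bw)), scale = x
via_scipy = stats.lognorm.pdf(sample, s=2 * np.sqrt(np.log(1 + bw)), scale=x).mean(-1)
print(np.allclose(direct, via_scipy))  # True
```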
def _compute_min_std_IQR(data):
"""Compute minimum of std and IQR for each variable."""
s1 = np.std(data, axis=0)
q75 = mquantiles(data, 0.75, axis=0).data[0]
q25 = mquantiles(data, 0.25, axis=0).data[0]
s2 = (q75 - q25) / 1.349 # IQR
dispersion = np.minimum(s1, s2)
return dispersion | Compute minimum of std and IQR for each variable. | _compute_min_std_IQR | python | statsmodels/statsmodels | statsmodels/nonparametric/_kernel_base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/_kernel_base.py | BSD-3-Clause |
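The same dispersion measure can be sketched with plain ``np.percentile``; note that ``mquantiles`` uses a slightly different quantile interpolation, so the two agree only approximately. A rough sketch (the helper name is hypothetical):

```python
import numpy as np

def min_std_iqr(data):
    # per-variable standard deviation
    s1 = np.std(data, axis=0)
    # per-variable IQR, rescaled so it estimates the std for normal data
    q75, q25 = np.percentile(data, [75, 25], axis=0)
    s2 = (q75 - q25) / 1.349
    return np.minimum(s1, s2)

rng = np.random.default_rng(0)
print(min_std_iqr(rng.normal(size=(500, 3))))  # roughly [1, 1, 1]
```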
def _compute_subset(class_type, data, bw, co, do, n_cvars, ix_ord,
ix_unord, n_sub, class_vars, randomize, bound):
""""Compute bw on subset of data.
Called from ``GenericKDE._compute_efficient_*``.
Notes
-----
Needs to be outside the class in order for joblib to be able to pickle it.
"""
if randomize:
np.random.shuffle(data)
sub_data = data[:n_sub, :]
else:
sub_data = data[bound[0]:bound[1], :]
if class_type == 'KDEMultivariate':
from .kernel_density import KDEMultivariate
var_type = class_vars[0]
sub_model = KDEMultivariate(sub_data, var_type, bw=bw,
defaults=EstimatorSettings(efficient=False))
elif class_type == 'KDEMultivariateConditional':
from .kernel_density import KDEMultivariateConditional
k_dep, dep_type, indep_type = class_vars
endog = sub_data[:, :k_dep]
exog = sub_data[:, k_dep:]
sub_model = KDEMultivariateConditional(endog, exog, dep_type,
indep_type, bw=bw, defaults=EstimatorSettings(efficient=False))
elif class_type == 'KernelReg':
from .kernel_regression import KernelReg
var_type, k_vars, reg_type = class_vars
endog = _adjust_shape(sub_data[:, 0], 1)
exog = _adjust_shape(sub_data[:, 1:], k_vars)
sub_model = KernelReg(endog=endog, exog=exog, reg_type=reg_type,
var_type=var_type, bw=bw,
defaults=EstimatorSettings(efficient=False))
else:
raise ValueError("class_type not recognized, should be one of " \
"{KDEMultivariate, KDEMultivariateConditional, KernelReg}")
# Compute dispersion in next 4 lines
if class_type == 'KernelReg':
sub_data = sub_data[:, 1:]
dispersion = _compute_min_std_IQR(sub_data)
fct = dispersion * n_sub**(-1. / (n_cvars + co))
fct[ix_unord] = n_sub**(-2. / (n_cvars + do))
fct[ix_ord] = n_sub**(-2. / (n_cvars + do))
sample_scale_sub = sub_model.bw / fct #TODO: check if correct
bw_sub = sub_model.bw
return sample_scale_sub, bw_sub | Compute bw on subset of data.
Called from ``GenericKDE._compute_efficient_*``.
Notes
-----
Needs to be outside the class in order for joblib to be able to pickle it. | _compute_subset | python | statsmodels/statsmodels | statsmodels/nonparametric/_kernel_base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/_kernel_base.py | BSD-3-Clause |
def _compute_bw(self, bw):
"""
Computes the bandwidth of the data.
Parameters
----------
bw : {array_like, str}
If array_like: user-specified bandwidth.
If a string, should be one of:
- cv_ml: cross validation maximum likelihood
- normal_reference: normal reference rule of thumb
- cv_ls: cross validation least squares
Notes
-----
The default value for bw is 'normal_reference'.
"""
if bw is None:
bw = 'normal_reference'
if not isinstance(bw, str):
self._bw_method = "user-specified"
res = np.asarray(bw)
else:
# The user specified a bandwidth selection method
self._bw_method = bw
# Workaround to avoid instance methods in __dict__
if bw == 'normal_reference':
bwfunc = self._normal_reference
elif bw == 'cv_ml':
bwfunc = self._cv_ml
else: # bw == 'cv_ls'
bwfunc = self._cv_ls
res = bwfunc()
return res | Computes the bandwidth of the data.
Parameters
----------
bw : {array_like, str}
If array_like: user-specified bandwidth.
If a string, should be one of:
- cv_ml: cross validation maximum likelihood
- normal_reference: normal reference rule of thumb
- cv_ls: cross validation least squares
Notes
-----
The default value for bw is 'normal_reference'. | _compute_bw | python | statsmodels/statsmodels | statsmodels/nonparametric/_kernel_base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/_kernel_base.py | BSD-3-Clause |
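From user code, the dispatch above is driven by the ``bw`` argument of the public estimators; a short usage sketch with synthetic data (values are arbitrary):

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
data = rng.normal(size=(500, 2))

# string -> one of the selection methods dispatched above
kde_ref = sm.nonparametric.KDEMultivariate(data=data, var_type='cc',
                                            bw='normal_reference')
# array_like -> used directly as a user-specified bandwidth
kde_fix = sm.nonparametric.KDEMultivariate(data=data, var_type='cc',
                                           bw=[0.3, 0.3])
print(kde_ref.bw, kde_fix.bw)
```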
def _compute_dispersion(self, data):
"""
Computes the measure of dispersion.
The minimum of the standard deviation and interquartile range / 1.349
Notes
-----
Reimplemented in `KernelReg`, because the first column of `data` has to
be removed.
References
----------
See the user guide for the np package in R.
In the notes on bwscaling option in npreg, npudens, npcdens there is
a discussion on the measure of dispersion
"""
return _compute_min_std_IQR(data) | Computes the measure of dispersion.
The minimum of the standard deviation and interquartile range / 1.349
Notes
-----
Reimplemented in `KernelReg`, because the first column of `data` has to
be removed.
References
----------
See the user guide for the np package in R.
In the notes on bwscaling option in npreg, npudens, npcdens there is
a discussion on the measure of dispersion | _compute_dispersion | python | statsmodels/statsmodels | statsmodels/nonparametric/_kernel_base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/_kernel_base.py | BSD-3-Clause |
def _get_class_vars_type(self):
"""Helper method to be able to pass needed vars to _compute_subset.
Needs to be implemented by subclasses."""
pass | Helper method to be able to pass needed vars to _compute_subset.
Needs to be implemented by subclasses. | _get_class_vars_type | python | statsmodels/statsmodels | statsmodels/nonparametric/_kernel_base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/_kernel_base.py | BSD-3-Clause |
def _compute_efficient(self, bw):
"""
Computes the bandwidth by estimating the scaling factor (c)
in n_res resamples of size ``n_sub`` (in `randomize` case), or by
dividing ``nobs`` into as many ``n_sub`` blocks as needed (if
`randomize` is False).
References
----------
See p.9 in socserv.mcmaster.ca/racine/np_faq.pdf
"""
if bw is None:
self._bw_method = 'normal_reference'
if isinstance(bw, str):
self._bw_method = bw
else:
self._bw_method = "user-specified"
return bw
nobs = self.nobs
n_sub = self.n_sub
data = copy.deepcopy(self.data)
n_cvars = self.data_type.count('c')
co = 4 # 2*order of continuous kernel
do = 4 # 2*order of discrete kernel
_, ix_ord, ix_unord = _get_type_pos(self.data_type)
# Define bounds for slicing the data
if self.randomize:
# randomize chooses blocks of size n_sub, independent of nobs
bounds = [None] * self.n_res
else:
bounds = [(i * n_sub, (i+1) * n_sub) for i in range(nobs // n_sub)]
if nobs % n_sub > 0:
bounds.append((nobs - nobs % n_sub, nobs))
n_blocks = self.n_res if self.randomize else len(bounds)
sample_scale = np.empty((n_blocks, self.k_vars))
only_bw = np.empty((n_blocks, self.k_vars))
class_type, class_vars = self._get_class_vars_type()
if has_joblib:
# `res` is a list of tuples (sample_scale_sub, bw_sub)
res = joblib.Parallel(n_jobs=self.n_jobs)(
joblib.delayed(_compute_subset)(
class_type, data, bw, co, do, n_cvars, ix_ord, ix_unord, \
n_sub, class_vars, self.randomize, bounds[i]) \
for i in range(n_blocks))
else:
res = []
for i in range(n_blocks):
res.append(_compute_subset(class_type, data, bw, co, do,
n_cvars, ix_ord, ix_unord, n_sub,
class_vars, self.randomize,
bounds[i]))
for i in range(n_blocks):
sample_scale[i, :] = res[i][0]
only_bw[i, :] = res[i][1]
s = self._compute_dispersion(data)
order_func = np.median if self.return_median else np.mean
m_scale = order_func(sample_scale, axis=0)
# TODO: Check if 1/5 is correct in line below!
bw = m_scale * s * nobs**(-1. / (n_cvars + co))
bw[ix_ord] = m_scale[ix_ord] * nobs**(-2./ (n_cvars + do))
bw[ix_unord] = m_scale[ix_unord] * nobs**(-2./ (n_cvars + do))
if self.return_only_bw:
bw = np.median(only_bw, axis=0)
return bw | Computes the bandwidth by estimating the scaling factor (c)
in n_res resamples of size ``n_sub`` (in `randomize` case), or by
dividing ``nobs`` into as many ``n_sub`` blocks as needed (if
`randomize` is False).
References
----------
See p.9 in socserv.mcmaster.ca/racine/np_faq.pdf | _compute_efficient | python | statsmodels/statsmodels | statsmodels/nonparametric/_kernel_base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/_kernel_base.py | BSD-3-Clause |
def _set_defaults(self, defaults):
"""Sets the default values for the efficient estimation"""
self.n_res = defaults.n_res
self.n_sub = defaults.n_sub
self.randomize = defaults.randomize
self.return_median = defaults.return_median
self.efficient = defaults.efficient
self.return_only_bw = defaults.return_only_bw
self.n_jobs = defaults.n_jobs | Sets the default values for the efficient estimation | _set_defaults | python | statsmodels/statsmodels | statsmodels/nonparametric/_kernel_base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/_kernel_base.py | BSD-3-Clause |
def _normal_reference(self):
"""
Returns Scott's normal reference rule of thumb bandwidth parameter.
Notes
-----
See p.13 in [2] for an example and discussion. The formula for the
bandwidth is
.. math:: h = 1.06\sigma n^{-1/(4+q)}
where :math:`\sigma` is the standard deviation of each variable, ``n`` is the
number of observations and ``q`` is the number of
variables.
"""
X = np.std(self.data, axis=0)
return 1.06 * X * self.nobs ** (- 1. / (4 + self.data.shape[1])) | Returns Scott's normal reference rule of thumb bandwidth parameter.
Notes
-----
See p.13 in [2] for an example and discussion. The formula for the
bandwidth is
.. math:: h = 1.06\sigma n^{-1/(4+q)}
where :math:`\sigma` is the standard deviation of each variable, ``n`` is the
number of observations and ``q`` is the number of
variables. | _normal_reference | python | statsmodels/statsmodels | statsmodels/nonparametric/_kernel_base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/_kernel_base.py | BSD-3-Clause |
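For a single continuous variable the rule above reduces to ``1.06 * std(x) * n ** (-1/5)``; a quick numerical sketch:

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=1000)
n, q = len(x), 1                              # q = number of variables
h = 1.06 * np.std(x) * n ** (-1.0 / (4 + q))
print(h)  # roughly 0.27 for standard normal data with n = 1000
```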
def _set_bw_bounds(self, bw):
"""
Sets bandwidth lower bound to effectively zero (1e-10), and for
discrete values upper bound to 1.
"""
bw[bw < 0] = 1e-10
_, ix_ord, ix_unord = _get_type_pos(self.data_type)
bw[ix_ord] = np.minimum(bw[ix_ord], 1.)
bw[ix_unord] = np.minimum(bw[ix_unord], 1.)
return bw | Sets bandwidth lower bound to effectively zero (1e-10), and for
discrete values upper bound to 1. | _set_bw_bounds | python | statsmodels/statsmodels | statsmodels/nonparametric/_kernel_base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/_kernel_base.py | BSD-3-Clause |
def _adjust_shape(dat, k_vars):
""" Returns an array of shape (nobs, k_vars) for use with `gpke`."""
dat = np.asarray(dat)
if dat.ndim > 2:
dat = np.squeeze(dat)
if dat.ndim == 1 and k_vars > 1: # one obs many vars
nobs = 1
elif dat.ndim == 1 and k_vars == 1: # one obs one var
nobs = len(dat)
else:
if np.shape(dat)[0] == k_vars and np.shape(dat)[1] != k_vars:
dat = dat.T
nobs = np.shape(dat)[0] # ndim >1 so many obs many vars
dat = np.reshape(dat, (nobs, k_vars))
return dat | Returns an array of shape (nobs, k_vars) for use with `gpke`. | _adjust_shape | python | statsmodels/statsmodels | statsmodels/nonparametric/_kernel_base.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/_kernel_base.py | BSD-3-Clause |
def gaussian(h, Xi, x):
"""
Gaussian Kernel for continuous variables
Parameters
----------
h : 1-D ndarray, shape (K,)
The bandwidths used to estimate the value of the kernel function.
Xi : 1-D ndarray, shape (K,)
The value of the training set.
x : 1-D ndarray, shape (K,)
The value at which the kernel density is being estimated.
Returns
-------
kernel_value : ndarray, shape (nobs, K)
The value of the kernel function at each training point for each var.
"""
return (1. / np.sqrt(2 * np.pi)) * np.exp(-(Xi - x)**2 / (h**2 * 2.)) | Gaussian Kernel for continuous variables
Parameters
----------
h : 1-D ndarray, shape (K,)
The bandwidths used to estimate the value of the kernel function.
Xi : 1-D ndarray, shape (K,)
The value of the training set.
x : 1-D ndarray, shape (K,)
The value at which the kernel density is being estimated.
Returns
-------
kernel_value : ndarray, shape (nobs, K)
The value of the kernel function at each training point for each var. | gaussian | python | statsmodels/statsmodels | statsmodels/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernels.py | BSD-3-Clause |
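A small sketch of the same formula shows the broadcasting: one bandwidth and evaluation value per variable, one row per training observation (this re-states the formula rather than importing the statsmodels function):

```python
import numpy as np

def gaussian_kernel(h, Xi, x):
    # standard normal density of (Xi - x) / h, without the 1/h scaling
    return (1.0 / np.sqrt(2 * np.pi)) * np.exp(-(Xi - x) ** 2 / (2.0 * h ** 2))

h = np.array([0.5, 1.0])                             # bandwidth per variable
Xi = np.random.default_rng(0).normal(size=(100, 2))  # training data, nobs x K
x = np.array([0.0, 0.0])                             # evaluation point
print(gaussian_kernel(h, Xi, x).shape)               # (100, 2)
```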
def tricube(h, Xi, x):
"""
Tricube Kernel for continuous variables
Parameters
----------
h : 1-D ndarray, shape (K,)
The bandwidths used to estimate the value of the kernel function.
Xi : 1-D ndarray, shape (K,)
The value of the training set.
x : 1-D ndarray, shape (K,)
The value at which the kernel density is being estimated.
Returns
-------
kernel_value : ndarray, shape (nobs, K)
The value of the kernel function at each training point for each var.
"""
u = (Xi - x) / h
u[np.abs(u) > 1] = 0
return (70. / 81) * (1 - np.abs(u)**3)**3 | Tricube Kernel for continuous variables
Parameters
----------
h : 1-D ndarray, shape (K,)
The bandwidths used to estimate the value of the kernel function.
Xi : 1-D ndarray, shape (K,)
The value of the training set.
x : 1-D ndarray, shape (K,)
The value at which the kernel density is being estimated.
Returns
-------
kernel_value : ndarray, shape (nobs, K)
The value of the kernel function at each training point for each var. | tricube | python | statsmodels/statsmodels | statsmodels/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernels.py | BSD-3-Clause |
def gaussian_convolution(h, Xi, x):
""" Calculates the Gaussian Convolution Kernel """
return (1. / np.sqrt(4 * np.pi)) * np.exp(- (Xi - x)**2 / (h**2 * 4.)) | Calculates the Gaussian Convolution Kernel | gaussian_convolution | python | statsmodels/statsmodels | statsmodels/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernels.py | BSD-3-Clause |
def aitchison_aitken_reg(h, Xi, x):
"""
A version for the Aitchison-Aitken kernel for nonparametric regression.
Suggested by Li and Racine.
"""
kernel_value = np.ones(Xi.size)
ix = Xi != x
inDom = ix * h
kernel_value[ix] = inDom[ix]
return kernel_value | A version for the Aitchison-Aitken kernel for nonparametric regression.
Suggested by Li and Racine. | aitchison_aitken_reg | python | statsmodels/statsmodels | statsmodels/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernels.py | BSD-3-Clause |
def wang_ryzin_reg(h, Xi, x):
"""
A version for the Wang-Ryzin kernel for nonparametric regression.
Suggested by Li and Racine in [1] ch.4
"""
return h ** abs(Xi - x) | A version for the Wang-Ryzin kernel for nonparametric regression.
Suggested by Li and Racine in [1] ch.4 | wang_ryzin_reg | python | statsmodels/statsmodels | statsmodels/nonparametric/kernels.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernels.py | BSD-3-Clause |
def lowess(endog, exog, frac=2.0/3.0, it=3, delta=0.0, xvals=None, is_sorted=False,
missing='drop', return_sorted=True):
'''LOWESS (Locally Weighted Scatterplot Smoothing)
A lowess function that outputs smoothed estimates of endog
at the given exog values from points (exog, endog)
Parameters
----------
endog : 1-D numpy array
The y-values of the observed points
exog : 1-D numpy array
The x-values of the observed points
frac : float
Between 0 and 1. The fraction of the data used
when estimating each y-value.
it : int
The number of residual-based reweightings
to perform.
delta : float
Distance within which to use linear-interpolation
instead of weighted regression.
xvals: 1-D numpy array
Values of the exogenous variable at which to evaluate the regression.
If supplied, cannot use delta.
is_sorted : bool
If False (default), then the data will be sorted by exog before
calculating lowess. If True, then it is assumed that the data is
already sorted by exog. If xvals is specified, then it too must be
sorted if is_sorted is True.
missing : str
Available options are 'none', 'drop', and 'raise'. If 'none', no nan
checking is done. If 'drop', any observations with nans are dropped.
If 'raise', an error is raised. Default is 'drop'.
return_sorted : bool
If True (default), then the returned array is sorted by exog and has
missing (nan or infinite) observations removed.
If False, then the returned array is in the same length and the same
sequence of observations as the input array.
Returns
-------
out : {ndarray, float}
The returned array is two-dimensional if return_sorted is True, and
one dimensional if return_sorted is False.
If return_sorted is True, then a numpy array with two columns. The
first column contains the sorted x (exog) values and the second column
the associated estimated y (endog) values.
If return_sorted is False, then only the fitted values are returned,
and the observations will be in the same order as the input arrays.
If xvals is provided, then return_sorted is ignored and the returned
array is always one dimensional, containing the y values fitted at
the x values provided by xvals.
Notes
-----
This lowess function implements the algorithm given in the
reference below using local linear estimates.
Suppose the input data has N points. The algorithm works by
estimating the `smooth` y_i by taking the frac*N closest points
to (x_i,y_i) based on their x values and estimating y_i
using a weighted linear regression. The weight for (x_j,y_j)
is tricube function applied to abs(x_i-x_j).
If it > 1, then further weighted local linear regressions
are performed, where the weights are the same as above
times the _lowess_bisquare function of the residuals. Each iteration
takes approximately the same amount of time as the original fit,
so these iterations are expensive. They are most useful when
the noise has extremely heavy tails, such as Cauchy noise.
Noise with less heavy-tails, such as t-distributions with df>2,
are less problematic. The weights downgrade the influence of
points with large residuals. In the extreme case, points whose
residuals are larger than 6 times the median absolute residual
are given weight 0.
`delta` can be used to save computations. For each `x_i`, regressions
are skipped for points closer than `delta`. The next regression is
fit for the farthest point within delta of `x_i` and all points in
between are estimated by linearly interpolating between the two
regression fits.
Judicious choice of delta can cut computation time considerably
for large data (N > 5000). A good choice is ``delta = 0.01 * range(exog)``.
If `xvals` is provided, the regression is then computed at those points
and the fit values are returned. Otherwise, the regression is run
at points of `exog`.
Some experimentation is likely required to find a good
choice of `frac` and `iter` for a particular dataset.
References
----------
Cleveland, W.S. (1979) "Robust Locally Weighted Regression
and Smoothing Scatterplots". Journal of the American Statistical
Association 74 (368): 829-836.
Examples
--------
The below allows a comparison between how different the fits from
lowess for different values of frac can be.
>>> import numpy as np
>>> import statsmodels.api as sm
>>> lowess = sm.nonparametric.lowess
>>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
>>> y = np.sin(x) + np.random.normal(size=len(x))
>>> z = lowess(y, x)
>>> w = lowess(y, x, frac=1./3)
This gives a similar comparison for when it is 0 vs not.
>>> import numpy as np
>>> import scipy.stats as stats
>>> import statsmodels.api as sm
>>> lowess = sm.nonparametric.lowess
>>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
>>> y = np.sin(x) + stats.cauchy.rvs(size=len(x))
>>> z = lowess(y, x, frac= 1./3, it=0)
>>> w = lowess(y, x, frac=1./3)
'''
endog = np.asarray(endog, float)
exog = np.asarray(exog, float)
# Whether xvals argument was provided
given_xvals = (xvals is not None)
# Inputs should be vectors (1-D arrays) of the
# same length.
if exog.ndim != 1:
raise ValueError('exog must be a vector')
if endog.ndim != 1:
raise ValueError('endog must be a vector')
if endog.shape[0] != exog.shape[0] :
raise ValueError('exog and endog must have same length')
if xvals is not None:
xvals = np.ascontiguousarray(xvals)
if xvals.ndim != 1:
raise ValueError('exog_predict must be a vector')
if missing in ['drop', 'raise']:
mask_valid = (np.isfinite(exog) & np.isfinite(endog))
all_valid = np.all(mask_valid)
if all_valid:
y = endog
x = exog
else:
if missing == 'drop':
x = exog[mask_valid]
y = endog[mask_valid]
else:
raise ValueError('nan or inf found in data')
elif missing == 'none':
y = endog
x = exog
all_valid = True # we assume it's true if missing='none'
else:
raise ValueError("missing can only be 'none', 'drop' or 'raise'")
if not is_sorted:
# Sort both inputs according to the ascending order of x values
sort_index = np.argsort(x)
x = np.array(x[sort_index])
y = np.array(y[sort_index])
if not given_xvals:
# If given no explicit x values, we use the x-values in the exog array
xvals = exog
xvalues = x
xvals_all_valid = all_valid
if missing == 'drop':
xvals_mask_valid = mask_valid
else:
if delta != 0.0:
raise ValueError("Cannot have non-zero 'delta' and 'xvals' values")
# TODO: allow this again
mask_valid = np.isfinite(xvals)
if missing == "raise":
raise ValueError("NaN values in xvals with missing='raise'")
elif missing == 'drop':
xvals_mask_valid = mask_valid
xvalues = xvals
xvals_all_valid = True if missing == "none" else np.all(mask_valid)
# With explicit xvals, we ignore 'return_sorted' and always
# use the order provided
return_sorted = False
if missing in ['drop', 'raise']:
xvals_mask_valid = np.isfinite(xvals)
xvals_all_valid = np.all(xvals_mask_valid)
if xvals_all_valid:
xvalues = xvals
else:
if missing == 'drop':
xvalues = xvals[xvals_mask_valid]
else:
raise ValueError("nan or inf found in xvals")
if not is_sorted:
sort_index = np.argsort(xvalues)
xvalues = np.array(xvalues[sort_index])
else:
xvals_all_valid = True
y = np.ascontiguousarray(y)
x = np.ascontiguousarray(x)
if not given_xvals:
# Run LOWESS on the data points
res, _ = _lowess(y, x, x, np.ones_like(x),
frac=frac, it=it, delta=delta, given_xvals=False)
else:
# First run LOWESS on the data points to get the weights of the data points
# using it-1 iterations, last iter done next
if it > 0:
_, weights = _lowess(y, x, x, np.ones_like(x),
frac=frac, it=it-1, delta=delta, given_xvals=False)
else:
weights = np.ones_like(x)
xvalues = np.ascontiguousarray(xvalues, dtype=float)
# Then run once more using those supplied weights at the points provided by xvals
# No extra iterations are performed here since weights are fixed
res, _ = _lowess(y, x, xvalues, weights,
frac=frac, it=0, delta=delta, given_xvals=True)
_, yfitted = res.T
if return_sorted:
return res
else:
# rebuild yfitted with original indices
# a bit messy: y might have been selected twice
if not is_sorted:
yfitted_ = np.empty_like(xvalues)
yfitted_.fill(np.nan)
yfitted_[sort_index] = yfitted
yfitted = yfitted_
else:
yfitted = yfitted
if not xvals_all_valid:
yfitted_ = np.empty_like(xvals)
yfitted_.fill(np.nan)
yfitted_[xvals_mask_valid] = yfitted
yfitted = yfitted_
# we do not need to return exog anymore
return yfitted | LOWESS (Locally Weighted Scatterplot Smoothing)
A lowess function that outputs smoothed estimates of endog
at the given exog values from points (exog, endog)
Parameters
----------
endog : 1-D numpy array
The y-values of the observed points
exog : 1-D numpy array
The x-values of the observed points
frac : float
Between 0 and 1. The fraction of the data used
when estimating each y-value.
it : int
The number of residual-based reweightings
to perform.
delta : float
Distance within which to use linear-interpolation
instead of weighted regression.
xvals: 1-D numpy array
Values of the exogenous variable at which to evaluate the regression.
If supplied, cannot use delta.
is_sorted : bool
If False (default), then the data will be sorted by exog before
calculating lowess. If True, then it is assumed that the data is
already sorted by exog. If xvals is specified, then it too must be
sorted if is_sorted is True.
missing : str
Available options are 'none', 'drop', and 'raise'. If 'none', no nan
checking is done. If 'drop', any observations with nans are dropped.
If 'raise', an error is raised. Default is 'drop'.
return_sorted : bool
If True (default), then the returned array is sorted by exog and has
missing (nan or infinite) observations removed.
If False, then the returned array is in the same length and the same
sequence of observations as the input array.
Returns
-------
out : {ndarray, float}
The returned array is two-dimensional if return_sorted is True, and
one dimensional if return_sorted is False.
If return_sorted is True, then a numpy array with two columns. The
first column contains the sorted x (exog) values and the second column
the associated estimated y (endog) values.
If return_sorted is False, then only the fitted values are returned,
and the observations will be in the same order as the input arrays.
If xvals is provided, then return_sorted is ignored and the returned
array is always one dimensional, containing the y values fitted at
the x values provided by xvals.
Notes
-----
This lowess function implements the algorithm given in the
reference below using local linear estimates.
Suppose the input data has N points. The algorithm works by
estimating the `smooth` y_i by taking the frac*N closest points
to (x_i,y_i) based on their x values and estimating y_i
using a weighted linear regression. The weight for (x_j,y_j)
is tricube function applied to abs(x_i-x_j).
If it > 1, then further weighted local linear regressions
are performed, where the weights are the same as above
times the _lowess_bisquare function of the residuals. Each iteration
takes approximately the same amount of time as the original fit,
so these iterations are expensive. They are most useful when
the noise has extremely heavy tails, such as Cauchy noise.
Noise with less heavy-tails, such as t-distributions with df>2,
are less problematic. The weights downgrade the influence of
points with large residuals. In the extreme case, points whose
residuals are larger than 6 times the median absolute residual
are given weight 0.
`delta` can be used to save computations. For each `x_i`, regressions
are skipped for points closer than `delta`. The next regression is
fit for the farthest point within delta of `x_i` and all points in
between are estimated by linearly interpolating between the two
regression fits.
Judicious choice of delta can cut computation time considerably
for large data (N > 5000). A good choice is ``delta = 0.01 * range(exog)``.
If `xvals` is provided, the regression is then computed at those points
and the fit values are returned. Otherwise, the regression is run
at points of `exog`.
Some experimentation is likely required to find a good
choice of `frac` and `iter` for a particular dataset.
References
----------
Cleveland, W.S. (1979) "Robust Locally Weighted Regression
and Smoothing Scatterplots". Journal of the American Statistical
Association 74 (368): 829-836.
Examples
--------
The below allows a comparison between how different the fits from
lowess for different values of frac can be.
>>> import numpy as np
>>> import statsmodels.api as sm
>>> lowess = sm.nonparametric.lowess
>>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
>>> y = np.sin(x) + np.random.normal(size=len(x))
>>> z = lowess(y, x)
>>> w = lowess(y, x, frac=1./3)
This gives a similar comparison for when it is 0 vs not.
>>> import numpy as np
>>> import scipy.stats as stats
>>> import statsmodels.api as sm
>>> lowess = sm.nonparametric.lowess
>>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
>>> y = np.sin(x) + stats.cauchy.rvs(size=len(x))
>>> z = lowess(y, x, frac= 1./3, it=0)
>>> w = lowess(y, x, frac=1./3) | lowess | python | statsmodels/statsmodels | statsmodels/nonparametric/smoothers_lowess.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/smoothers_lowess.py | BSD-3-Clause |
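The ``xvals`` option described above evaluates the fit on a user-supplied grid and returns a 1-D array; a short sketch (the grid choice is arbitrary):

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = rng.uniform(-2 * np.pi, 2 * np.pi, size=500)
y = np.sin(x) + rng.normal(size=len(x))

grid = np.linspace(-2 * np.pi, 2 * np.pi, 101)
yhat = sm.nonparametric.lowess(y, x, frac=1. / 3, xvals=grid)
print(yhat.shape)  # (101,) -- fitted values at the grid points
```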
def forrt(X, m=None):
"""
RFFT with order like Munro (1976) FORRT routine.
"""
if m is None:
m = len(X)
y = np.fft.rfft(X, m) / m
return np.r_[y.real, y[1:-1].imag] | RFFT with order like Munro (1976) FORRT routine. | forrt | python | statsmodels/statsmodels | statsmodels/nonparametric/kdetools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kdetools.py | BSD-3-Clause |
def revrt(X, m=None):
"""
Inverse of forrt. Equivalent to Munro (1976) REVRT routine.
"""
if m is None:
m = len(X)
i = int(m // 2 + 1)
y = X[:i] + np.r_[0, X[i:], 0] * 1j
return np.fft.irfft(y)*m | Inverse of forrt. Equivalent to Munro (1976) REVRT routine. | revrt | python | statsmodels/statsmodels | statsmodels/nonparametric/kdetools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kdetools.py | BSD-3-Clause |
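Because ``revrt`` inverts ``forrt``, a round trip should reproduce the input (for even-length input, where the DC and Nyquist terms of the real FFT are real); a small check:

```python
import numpy as np
from statsmodels.nonparametric.kdetools import forrt, revrt

x = np.random.default_rng(0).normal(size=16)
# forrt packs the scaled real FFT as [real parts, interior imaginary parts];
# revrt unpacks and inverts it
print(np.allclose(revrt(forrt(x)), x))  # True
```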
def silverman_transform(bw, M, RANGE):
"""
FFT of Gaussian kernel following Silverman AS 176.
Notes
-----
Underflow is intentional as a dampener.
"""
J = np.arange(M/2+1)
FAC1 = 2*(np.pi*bw/RANGE)**2
JFAC = J**2*FAC1
BC = 1 - 1. / 3 * (J * 1./M*np.pi)**2
FAC = np.exp(-JFAC)/BC
kern_est = np.r_[FAC, FAC[1:-1]]
return kern_est | FFT of Gaussian kernel following Silverman AS 176.
Notes
-----
Underflow is intentional as a dampener. | silverman_transform | python | statsmodels/statsmodels | statsmodels/nonparametric/kdetools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kdetools.py | BSD-3-Clause |
def counts(x, v):
"""
Counts the number of elements of x that fall within the grid points v
Notes
-----
Using np.digitize and np.bincount
"""
idx = np.digitize(x, v)
return np.bincount(idx, minlength=len(v)) | Counts the number of elements of x that fall within the grid points v
Notes
-----
Using np.digitize and np.bincount | counts | python | statsmodels/statsmodels | statsmodels/nonparametric/kdetools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kdetools.py | BSD-3-Clause |
def fit(
self,
kernel="gau",
bw="normal_reference",
fft=True,
weights=None,
gridsize=None,
adjust=1,
cut=3,
clip=(-np.inf, np.inf),
):
"""
Attach the density estimate to the KDEUnivariate class.
Parameters
----------
kernel : str
The Kernel to be used. Choices are:
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float, callable
The bandwidth to use. Choices are:
- "scott" - 1.059 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "silverman" - .9 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "normal_reference" - C * A * nobs ** (-1/5.), where C is
calculated from the kernel. Equivalent (up to 2 dp) to the
"scott" bandwidth for gaussian kernels. See bandwidths.py
- If a float is given, its value is used as the bandwidth.
- If a callable is given, its return value is used.
The callable should take exactly two parameters, i.e.,
fn(x, kern), and return a float, where:
* x - the clipped input data
* kern - the kernel instance used
fft : bool
Whether or not to use FFT. FFT implementation is more
computationally efficient. However, only the Gaussian kernel
is implemented. If FFT is False, then a 'nobs' x 'gridsize'
intermediate array is created.
gridsize : int
If gridsize is None, max(len(x), 50) is used.
cut : float
Defines the length of the grid past the lowest and highest values
of x so that the kernel goes to zero. The end points are
``min(x) - cut * adjust * bw`` and ``max(x) + cut * adjust * bw``.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
Returns
-------
KDEUnivariate
The instance fit.
"""
if isinstance(bw, str):
self.bw_method = bw
else:
self.bw_method = "user-given"
if not callable(bw):
bw = float_like(bw, "bw")
endog = self.endog
if fft:
if kernel != "gau":
msg = "Only gaussian kernel is available for fft"
raise NotImplementedError(msg)
if weights is not None:
msg = "Weights are not implemented for fft"
raise NotImplementedError(msg)
density, grid, bw = kdensityfft(
endog,
kernel=kernel,
bw=bw,
adjust=adjust,
weights=weights,
gridsize=gridsize,
clip=clip,
cut=cut,
)
else:
density, grid, bw = kdensity(
endog,
kernel=kernel,
bw=bw,
adjust=adjust,
weights=weights,
gridsize=gridsize,
clip=clip,
cut=cut,
)
self.density = density
self.support = grid
self.bw = bw
self.kernel = kernel_switch[kernel](h=bw) # we instantiate twice,
# should this be passed to funcs?
# put here to ensure empty cache after re-fit with new options
self.kernel.weights = weights
if weights is not None:
self.kernel.weights /= weights.sum()
self._cache = {}
return self | Attach the density estimate to the KDEUnivariate class.
Parameters
----------
kernel : str
The Kernel to be used. Choices are:
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float, callable
The bandwidth to use. Choices are:
- "scott" - 1.059 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "silverman" - .9 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "normal_reference" - C * A * nobs ** (-1/5.), where C is
calculated from the kernel. Equivalent (up to 2 dp) to the
"scott" bandwidth for gaussian kernels. See bandwidths.py
- If a float is given, its value is used as the bandwidth.
- If a callable is given, its return value is used.
The callable should take exactly two parameters, i.e.,
fn(x, kern), and return a float, where:
* x - the clipped input data
* kern - the kernel instance used
fft : bool
Whether or not to use FFT. FFT implementation is more
computationally efficient. However, only the Gaussian kernel
is implemented. If FFT is False, then a 'nobs' x 'gridsize'
intermediate array is created.
gridsize : int
If gridsize is None, max(len(x), 50) is used.
cut : float
Defines the length of the grid past the lowest and highest values
of x so that the kernel goes to zero. The end points are
``min(x) - cut * adjust * bw`` and ``max(x) + cut * adjust * bw``.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
Returns
-------
KDEUnivariate
The instance fit. | fit | python | statsmodels/statsmodels | statsmodels/nonparametric/kde.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kde.py | BSD-3-Clause |
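End to end, the ``fit`` method above is typically used along the following lines; a short sketch on synthetic data:

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
data = rng.normal(size=1000)

kde = sm.nonparametric.KDEUnivariate(data)
kde.fit(kernel="gau", bw="normal_reference", fft=True)  # FFT path, Gaussian kernel only

print(kde.bw)                                # selected bandwidth
print(kde.support.shape, kde.density.shape)  # grid and density on the grid
print(kde.evaluate(0.0))                     # density at a single point
```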
def cdf(self):
"""
Returns the cumulative distribution function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
kern = self.kernel
if kern.domain is None: # TODO: test for grid point at domain bound
a, b = -np.inf, np.inf
else:
a, b = kern.domain
def func(x, s):
return np.squeeze(kern.density(s, x))
support = self.support
support = np.r_[a, support]
gridsize = len(support)
endog = self.endog
probs = [
integrate.quad(func, support[i - 1], support[i], args=endog)[0]
for i in range(1, gridsize)
]
return np.cumsum(probs) | Returns the cumulative distribution function evaluated at the support.
Notes
-----
Will not work if fit has not been called. | cdf | python | statsmodels/statsmodels | statsmodels/nonparametric/kde.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kde.py | BSD-3-Clause |
def cumhazard(self):
"""
Returns the hazard function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return -np.log(self.sf) | Returns the hazard function evaluated at the support.
Notes
-----
Will not work if fit has not been called. | cumhazard | python | statsmodels/statsmodels | statsmodels/nonparametric/kde.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kde.py | BSD-3-Clause |
def sf(self):
"""
Returns the survival function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return 1 - self.cdf | Returns the survival function evaluated at the support.
Notes
-----
Will not work if fit has not been called. | sf | python | statsmodels/statsmodels | statsmodels/nonparametric/kde.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kde.py | BSD-3-Clause |
def entropy(self):
"""
Returns the differential entropy evaluated at the support
Notes
-----
Will not work if fit has not been called. 1e-12 is added to each
probability to ensure that log(0) is not called.
"""
_checkisfit(self)
def entr(x, s):
pdf = kern.density(s, x)
return pdf * np.log(pdf + 1e-12)
kern = self.kernel
if kern.domain is not None:
a, b = self.domain
else:
a, b = -np.inf, np.inf
endog = self.endog
# TODO: below could run into integr problems, cf. stats.dist._entropy
return -integrate.quad(entr, a, b, args=(endog,))[0] | Returns the differential entropy evaluated at the support
Notes
-----
Will not work if fit has not been called. 1e-12 is added to each
probability to ensure that log(0) is not called. | entropy | python | statsmodels/statsmodels | statsmodels/nonparametric/kde.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kde.py | BSD-3-Clause |
def icdf(self):
"""
Inverse Cumulative Distribution (Quantile) Function
Notes
-----
Will not work if fit has not been called. Uses
`scipy.stats.mstats.mquantiles`.
"""
_checkisfit(self)
gridsize = len(self.density)
return stats.mstats.mquantiles(self.endog, np.linspace(0, 1, gridsize)) | Inverse Cumulative Distribution (Quantile) Function
Notes
-----
Will not work if fit has not been called. Uses
`scipy.stats.mstats.mquantiles`. | icdf | python | statsmodels/statsmodels | statsmodels/nonparametric/kde.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kde.py | BSD-3-Clause |
def evaluate(self, point):
"""
Evaluate density at a point or points.
Parameters
----------
point : {float, ndarray}
Point(s) at which to evaluate the density.
"""
_checkisfit(self)
return self.kernel.density(self.endog, point) | Evaluate density at a point or points.
Parameters
----------
point : {float, ndarray}
Point(s) at which to evaluate the density. | evaluate | python | statsmodels/statsmodels | statsmodels/nonparametric/kde.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kde.py | BSD-3-Clause |
def kdensity(
x,
kernel="gau",
bw="normal_reference",
weights=None,
gridsize=None,
adjust=1,
clip=(-np.inf, np.inf),
cut=3,
retgrid=True,
):
"""
Rosenblatt-Parzen univariate kernel density estimator.
Parameters
----------
x : array_like
The variable for which the density estimate is desired.
kernel : str
The Kernel to be used. Choices are
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float, callable
The bandwidth to use. Choices are:
- "scott" - 1.059 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "silverman" - .9 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "normal_reference" - C * A * nobs ** (-1/5.), where C is
calculated from the kernel. Equivalent (up to 2 dp) to the
"scott" bandwidth for gaussian kernels. See bandwidths.py
- If a float is given, its value is used as the bandwidth.
- If a callable is given, its return value is used.
The callable should take exactly two parameters, i.e.,
fn(x, kern), and return a float, where:
* x - the clipped input data
* kern - the kernel instance used
weights : array or None
Optional weights. If the x value is clipped, then this weight is
also dropped.
gridsize : int
If gridsize is None, max(len(x), 50) is used.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in x that are outside of the range given by clip are
dropped. The number of observations in x is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of x
so that the kernel goes to zero. The end points are
``min(x) - cut * bw`` and ``max(x) + cut * bw``.
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : ndarray
The densities estimated at the grid points.
grid : ndarray, optional
The grid points at which the density is estimated.
Notes
-----
Creates an intermediate (`gridsize` x `nobs`) array. Use FFT for a more
computationally efficient version.
"""
x = np.asarray(x)
if x.ndim == 1:
x = x[:, None]
clip_x = np.logical_and(x > clip[0], x < clip[1])
x = x[clip_x]
nobs = len(x) # after trim
if gridsize is None:
gridsize = max(nobs, 50) # do not need to resize if no FFT
# handle weights
if weights is None:
weights = np.ones(nobs)
q = nobs
else:
# ensure weights is a numpy array
weights = np.asarray(weights)
if len(weights) != len(clip_x):
msg = "The length of the weights must be the same as the given x."
raise ValueError(msg)
weights = weights[clip_x.squeeze()]
q = weights.sum()
# Get kernel object corresponding to selection
kern = kernel_switch[kernel]()
if callable(bw):
bw = float(bw(x, kern))
# user passed a callable custom bandwidth function
elif isinstance(bw, str):
bw = bandwidths.select_bandwidth(x, bw, kern)
# will cross-val fit this pattern?
else:
bw = float_like(bw, "bw")
bw *= adjust
a = np.min(x, axis=0) - cut * bw
b = np.max(x, axis=0) + cut * bw
grid = np.linspace(a, b, gridsize)
k = (
x.T - grid[:, None]
) / bw # uses broadcasting to make a gridsize x nobs
# set kernel bandwidth
kern.seth(bw)
# truncate to domain
if (
kern.domain is not None
): # will not work for piecewise kernels like parzen
z_lo, z_high = kern.domain
domain_mask = (k < z_lo) | (k > z_high)
k = kern(k) # estimate density
k[domain_mask] = 0
else:
k = kern(k) # estimate density
k[k < 0] = 0 # get rid of any negative values, do we need this?
dens = np.dot(k, weights) / (q * bw)
if retgrid:
return dens, grid, bw
else:
return dens, bw | Rosenblatt-Parzen univariate kernel density estimator.
Parameters
----------
x : array_like
The variable for which the density estimate is desired.
kernel : str
The Kernel to be used. Choices are
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float, callable
The bandwidth to use. Choices are:
- "scott" - 1.059 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "silverman" - .9 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "normal_reference" - C * A * nobs ** (-1/5.), where C is
calculated from the kernel. Equivalent (up to 2 dp) to the
"scott" bandwidth for gaussian kernels. See bandwidths.py
- If a float is given, its value is used as the bandwidth.
- If a callable is given, its return value is used.
The callable should take exactly two parameters, i.e.,
fn(x, kern), and return a float, where:
* x - the clipped input data
* kern - the kernel instance used
weights : array or None
Optional weights. If the x value is clipped, then this weight is
also dropped.
gridsize : int
If gridsize is None, max(len(x), 50) is used.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in x that are outside of the range given by clip are
dropped. The number of observations in x is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of x
so that the kernel goes to zero. The end points are
``min(x) - cut * bw`` and ``max(x) + cut * bw``.
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : ndarray
The densities estimated at the grid points.
grid : ndarray, optional
The grid points at which the density is estimated.
Notes
-----
Creates an intermediate (`gridsize` x `nobs`) array. Use FFT for a more
computationally efficient version. | kdensity | python | statsmodels/statsmodels | statsmodels/nonparametric/kde.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kde.py | BSD-3-Clause |
def kdensityfft(
x,
kernel="gau",
bw="normal_reference",
weights=None,
gridsize=None,
adjust=1,
clip=(-np.inf, np.inf),
cut=3,
retgrid=True,
):
"""
Rosenblatt-Parzen univariate kernel density estimator
Parameters
----------
x : array_like
The variable for which the density estimate is desired.
kernel : str
ONLY GAUSSIAN IS CURRENTLY IMPLEMENTED.
"bi" for biweight
"cos" for cosine
"epa" for Epanechnikov, default
"epa2" for alternative Epanechnikov
"gau" for Gaussian.
"par" for Parzen
"rect" for rectangular
"tri" for triangular
bw : str, float, callable
The bandwidth to use. Choices are:
- "scott" - 1.059 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "silverman" - .9 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "normal_reference" - C * A * nobs ** (-1/5.), where C is
calculated from the kernel. Equivalent (up to 2 dp) to the
"scott" bandwidth for gaussian kernels. See bandwidths.py
- If a float is given, its value is used as the bandwidth.
- If a callable is given, its return value is used.
The callable should take exactly two parameters, i.e.,
fn(x, kern), and return a float, where:
* x - the clipped input data
* kern - the kernel instance used
weights : array or None
WEIGHTS ARE NOT CURRENTLY IMPLEMENTED.
Optional weights. If the x value is clipped, then this weight is
also dropped.
gridsize : int
If gridsize is None, max(len(x), 512) is used. Note that the provided
number is rounded up to the next highest power of 2.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in x that are outside of the range given by clip are
dropped. The number of observations in x is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of x
so that the kernel goes to zero. The end points are
``x.min() - cut * bw`` and ``x.max() + cut * bw``.
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : ndarray
The densities estimated at the grid points.
grid : ndarray, optional
The grid points at which the density is estimated.
Notes
-----
Only the default kernel is implemented. Weights are not implemented yet.
This follows Silverman (1982) with changes suggested by Jones and Lotwick
(1984). However, the discretization step is replaced by linear binning
of Fan and Marron (1994). This should be extended to accept the parts
that are dependent only on the data to speed things up for
cross-validation.
References
----------
Fan, J. and J.S. Marron. (1994) `Fast implementations of nonparametric
curve estimators`. Journal of Computational and Graphical Statistics.
3.1, 35-56.
Jones, M.C. and H.W. Lotwick. (1984) `Remark AS R50: A Remark on Algorithm
AS 176. Kernel Density Estimation Using the Fast Fourier Transform`.
Journal of the Royal Statistical Society. Series C. 33.1, 120-2.
Silverman, B.W. (1982) `Algorithm AS 176. Kernel density estimation using
the Fast Fourier Transform`. Journal of the Royal Statistical Society.
Series C. 31.2, 93-9.
"""
x = np.asarray(x)
# will not work for two columns.
x = x[np.logical_and(x > clip[0], x < clip[1])]
# Get kernel object corresponding to selection
kern = kernel_switch[kernel]()
if callable(bw):
bw = float(bw(x, kern))
# user passed a callable custom bandwidth function
elif isinstance(bw, str):
# if bw is None, select optimal bandwidth for kernel
bw = bandwidths.select_bandwidth(x, bw, kern)
# will cross-val fit this pattern?
else:
bw = float_like(bw, "bw")
bw *= adjust
nobs = len(x) # after trim
# 1 Make grid and discretize the data
if gridsize is None:
gridsize = np.max((nobs, 512.0))
gridsize = 2 ** np.ceil(np.log2(gridsize)) # round to next power of 2
a = np.min(x) - cut * bw
b = np.max(x) + cut * bw
grid, delta = np.linspace(a, b, int(gridsize), retstep=True)
RANGE = b - a
# TODO: Fix this?
# This is the Silverman binning function, but I believe it's buggy (SS)
# weighting according to Silverman
# count = counts(x,grid)
# binned = np.zeros_like(grid) #xi_{k} in Silverman
# j = 0
# for k in range(int(gridsize-1)):
# if count[k]>0: # there are points of x in the grid here
# Xingrid = x[j:j+count[k]] # get all these points
# # get weights at grid[k],grid[k+1]
# binned[k] += np.sum(grid[k+1]-Xingrid)
# binned[k+1] += np.sum(Xingrid-grid[k])
# j += count[k]
# binned /= (nobs)*delta**2 # normalize binned to sum to 1/delta
# NOTE: THE ABOVE IS WRONG, JUST TRY WITH LINEAR BINNING
binned = fast_linbin(x, a, b, gridsize) / (delta * nobs)
# step 2 compute FFT of the weights, using Munro (1976) FFT convention
y = forrt(binned)
# step 3 and 4 for optimal bw compute zstar and the density estimate f
# do not have to redo the above if just changing bw, ie., for cross val
# NOTE: silverman_transform is the closed form solution of the FFT of the
# gaussian kernel. Not yet sure how to generalize it.
zstar = silverman_transform(bw, gridsize, RANGE) * y
# 3.49 in Silverman
# 3.50 w Gaussian kernel
f = revrt(zstar)
if retgrid:
return f, grid, bw
else:
return f, bw | Rosenblatt-Parzen univariate kernel density estimator
Parameters
----------
x : array_like
The variable for which the density estimate is desired.
kernel : str
ONLY GAUSSIAN IS CURRENTLY IMPLEMENTED.
"bi" for biweight
"cos" for cosine
"epa" for Epanechnikov, default
"epa2" for alternative Epanechnikov
"gau" for Gaussian.
"par" for Parzen
"rect" for rectangular
"tri" for triangular
bw : str, float, callable
The bandwidth to use. Choices are:
- "scott" - 1.059 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "silverman" - .9 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "normal_reference" - C * A * nobs ** (-1/5.), where C is
calculated from the kernel. Equivalent (up to 2 dp) to the
"scott" bandwidth for gaussian kernels. See bandwidths.py
- If a float is given, its value is used as the bandwidth.
- If a callable is given, its return value is used.
The callable should take exactly two parameters, i.e.,
fn(x, kern), and return a float, where:
* x - the clipped input data
* kern - the kernel instance used
weights : array or None
WEIGHTS ARE NOT CURRENTLY IMPLEMENTED.
Optional weights. If the x value is clipped, then this weight is
also dropped.
gridsize : int
If gridsize is None, max(len(x), 512) is used. Note that the provided
number is rounded up to the next highest power of 2.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in x that are outside of the range given by clip are
dropped. The number of observations in x is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of x
so that the kernel goes to zero. The end points are
``x.min() - cut * bw`` and ``x.max() + cut * bw``.
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : ndarray
The densities estimated at the grid points.
grid : ndarray, optional
The grid points at which the density is estimated.
Notes
-----
Only the default kernel is implemented. Weights are not implemented yet.
This follows Silverman (1982) with changes suggested by Jones and Lotwick
(1984). However, the discretization step is replaced by linear binning
of Fan and Marron (1994). This should be extended to accept the parts
that are dependent only on the data to speed things up for
cross-validation.
References
----------
Fan, J. and J.S. Marron. (1994) `Fast implementations of nonparametric
curve estimators`. Journal of Computational and Graphical Statistics.
3.1, 35-56.
Jones, M.C. and H.W. Lotwick. (1984) `Remark AS R50: A Remark on Algorithm
AS 176. Kernel Density Estimation Using the Fast Fourier Transform`.
Journal of the Royal Statistical Society. Series C. 33.1, 120-2.
Silverman, B.W. (1982) `Algorithm AS 176. Kernel density estimation using
the Fast Fourier Transform`. Journal of the Royal Statistical Society.
Series C. 31.2, 93-9. | kdensityfft | python | statsmodels/statsmodels | statsmodels/nonparametric/kde.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kde.py | BSD-3-Clause |
def __repr__(self):
"""Provide something sane to print."""
rpr = "KDE instance\n"
rpr += "Number of variables: k_vars = " + str(self.k_vars) + "\n"
rpr += "Number of samples: nobs = " + str(self.nobs) + "\n"
rpr += "Variable types: " + self.var_type + "\n"
rpr += "BW selection method: " + self._bw_method + "\n"
return rpr | Provide something sane to print. | __repr__ | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_density.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_density.py | BSD-3-Clause |
def _get_class_vars_type(self):
"""Helper method to be able to pass needed vars to _compute_subset."""
class_type = 'KDEMultivariate'
class_vars = (self.var_type, )
return class_type, class_vars | Helper method to be able to pass needed vars to _compute_subset. | _get_class_vars_type | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_density.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_density.py | BSD-3-Clause |
def __repr__(self):
"""Provide something sane to print."""
rpr = "KDEMultivariateConditional instance\n"
rpr += "Number of independent variables: k_indep = " + \
str(self.k_indep) + "\n"
rpr += "Number of dependent variables: k_dep = " + \
str(self.k_dep) + "\n"
rpr += "Number of observations: nobs = " + str(self.nobs) + "\n"
rpr += "Independent variable types: " + self.indep_type + "\n"
rpr += "Dependent variable types: " + self.dep_type + "\n"
rpr += "BW selection method: " + self._bw_method + "\n"
return rpr | Provide something sane to print. | __repr__ | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_density.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_density.py | BSD-3-Clause |
def loo_likelihood(self, bw, func=lambda x: x):
"""
Returns the leave-one-out conditional likelihood of the data.
If `func` is not equal to the default, what's calculated is a function
of the leave-one-out conditional likelihood.
Parameters
----------
bw : array_like
The bandwidth parameter(s).
func : callable, optional
Function to transform the likelihood values (before summing); for
the log likelihood, use ``func=np.log``. Default is ``f(x) = x``.
Returns
-------
L : float
The value of the leave-one-out function for the data.
Notes
-----
Similar to ``KDE.loo_likelihood``, but substitute ``f(y|x)=f(x,y)/f(x)``
for ``f(x)``.
"""
yLOO = LeaveOneOut(self.data)
xLOO = LeaveOneOut(self.exog).__iter__()
L = 0
for i, Y_j in enumerate(yLOO):
X_not_i = next(xLOO)
f_yx = gpke(bw, data=-Y_j, data_predict=-self.data[i, :],
var_type=(self.dep_type + self.indep_type))
f_x = gpke(bw[self.k_dep:], data=-X_not_i,
data_predict=-self.exog[i, :],
var_type=self.indep_type)
f_i = f_yx / f_x
L += func(f_i)
return -L | Returns the leave-one-out conditional likelihood of the data.
If `func` is not equal to the default, what's calculated is a function
of the leave-one-out conditional likelihood.
Parameters
----------
bw : array_like
The bandwidth parameter(s).
func : callable, optional
Function to transform the likelihood values (before summing); for
the log likelihood, use ``func=np.log``. Default is ``f(x) = x``.
Returns
-------
L : float
The value of the leave-one-out function for the data.
Notes
-----
Similar to ``KDE.loo_likelihood``, but substitute ``f(y|x)=f(x,y)/f(x)``
for ``f(x)``. | loo_likelihood | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_density.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_density.py | BSD-3-Clause |
def _get_class_vars_type(self):
"""Helper method to be able to pass needed vars to _compute_subset."""
class_type = 'KDEMultivariateConditional'
class_vars = (self.k_dep, self.dep_type, self.indep_type)
return class_type, class_vars | Helper method to be able to pass needed vars to _compute_subset. | _get_class_vars_type | python | statsmodels/statsmodels | statsmodels/nonparametric/kernel_density.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/kernel_density.py | BSD-3-Clause |
def _select_sigma(x, percentile=25):
"""
Returns the smaller of std(X, ddof=1) or normalized IQR(X) over axis 0.
References
----------
Silverman (1986) p.47
"""
# normalize = norm.ppf(.75) - norm.ppf(.25)
normalize = 1.349
IQR = (scoreatpercentile(x, 75) - scoreatpercentile(x, 25)) / normalize
std_dev = np.std(x, axis=0, ddof=1)
if IQR > 0:
return np.minimum(std_dev, IQR)
else:
return std_dev | Returns the smaller of std(X, ddof=1) or normalized IQR(X) over axis 0.
References
----------
Silverman (1986) p.47 | _select_sigma | python | statsmodels/statsmodels | statsmodels/nonparametric/bandwidths.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/bandwidths.py | BSD-3-Clause |
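The selection rule above can be reproduced with plain NumPy; a minimal sketch (the sample `x` is illustrative):
import numpy as np

x = np.random.default_rng(0).standard_normal(200)
iqr = (np.percentile(x, 75) - np.percentile(x, 25)) / 1.349  # normalized IQR
sigma = min(np.std(x, ddof=1), iqr) if iqr > 0 else np.std(x, ddof=1)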
def bw_scott(x, kernel=None):
"""
Scott's Rule of Thumb
Parameters
----------
x : array_like
Array for which to get the bandwidth
kernel : CustomKernel object
Unused
Returns
-------
bw : float
The estimate of the bandwidth
Notes
-----
Returns 1.059 * A * n ** (-1/5.) where ::
A = min(std(x, ddof=1), IQR/1.349)
IQR = np.subtract.reduce(np.percentile(x, [75,25]))
References
----------
Scott, D.W. (1992) Multivariate Density Estimation: Theory, Practice, and
Visualization.
"""
A = _select_sigma(x)
n = len(x)
return 1.059 * A * n ** (-0.2) | Scott's Rule of Thumb
Parameters
----------
x : array_like
Array for which to get the bandwidth
kernel : CustomKernel object
Unused
Returns
-------
bw : float
The estimate of the bandwidth
Notes
-----
Returns 1.059 * A * n ** (-1/5.) where ::
A = min(std(x, ddof=1), IQR/1.349)
IQR = np.subtract.reduce(np.percentile(x, [75,25]))
References
----------
Scott, D.W. (1992) Multivariate Density Estimation: Theory, Practice, and
Visualization. | bw_scott | python | statsmodels/statsmodels | statsmodels/nonparametric/bandwidths.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/bandwidths.py | BSD-3-Clause |
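A hedged usage sketch (assumes statsmodels is installed; the data are illustrative):
import numpy as np
from statsmodels.nonparametric.bandwidths import bw_scott

x = np.random.default_rng(1).standard_normal(500)
bw = bw_scott(x)  # 1.059 * min(std, IQR/1.349) * n**(-1/5)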
def bw_silverman(x, kernel=None):
"""
Silverman's Rule of Thumb
Parameters
----------
x : array_like
Array for which to get the bandwidth
kernel : CustomKernel object
Unused
Returns
-------
bw : float
The estimate of the bandwidth
Notes
-----
Returns .9 * A * n ** (-1/5.) where ::
A = min(std(x, ddof=1), IQR/1.349)
IQR = np.subtract.reduce(np.percentile(x, [75,25]))
References
----------
Silverman, B.W. (1986) `Density Estimation.`
"""
A = _select_sigma(x)
n = len(x)
return .9 * A * n ** (-0.2) | Silverman's Rule of Thumb
Parameters
----------
x : array_like
Array for which to get the bandwidth
kernel : CustomKernel object
Unused
Returns
-------
bw : float
The estimate of the bandwidth
Notes
-----
Returns .9 * A * n ** (-1/5.) where ::
A = min(std(x, ddof=1), IQR/1.349)
IQR = np.subtract.reduce(np.percentile(x, [75,25]))
References
----------
Silverman, B.W. (1986) `Density Estimation.` | bw_silverman | python | statsmodels/statsmodels | statsmodels/nonparametric/bandwidths.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/bandwidths.py | BSD-3-Clause |
def bw_normal_reference(x, kernel=None):
"""
Plug-in bandwidth with kernel specific constant based on normal reference.
This bandwidth minimizes the mean integrated square error if the true
distribution is the normal. This choice is an appropriate bandwidth for
single peaked distributions that are similar to the normal distribution.
Parameters
----------
x : array_like
Array for which to get the bandwidth
kernel : CustomKernel object
Used to calculate the constant for the plug-in bandwidth.
The default is a Gaussian kernel.
Returns
-------
bw : float
The estimate of the bandwidth
Notes
-----
Returns C * A * n ** (-1/5.) where ::
A = min(std(x, ddof=1), IQR/1.349)
IQR = np.subtract.reduce(np.percentile(x, [75,25]))
C = constant from Hansen (2009)
When using a Gaussian kernel this is equivalent to the 'scott' bandwidth up
to two decimal places. This is the accuracy to which the 'scott' constant is
specified.
References
----------
Silverman, B.W. (1986) `Density Estimation.`
Hansen, B.E. (2009) `Lecture Notes on Nonparametrics.`
"""
if kernel is None:
kernel = kernels.Gaussian()
C = kernel.normal_reference_constant
A = _select_sigma(x)
n = len(x)
return C * A * n ** (-0.2) | Plug-in bandwidth with kernel specific constant based on normal reference.
This bandwidth minimizes the mean integrated square error if the true
distribution is the normal. This choice is an appropriate bandwidth for
single peaked distributions that are similar to the normal distribution.
Parameters
----------
x : array_like
Array for which to get the bandwidth
kernel : CustomKernel object
Used to calculate the constant for the plug-in bandwidth.
The default is a Gaussian kernel.
Returns
-------
bw : float
The estimate of the bandwidth
Notes
-----
Returns C * A * n ** (-1/5.) where ::
A = min(std(x, ddof=1), IQR/1.349)
IQR = np.subtract.reduce(np.percentile(x, [75,25]))
C = constant from Hansen (2009)
When using a Gaussian kernel this is equivalent to the 'scott' bandwidth up
to two decimal places. This is the accuracy to which the 'scott' constant is
specified.
References
----------
Silverman, B.W. (1986) `Density Estimation.`
Hansen, B.E. (2009) `Lecture Notes on Nonparametrics.` | bw_normal_reference | python | statsmodels/statsmodels | statsmodels/nonparametric/bandwidths.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/bandwidths.py | BSD-3-Clause |
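The claim that the Gaussian-kernel plug-in constant agrees with Scott's rule to two decimals can be checked directly; a small sketch with illustrative data:
import numpy as np
from statsmodels.nonparametric.bandwidths import bw_scott, bw_normal_reference

x = np.random.default_rng(2).standard_normal(1000)
print(bw_scott(x), bw_normal_reference(x))  # constants 1.059 vs ~1.0592 for the Gaussian kernel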
def select_bandwidth(x, bw, kernel):
"""
Selects bandwidth for a selection rule bw
this is a wrapper around existing bandwidth selection rules
Parameters
----------
x : array_like
Array for which to get the bandwidth
bw : str
name of bandwidth selection rule, currently supported are:
%s
kernel : not used yet
Returns
-------
bw : float
The estimate of the bandwidth
"""
bw = bw.lower()
if bw not in bandwidth_funcs:
raise ValueError("Bandwidth %s not understood" % bw)
bandwidth = bandwidth_funcs[bw](x, kernel)
if np.any(bandwidth == 0):
# eventually this can fall back on another selection criterion.
err = "Selected KDE bandwidth is 0. Cannot estimate density. " \
"Either provide the bandwidth during initialization or use " \
"an alternative method."
raise RuntimeError(err)
else:
return bandwidth | Selects bandwidth for a selection rule bw
this is a wrapper around existing bandwidth selection rules
Parameters
----------
x : array_like
Array for which to get the bandwidth
bw : str
name of bandwidth selection rule, currently supported are:
%s
kernel : not used yet
Returns
-------
bw : float
The estimate of the bandwidth | select_bandwidth | python | statsmodels/statsmodels | statsmodels/nonparametric/bandwidths.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/bandwidths.py | BSD-3-Clause |
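A sketch of selection by rule name; for the rule-of-thumb selectors the kernel argument is unused, so passing None is assumed to be acceptable here:
import numpy as np
from statsmodels.nonparametric.bandwidths import select_bandwidth

x = np.random.default_rng(3).standard_normal(300)
bw = select_bandwidth(x, bw="silverman", kernel=None)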
def write2file(self, file_name, data): # pragma: no cover
"""Write some data to a csv file. Only use for debugging!"""
import csv
data_file = csv.writer(open(file_name, "w", encoding="utf-8"))
data = np.column_stack(data)
nobs = max(np.shape(data))
K = min(np.shape(data))
data = np.reshape(data, (nobs,K))
for i in range(nobs):
data_file.writerow(list(data[i, :])) | Write some data to a csv file. Only use for debugging! | write2file | python | statsmodels/statsmodels | statsmodels/nonparametric/tests/test_kernel_regression.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/tests/test_kernel_regression.py | BSD-3-Clause |
def predict(self, params, exog=None, exog_precision=None, which="mean"):
"""Predict values for mean or precision
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision parameter.
which : str
- "mean" : mean, conditional expectation E(endog | exog)
- "precision" : predicted precision
- "linear" : linear predictor for the mean function
- "linear-precision" : linear predictor for the precision parameter
Returns
-------
ndarray, predicted values
"""
# compatibility with old names and misspelling
if which == "linpred":
which = "linear"
if which in ["linpred_precision", "linear_precision"]:
which = "linear-precision"
k_mean = self.exog.shape[1]
if which in ["mean", "linear"]:
if exog is None:
exog = self.exog
params_mean = params[:k_mean]
# Zparams = params[k_mean:]
linpred = np.dot(exog, params_mean)
if which == "mean":
mu = self.link.inverse(linpred)
res = mu
else:
res = linpred
elif which in ["precision", "linear-precision"]:
if exog_precision is None:
exog_precision = self.exog_precision
params_prec = params[k_mean:]
linpred_prec = np.dot(exog_precision, params_prec)
if which == "precision":
phi = self.link_precision.inverse(linpred_prec)
res = phi
else:
res = linpred_prec
elif which == "var":
res = self._predict_var(
params,
exog=exog,
exog_precision=exog_precision
)
else:
raise ValueError('which = %s is not available' % which)
return res | Predict values for mean or precision
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision parameter.
which : str
- "mean" : mean, conditional expectation E(endog | exog)
- "precision" : predicted precision
- "linear" : linear predictor for the mean function
- "linear-precision" : linear predictor for the precision parameter
Returns
-------
ndarray, predicted values | predict | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
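A hedged end-to-end sketch of the `which` options (assumes the enclosing class is BetaModel from statsmodels.othermod.betareg; the simulated data are illustrative):
import numpy as np
from statsmodels.othermod.betareg import BetaModel

rng = np.random.default_rng(0)
n = 200
exog = np.column_stack([np.ones(n), rng.standard_normal(n)])
mu = 1 / (1 + np.exp(-(0.5 + 0.8 * exog[:, 1])))   # logit-linear mean
y = rng.beta(mu * 20, (1 - mu) * 20)                # true precision phi = 20
res = BetaModel(y, exog, exog_precision=np.ones((n, 1))).fit()
mean_hat = res.predict(which="mean")
prec_hat = res.predict(which="precision")
var_hat = res.predict(which="var")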
def _predict_precision(self, params, exog_precision=None):
"""Predict values for precision function for given exog_precision.
Parameters
----------
params : array_like
The model parameters.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Predicted precision.
"""
if exog_precision is None:
exog_precision = self.exog_precision
k_mean = self.exog.shape[1]
params_precision = params[k_mean:]
linpred_prec = np.dot(exog_precision, params_precision)
phi = self.link_precision.inverse(linpred_prec)
return phi | Predict values for precision function for given exog_precision.
Parameters
----------
params : array_like
The model parameters.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Predicted precision. | _predict_precision | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def _predict_var(self, params, exog=None, exog_precision=None):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Predicted conditional variance.
"""
mean = self.predict(params, exog=exog)
precision = self._predict_precision(params,
exog_precision=exog_precision)
var_endog = mean * (1 - mean) / (1 + precision)
return var_endog | predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Predicted conditional variance. | _predict_var | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def loglikeobs(self, params):
"""
Loglikelihood for observations of the Beta regression model.
Parameters
----------
params : ndarray
The parameters of the model, coefficients for linear predictors
of the mean and of the precision function.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`.
"""
return self._llobs(self.endog, self.exog, self.exog_precision, params) | Loglikelihood for observations of the Beta regression model.
Parameters
----------
params : ndarray
The parameters of the model, coefficients for linear predictors
of the mean and of the precision function.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. | loglikeobs | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def _llobs(self, endog, exog, exog_precision, params):
"""
Loglikelihood for observations with data arguments.
Parameters
----------
endog : ndarray
1d array of endogenous variable.
exog : ndarray
2d array of explanatory variables.
exog_precision : ndarray
2d array of explanatory variables for precision.
params : ndarray
The parameters of the model, coefficients for linear predictors
of the mean and of the precision function.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`.
"""
y, X, Z = endog, exog, exog_precision
nz = Z.shape[1]
params_mean = params[:-nz]
params_prec = params[-nz:]
linpred = np.dot(X, params_mean)
linpred_prec = np.dot(Z, params_prec)
mu = self.link.inverse(linpred)
phi = self.link_precision.inverse(linpred_prec)
eps_lb = 1e-200
alpha = np.clip(mu * phi, eps_lb, np.inf)
beta = np.clip((1 - mu) * phi, eps_lb, np.inf)
ll = (lgamma(phi) - lgamma(alpha)
- lgamma(beta)
+ (mu * phi - 1) * np.log(y)
+ (((1 - mu) * phi) - 1) * np.log(1 - y))
return ll | Loglikelihood for observations with data arguments.
Parameters
----------
endog : ndarray
1d array of endogenous variable.
exog : ndarray
2d array of explanatory variables.
exog_precision : ndarray
2d array of explanatory variables for precision.
params : ndarray
The parameters of the model, coefficients for linear predictors
of the mean and of the precision function.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. | _llobs | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
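The expression above is the Beta(mu*phi, (1-mu)*phi) log-density in mean/precision form; a small sketch checking one term against scipy (the values are illustrative):
import numpy as np
from scipy import stats
from scipy.special import gammaln as lgamma

y, mu, phi = 0.3, 0.4, 5.0
alpha, beta = mu * phi, (1 - mu) * phi
ll = (lgamma(phi) - lgamma(alpha) - lgamma(beta)
      + (alpha - 1) * np.log(y) + (beta - 1) * np.log(1 - y))
assert np.isclose(ll, stats.beta.logpdf(y, alpha, beta))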
def score(self, params):
"""
Returns the score vector of the log-likelihood.
http://www.tandfonline.com/doi/pdf/10.1080/00949650903389993
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score : ndarray
First derivative of loglikelihood function.
"""
sf1, sf2 = self.score_factor(params)
d1 = np.dot(sf1, self.exog)
d2 = np.dot(sf2, self.exog_precision)
return np.concatenate((d1, d2)) | Returns the score vector of the log-likelihood.
http://www.tandfonline.com/doi/pdf/10.1080/00949650903389993
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score : ndarray
First derivative of loglikelihood function. | score | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def _score_check(self, params):
"""Inherited score with finite differences
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score based on numerical derivatives
"""
return super().score(params) | Inherited score with finite differences
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score based on numerical derivatives | _score_check | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
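A hedged sketch relating the analytic score to the finite-difference check above (BetaModel assumed; the starting values come from the WLS helper shown later in this module):
import numpy as np
from statsmodels.othermod.betareg import BetaModel

rng = np.random.default_rng(5)
exog = np.column_stack([np.ones(80), rng.standard_normal(80)])
y = rng.beta(2.0, 4.0, size=80)
mod = BetaModel(y, exog)
p0 = mod._start_params()
print(np.max(np.abs(mod.score(p0) - mod._score_check(p0))))  # should be small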
def score_factor(self, params, endog=None):
"""Derivative of loglikelihood function w.r.t. linear predictors.
This needs to be multiplied with the exog to obtain the score_obs.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score_factor : ndarray, 2-D
A 2d weight vector used in the calculation of the score_obs.
Notes
-----
The score_obs can be obtained from score_factor ``sf`` using
- d1 = sf[:, :1] * exog
- d2 = sf[:, 1:2] * exog_precision
"""
from scipy import special
digamma = special.psi
y = self.endog if endog is None else endog
X, Z = self.exog, self.exog_precision
nz = Z.shape[1]
Xparams = params[:-nz]
Zparams = params[-nz:]
# NO LINKS
mu = self.link.inverse(np.dot(X, Xparams))
phi = self.link_precision.inverse(np.dot(Z, Zparams))
eps_lb = 1e-200 # lower bound for evaluating digamma, avoids -inf
alpha = np.clip(mu * phi, eps_lb, np.inf)
beta = np.clip((1 - mu) * phi, eps_lb, np.inf)
ystar = np.log(y / (1. - y))
dig_beta = digamma(beta)
mustar = digamma(alpha) - dig_beta
yt = np.log(1 - y)
mut = dig_beta - digamma(phi)
t = 1. / self.link.deriv(mu)
h = 1. / self.link_precision.deriv(phi)
#
sf1 = phi * t * (ystar - mustar)
sf2 = h * (mu * (ystar - mustar) + yt - mut)
return (sf1, sf2) | Derivative of loglikelihood function w.r.t. linear predictors.
This needs to be multiplied with the exog to obtain the score_obs.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score_factor : ndarray, 2-D
A 2d weight vector used in the calculation of the score_obs.
Notes
-----
The score_obs can be obtained from score_factor ``sf`` using
- d1 = sf[:, :1] * exog
- d2 = sf[:, 1:2] * exog_precision | score_factor | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def score_hessian_factor(self, params, return_hessian=False,
observed=True):
"""Derivatives of loglikelihood function w.r.t. linear predictors.
This calculates score and hessian factors at the same time, because
there is a large overlap in calculations.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
return_hessian : bool
If False, then only score_factors are returned
If True, the both score and hessian factors are returned
observed : bool
If True, then the observed Hessian is returned (default).
If False, then the expected information matrix is returned.
Returns
-------
score_factor : ndarray, 2-D
A 2d weight vector used in the calculation of the score_obs.
(-jbb, -jbg, -jgg) : tuple
A tuple with 3 hessian factors, corresponding to the upper
triangle of the Hessian matrix.
TODO: check why there are minus signs
"""
from scipy import special
digamma = special.psi
y, X, Z = self.endog, self.exog, self.exog_precision
nz = Z.shape[1]
Xparams = params[:-nz]
Zparams = params[-nz:]
# NO LINKS
mu = self.link.inverse(np.dot(X, Xparams))
phi = self.link_precision.inverse(np.dot(Z, Zparams))
# We need to prevent mu = 0 and (1-mu) = 0 in digamma call
eps_lb = 1e-200 # lower bound for evaluating digamma, avoids -inf
alpha = np.clip(mu * phi, eps_lb, np.inf)
beta = np.clip((1 - mu) * phi, eps_lb, np.inf)
ystar = np.log(y / (1. - y))
dig_beta = digamma(beta)
mustar = digamma(alpha) - dig_beta
yt = np.log(1 - y)
mut = dig_beta - digamma(phi)
t = 1. / self.link.deriv(mu)
h = 1. / self.link_precision.deriv(phi)
ymu_star = (ystar - mustar)
sf1 = phi * t * ymu_star
sf2 = h * (mu * ymu_star + yt - mut)
if return_hessian:
def trigamma(x):
return special.polygamma(1, x)
trig_beta = trigamma(beta)
var_star = trigamma(alpha) + trig_beta
var_t = trig_beta - trigamma(phi)
c = - trig_beta
s = self.link.deriv2(mu)
q = self.link_precision.deriv2(phi)
jbb = (phi * t) * var_star
if observed:
jbb += s * t**2 * ymu_star
jbb *= t * phi
jbg = phi * t * h * (mu * var_star + c)
if observed:
jbg -= ymu_star * t * h
jgg = h**2 * (mu**2 * var_star + 2 * mu * c + var_t)
if observed:
jgg += (mu * ymu_star + yt - mut) * q * h**3 # **3 ?
return (sf1, sf2), (-jbb, -jbg, -jgg)
else:
return (sf1, sf2) | Derivatives of loglikelihood function w.r.t. linear predictors.
This calculates score and hessian factors at the same time, because
there is a large overlap in calculations.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
return_hessian : bool
If False, then only score_factors are returned
If True, the both score and hessian factors are returned
observed : bool
If True, then the observed Hessian is returned (default).
If False, then the expected information matrix is returned.
Returns
-------
score_factor : ndarray, 2-D
A 2d weight vector used in the calculation of the score_obs.
(-jbb, -jbg, -jgg) : tuple
A tuple with 3 hessian factors, corresponding to the upper
triangle of the Hessian matrix.
TODO: check why there are minus signs | score_hessian_factor | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause
def score_obs(self, params):
"""
Score, first derivative of the loglikelihood for each observation.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score_obs : ndarray, 2d
The first derivative of the loglikelihood function evaluated at
params for each observation.
"""
sf1, sf2 = self.score_factor(params)
# elementwise product for each row (observation)
d1 = sf1[:, None] * self.exog
d2 = sf2[:, None] * self.exog_precision
return np.column_stack((d1, d2)) | Score, first derivative of the loglikelihood for each observation.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score_obs : ndarray, 2d
The first derivative of the loglikelihood function evaluated at
params for each observation. | score_obs | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def hessian(self, params, observed=None):
"""Hessian, second derivative of loglikelihood function
Parameters
----------
params : ndarray
Parameter at which Hessian is evaluated.
observed : bool
If True, then the observed Hessian is returned (default).
If False, then the expected information matrix is returned.
Returns
-------
hessian : ndarray
Hessian, i.e. observed information, or expected information matrix.
"""
if self.hess_type == "eim":
observed = False
else:
observed = True
_, hf = self.score_hessian_factor(params, return_hessian=True,
observed=observed)
hf11, hf12, hf22 = hf
# elementwise product for each row (observation)
d11 = (self.exog.T * hf11).dot(self.exog)
d12 = (self.exog.T * hf12).dot(self.exog_precision)
d22 = (self.exog_precision.T * hf22).dot(self.exog_precision)
return np.block([[d11, d12], [d12.T, d22]]) | Hessian, second derivative of loglikelihood function
Parameters
----------
params : ndarray
Parameter at which Hessian is evaluated.
observed : bool
If True, then the observed Hessian is returned (default).
If False, then the expected information matrix is returned.
Returns
-------
hessian : ndarray
Hessian, i.e. observed information, or expected information matrix. | hessian | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def hessian_factor(self, params, observed=True):
"""Derivatives of loglikelihood function w.r.t. linear predictors.
"""
_, hf = self.score_hessian_factor(params, return_hessian=True,
observed=observed)
return hf | Derivatives of loglikelihood function w.r.t. linear predictors. | hessian_factor | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def _start_params(self, niter=2, return_intermediate=False):
"""find starting values
Parameters
----------
niter : int
Number of iterations of WLS approximation
return_intermediate : bool
If False (default), then only the preliminary parameter estimate
will be returned.
If True, then also the two results instances of the WLS estimate
for mean parameters and for the precision parameters will be
returned.
Returns
-------
sp : ndarray
start parameters for the optimization
res_m2 : results instance (optional)
Results instance for the WLS regression of the mean function.
res_p2 : results instance (optional)
Results instance for the WLS regression of the precision function.
Notes
-----
This calculates a few iterations of weighted least squares. This is not
a full scoring algorithm.
"""
# WLS of the mean equation uses the implied weights (inverse variance),
# WLS for the precision equations uses weights that only take
# account of the link transformation of the precision endog.
from statsmodels.regression.linear_model import OLS, WLS
res_m = OLS(self.link(self.endog), self.exog).fit()
fitted = self.link.inverse(res_m.fittedvalues)
resid = self.endog - fitted
prec_i = fitted * (1 - fitted) / np.maximum(np.abs(resid), 1e-2)**2 - 1
res_p = OLS(self.link_precision(prec_i), self.exog_precision).fit()
prec_fitted = self.link_precision.inverse(res_p.fittedvalues)
# sp = np.concatenate((res_m.params, res_p.params))
for _ in range(niter):
y_var_inv = (1 + prec_fitted) / (fitted * (1 - fitted))
# y_var = fitted * (1 - fitted) / (1 + prec_fitted)
ylink_var_inv = y_var_inv / self.link.deriv(fitted)**2
res_m2 = WLS(self.link(self.endog), self.exog,
weights=ylink_var_inv).fit()
fitted = self.link.inverse(res_m2.fittedvalues)
resid2 = self.endog - fitted
prec_i2 = (fitted * (1 - fitted) /
np.maximum(np.abs(resid2), 1e-2)**2 - 1)
w_p = 1. / self.link_precision.deriv(prec_fitted)**2
res_p2 = WLS(self.link_precision(prec_i2), self.exog_precision,
weights=w_p).fit()
prec_fitted = self.link_precision.inverse(res_p2.fittedvalues)
sp2 = np.concatenate((res_m2.params, res_p2.params))
if return_intermediate:
return sp2, res_m2, res_p2
return sp2 | find starting values
Parameters
----------
niter : int
Number of iterations of WLS approximation
return_intermediate : bool
If False (default), then only the preliminary parameter estimate
will be returned.
If True, then also the two results instances of the WLS estimate
for mean parameters and for the precision parameters will be
returned.
Returns
-------
sp : ndarray
start parameters for the optimization
res_m2 : results instance (optional)
Results instance for the WLS regression of the mean function.
res_p2 : results instance (optional)
Results instance for the WLS regression of the precision function.
Notes
-----
This calculates a few iterations of weighted least squares. This is not
a full scoring algorithm. | _start_params | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def fit(self, start_params=None, maxiter=1000, disp=False,
method='bfgs', **kwds):
"""
Fit the model by maximum likelihood.
Parameters
----------
start_params : array-like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
maxiter : integer
The maximum number of iterations
disp : bool
Show convergence stats.
method : str
The optimization method to use.
kwds :
Keyword arguments for the optimizer.
Returns
-------
BetaResults instance.
"""
if start_params is None:
start_params = self._start_params()
# # http://www.ime.usp.br/~sferrari/beta.pdf suggests starting phi
# # on page 8
if "cov_type" in kwds:
# this is a workaround because we cannot tell super to use eim
if kwds["cov_type"].lower() == "eim":
self.hess_type = "eim"
del kwds["cov_type"]
else:
self.hess_type = "oim"
res = super().fit(start_params=start_params,
maxiter=maxiter, method=method,
disp=disp, **kwds)
if not isinstance(res, BetaResultsWrapper):
# currently GenericLikelihoodModel does not add a wrapper
res = BetaResultsWrapper(res)
return res | Fit the model by maximum likelihood.
Parameters
----------
start_params : array-like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
maxiter : integer
The maximum number of iterations
disp : bool
Show convergence stats.
method : str
The optimization method to use.
kwds :
Keyword arguments for the optimizer.
Returns
-------
BetaResults instance. | fit | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
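A hedged sketch of the cov_type workaround described in the code: passing cov_type="eim" only switches the Hessian to the expected information matrix before delegating to the generic fit (BetaModel and its default constant precision design are assumed; data illustrative):
import numpy as np
from statsmodels.othermod.betareg import BetaModel

rng = np.random.default_rng(1)
exog = np.column_stack([np.ones(150), rng.uniform(size=150)])
y = rng.beta(2.0, 5.0, size=150)
res_oim = BetaModel(y, exog).fit()                # observed information (default)
res_eim = BetaModel(y, exog).fit(cov_type="eim")  # expected information matrix
print(res_oim.bse, res_eim.bse)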
def _deriv_mean_dparams(self, params):
"""
Derivative of the expected endog with respect to the parameters.
not verified yet
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
"""
link = self.link
lin_pred = self.predict(params, which="linear")
idl = link.inverse_deriv(lin_pred)
dmat = self.exog * idl[:, None]
return np.column_stack((dmat, np.zeros(self.exog_precision.shape))) | Derivative of the expected endog with respect to the parameters.
not verified yet
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector. | _deriv_mean_dparams | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def _deriv_score_obs_dendog(self, params):
"""derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog.
"""
from statsmodels.tools.numdiff import _approx_fprime_cs_scalar
def f(y):
if y.ndim == 2 and y.shape[1] == 1:
y = y[:, 0]
sf = self.score_factor(params, endog=y)
return np.column_stack(sf)
dsf = _approx_fprime_cs_scalar(self.endog[:, None], f)
# deriv is 2d vector
d1 = dsf[:, :1] * self.exog
d2 = dsf[:, 1:2] * self.exog_precision
return np.column_stack((d1, d2)) | derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog. | _deriv_score_obs_dendog | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def get_distribution_params(self, params, exog=None, exog_precision=None):
"""
Return distribution parameters converted from model prediction.
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
(alpha, beta) : tuple of ndarrays
Parameters for the scipy distribution to evaluate predictive
distribution.
"""
mean = self.predict(params, exog=exog)
precision = self.predict(params, exog_precision=exog_precision,
which="precision")
return precision * mean, precision * (1 - mean) | Return distribution parameters converted from model prediction.
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
(alpha, beta) : tuple of ndarrays
Parameters for the scipy distribution to evaluate predictive
distribution. | get_distribution_params | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def get_distribution(self, params, exog=None, exog_precision=None):
"""
Return an instance of the predictive distribution.
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Instance of a scipy frozen distribution based on estimated
parameters.
See Also
--------
predict
Notes
-----
This function delegates to the predict method to handle exog and
exog_precision, which in turn makes any required transformations.
Due to the behavior of ``scipy.stats.distributions objects``, the
returned random number generator must be called with ``gen.rvs(n)``
where ``n`` is the number of observations in the data set used
to fit the model. If any other value is used for ``n``, misleading
results will be produced.
"""
from scipy import stats
args = self.get_distribution_params(params, exog=exog,
exog_precision=exog_precision)
distr = stats.beta(*args)
return distr | Return a instance of the predictive distribution.
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Instance of a scipy frozen distribution based on estimated
parameters.
See Also
--------
predict
Notes
-----
This function delegates to the predict method to handle exog and
exog_precision, which in turn makes any required transformations.
Due to the behavior of ``scipy.stats.distributions objects``, the
returned random number generator must be called with ``gen.rvs(n)``
where ``n`` is the number of observations in the data set used
to fit the model. If any other value is used for ``n``, misleading
results will be produced. | get_distribution | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def fittedvalues(self):
"""In-sample predicted mean, conditional expectation."""
return self.model.predict(self.params) | In-sample predicted mean, conditional expectation. | fittedvalues | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def fitted_precision(self):
"""In-sample predicted precision"""
return self.model.predict(self.params, which="precision") | In-sample predicted precision | fitted_precision | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def resid(self):
"""Response residual"""
return self.model.endog - self.fittedvalues | Response residual | resid | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def resid_pearson(self):
"""Pearson standardize residual"""
std = np.sqrt(self.model.predict(self.params, which="var"))
return self.resid / std | Pearson standardized residual | resid_pearson | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause
def prsquared(self):
"""Cox-Snell Likelihood-Ratio pseudo-R-squared.
1 - exp((llnull - llf) * (2 / nobs))
"""
return self.pseudo_rsquared(kind="lr") | Cox-Snell Likelihood-Ratio pseudo-R-squared.
1 - exp((llnull - llf) * (2 / nobs)) | prsquared | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause
def get_distribution_params(self, exog=None, exog_precision=None,
transform=True):
"""
Return distribution parameters converted from model prediction.
Parameters
----------
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
transform : bool
If transform is True and formulas have been used, then predictor
``exog`` is passed through the formula processing. Default is True.
Returns
-------
(alpha, beta) : tuple of ndarrays
Parameters for the scipy distribution to evaluate predictive
distribution.
"""
mean = self.predict(exog=exog, transform=transform)
precision = self.predict(exog_precision=exog_precision,
which="precision", transform=transform)
return precision * mean, precision * (1 - mean) | Return distribution parameters converted from model prediction.
Parameters
----------
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
transform : bool
If transform is True and formulas have been used, then predictor
``exog`` is passed through the formula processing. Default is True.
Returns
-------
(alpha, beta) : tuple of ndarrays
Parameters for the scipy distribution to evaluate predictive
distribution. | get_distribution_params | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def get_distribution(self, exog=None, exog_precision=None, transform=True):
"""
Return an instance of the predictive distribution.
Parameters
----------
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
transform : bool
If transform is True and formulas have been used, then predictor
``exog`` is passed through the formula processing. Default is True.
Returns
-------
Instance of a scipy frozen distribution based on estimated
parameters.
See Also
--------
predict
Notes
-----
This function delegates to the predict method to handle exog and
exog_precision, which in turn makes any required transformations.
Due to the behavior of ``scipy.stats.distributions objects``, the
returned random number generator must be called with ``gen.rvs(n)``
where ``n`` is the number of observations in the data set used
to fit the model. If any other value is used for ``n``, misleading
results will be produced.
"""
from scipy import stats
args = self.get_distribution_params(exog=exog,
exog_precision=exog_precision,
transform=transform)
args = (np.asarray(arg) for arg in args)
distr = stats.beta(*args)
return distr | Return a instance of the predictive distribution.
Parameters
----------
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
transform : bool
If transform is True and formulas have been used, then predictor
``exog`` is passed through the formula processing. Default is True.
Returns
-------
Instance of a scipy frozen distribution based on estimated
parameters.
See Also
--------
predict
Notes
-----
This function delegates to the predict method to handle exog and
exog_precision, which in turn makes any required transformations.
Due to the behavior of ``scipy.stats.distributions objects``, the
returned random number generator must be called with ``gen.rvs(n)``
where ``n`` is the number of observations in the data set used
to fit the model. If any other value is used for ``n``, misleading
results will be produced. | get_distribution | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
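A sketch using the frozen distribution for pointwise intervals; the rvs caveat above still applies, but ppf and cdf evaluate elementwise per observation (BetaModel assumed, data illustrative):
import numpy as np
from statsmodels.othermod.betareg import BetaModel

rng = np.random.default_rng(2)
exog = np.column_stack([np.ones(100), rng.standard_normal(100)])
y = rng.beta(3.0, 3.0, size=100)
res = BetaModel(y, exog).fit()
distr = res.get_distribution()                     # frozen scipy.stats.beta, one (a, b) per obs
lower, upper = distr.ppf(0.05), distr.ppf(0.95)    # 90% pointwise prediction bands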
def get_influence(self):
"""
Get an instance of MLEInfluence with influence and outlier measures
Returns
-------
infl : MLEInfluence instance
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence
Notes
-----
Support for multi-link and multi-exog models is still experimental
in MLEInfluence. Interface and some definitions might still change.
Note: Difference to R betareg: Betareg has the same general leverage
as this model. However, they use a linear approximation hat matrix
to scale and studentize influence and residual statistics.
MLEInfluence uses the generalized leverage as hat_matrix_diag.
Additionally, MLEInfluence uses pearson residuals for residual
analysis.
References
----------
todo
"""
from statsmodels.stats.outliers_influence import MLEInfluence
return MLEInfluence(self) | Get an instance of MLEInfluence with influence and outlier measures
Returns
-------
infl : MLEInfluence instance
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence
Notes
-----
Support for multi-link and multi-exog models is still experimental
in MLEInfluence. Interface and some definitions might still change.
Note: Difference to R betareg: Betareg has the same general leverage
as this model. However, they use a linear approximation hat matrix
to scale and studentize influence and residual statistics.
MLEInfluence uses the generalized leverage as hat_matrix_diag.
Additionally, MLEInfluence uses pearson residuals for residual
analysis.
References
----------
todo | get_influence | python | statsmodels/statsmodels | statsmodels/othermod/betareg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/othermod/betareg.py | BSD-3-Clause |
def logsumexp(a, axis=None):
"""
Compute the log of the sum of exponentials, log(e^{a_1}+...+e^{a_n}), of the elements of a.
Avoids numerical overflow.
Parameters
----------
a : array_like
The vector to exponentiate and sum
axis : int, optional
The axis along which to apply the operation. Default is None.
Returns
-------
log(sum(exp(a)))
Notes
-----
This function was taken from the mailing list
http://mail.scipy.org/pipermail/scipy-user/2009-October/022931.html
This should be superseded by the ufunc when it is finished.
"""
if axis is None:
# Use the scipy.maxentropy version.
return sp_logsumexp(a)
a = np.asarray(a)
shp = list(a.shape)
shp[axis] = 1
a_max = a.max(axis=axis)
s = np.log(np.exp(a - a_max.reshape(shp)).sum(axis=axis))
lse = a_max + s
return lse | Compute the log of the sum of exponentials log(e^{a_1}+...e^{a_n}) of a
Avoids numerical overflow.
Parameters
----------
a : array_like
The vector to exponentiate and sum
axis : int, optional
The axis along which to apply the operation. Default is None.
Returns
-------
log(sum(exp(a)))
Notes
-----
This function was taken from the mailing list
http://mail.scipy.org/pipermail/scipy-user/2009-October/022931.html
This should be superseded by the ufunc when it is finished. | logsumexp | python | statsmodels/statsmodels | statsmodels/sandbox/infotheo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/infotheo.py | BSD-3-Clause
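A small sketch of why the max-shift matters (values chosen so a naive evaluation overflows):
import numpy as np

a = np.array([1000.0, 1000.0])
naive = np.log(np.sum(np.exp(a)))                        # overflows to inf
stable = a.max() + np.log(np.sum(np.exp(a - a.max())))   # 1000 + log(2)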
def _isproperdist(X):
"""
Checks to see if `X` is a proper probability distribution
"""
X = np.asarray(X)
if not np.allclose(np.sum(X), 1) or not np.all(X>=0) or not np.all(X<=1):
return False
else:
return True | Checks to see if `X` is a proper probability distribution | _isproperdist | python | statsmodels/statsmodels | statsmodels/sandbox/infotheo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/infotheo.py | BSD-3-Clause |
def discretize(X, method="ef", nbins=None):
"""
Discretize `X`
Parameters
----------
nbins : int, optional
Number of bins. Default is floor(sqrt(N))
method : str
"ef" is equal-frequency binning
"ew" is equal-width binning
Examples
--------
"""
nobs = len(X)
if nbins is None:
nbins = np.floor(np.sqrt(nobs))
if method == "ef":
discrete = np.ceil(nbins * stats.rankdata(X)/nobs)
if method == "ew":
width = np.max(X) - np.min(X)
width = np.floor(width/nbins)
ivec = np.argsort(X)  # scipy.stats.fastsort was removed; argsort gives the sort order
svec = X[ivec]
discrete = np.zeros(nobs)
binnum = 1
base = svec[0]
discrete[ivec[0]] = binnum
for i in range(1,nobs):
if svec[i] < base + width:
discrete[ivec[i]] = binnum
else:
base = svec[i]
binnum += 1
discrete[ivec[i]] = binnum
return discrete | Discretize `X`
Parameters
----------
nbins : int, optional
Number of bins. Default is floor(sqrt(N))
method : str
"ef" is equal-frequency binning
"ew" is equal-width binning
Examples
-------- | discretize | python | statsmodels/statsmodels | statsmodels/sandbox/infotheo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/infotheo.py | BSD-3-Clause |
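A sketch of the equal-frequency branch as implemented above (illustrative data; each of the nbins labels is assigned to roughly nobs/nbins observations):
import numpy as np
from scipy import stats

x = np.random.default_rng(4).standard_normal(100)
nbins = 4
labels = np.ceil(nbins * stats.rankdata(x) / len(x))  # bin labels 1..4, ~25 points each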
def logbasechange(a,b):
"""
There is a one-to-one transformation of the entropy value from
a log base b to a log base a :
H_{b}(X)=log_{b}(a)[H_{a}(X)]
Returns
-------
log_{b}(a)
"""
return np.log(b)/np.log(a) | There is a one-to-one transformation of the entropy value from
a log base b to a log base a :
H_{b}(X)=log_{b}(a)[H_{a}(X)]
Returns
-------
log_{b}(a) | logbasechange | python | statsmodels/statsmodels | statsmodels/sandbox/infotheo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/infotheo.py | BSD-3-Clause |
def natstobits(X):
"""
Converts from nats to bits
"""
return logbasechange(np.e, 2) * X | Converts from nats to bits | natstobits | python | statsmodels/statsmodels | statsmodels/sandbox/infotheo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/infotheo.py | BSD-3-Clause |
def bitstonats(X):
"""
Converts from bits to nats
"""
return logbasechange(2, np.e) * X | Converts from bits to nats | bitstonats | python | statsmodels/statsmodels | statsmodels/sandbox/infotheo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/infotheo.py | BSD-3-Clause |
def shannonentropy(px, logbase=2):
"""
This is Shannon's entropy
Parameters
----------
logbase, int or np.e
The base of the log
px : 1d or 2d array_like
Can be a discrete probability distribution, a 2d joint distribution,
or a sequence of probabilities.
Returns
-------
For log base 2 (bits) given a discrete distribution
H(p) = sum(px * log2(1/px)) = -sum(px*log2(px)) = E[log2(1/p(X))]
For log base 2 (bits) given a joint distribution
H(px,py) = -sum_{k,j} w_{kj} log2(w_{kj})
Notes
-----
shannonentropy(0) is defined as 0
"""
#TODO: have not defined the px,py case?
px = np.asarray(px)
if not np.all(px <= 1) or not np.all(px >= 0):
raise ValueError("px does not define proper distribution")
entropy = -np.sum(np.nan_to_num(px*np.log2(px)))
if logbase != 2:
return logbasechange(2,logbase) * entropy
else:
return entropy | This is Shannon's entropy
Parameters
----------
logbase, int or np.e
The base of the log
px : 1d or 2d array_like
Can be a discrete probability distribution, a 2d joint distribution,
or a sequence of probabilities.
Returns
-------
For log base 2 (bits) given a discrete distribution
H(p) = sum(px * log2(1/px)) = -sum(px*log2(px)) = E[log2(1/p(X))]
For log base 2 (bits) given a joint distribution
H(px,py) = -sum_{k,j} w_{kj} log2(w_{kj})
Notes
-----
shannonentropy(0) is defined as 0 | shannonentropy | python | statsmodels/statsmodels | statsmodels/sandbox/infotheo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/infotheo.py | BSD-3-Clause |
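A quick check against textbook values (assumes the module is importable as statsmodels.sandbox.infotheo):
from statsmodels.sandbox.infotheo import shannonentropy

print(shannonentropy([0.5, 0.5]))   # fair coin: 1.0 bit
print(shannonentropy([0.9, 0.1]))   # skewed coin: ~0.469 bits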
def shannoninfo(px, logbase=2):
"""
Shannon's information
Parameters
----------
px : float or array_like
`px` is a discrete probability distribution
Returns
-------
For logbase = 2
np.log2(px)
"""
px = np.asarray(px)
if not np.all(px <= 1) or not np.all(px >= 0):
raise ValueError("px does not define proper distribution")
if logbase != 2:
return - logbasechange(2,logbase) * np.log2(px)
else:
return - np.log2(px) | Shannon's information
Parameters
----------
px : float or array_like
`px` is a discrete probability distribution
Returns
-------
For logbase = 2
np.log2(px) | shannoninfo | python | statsmodels/statsmodels | statsmodels/sandbox/infotheo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/infotheo.py | BSD-3-Clause |
def condentropy(px, py, pxpy=None, logbase=2):
"""
Return the conditional entropy of X given Y.
Parameters
----------
px : array_like
py : array_like
pxpy : array_like, optional
If pxpy is None, the distributions are assumed to be independent
and conendtropy(px,py) = shannonentropy(px)
logbase : int or np.e
Returns
-------
sum_{kj} w_{kj} log(q_{j}/w_{kj})
where q_{j} = Y[j]
and w_kj = X[k,j]
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
if pxpy is not None and not _isproperdist(pxpy):
raise ValueError("pxpy is not a proper joint distribtion")
if pxpy is None:
pxpy = np.outer(py,px)
condent = np.sum(pxpy * np.nan_to_num(np.log2(py/pxpy)))
if logbase == 2:
return condent
else:
return logbasechange(2, logbase) * condent | Return the conditional entropy of X given Y.
Parameters
----------
px : array_like
py : array_like
pxpy : array_like, optional
If pxpy is None, the distributions are assumed to be independent
and conendtropy(px,py) = shannonentropy(px)
logbase : int or np.e
Returns
-------
sum_{kj} w_{kj} log(q_{j}/w_{kj})
where q_{j} = Y[j]
and w_kj = X[k,j] | condentropy | python | statsmodels/statsmodels | statsmodels/sandbox/infotheo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/infotheo.py | BSD-3-Clause |
def mutualinfo(px,py,pxpy, logbase=2):
"""
Returns the mutual information between X and Y.
Parameters
----------
px : array_like
Discrete probability distribution of random variable X
py : array_like
Discrete probability distribution of random variable Y
pxpy : 2d array_like
The joint probability distribution of random variables X and Y.
Note that if X and Y are independent then the mutual information
is zero.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
shannonentropy(px) - condentropy(px,py,pxpy)
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
if pxpy is not None and not _isproperdist(pxpy):
raise ValueError("pxpy is not a proper joint distribtion")
if pxpy is None:
pxpy = np.outer(py,px)
return shannonentropy(px, logbase=logbase) - condentropy(px,py,pxpy,
logbase=logbase) | Returns the mutual information between X and Y.
Parameters
----------
px : array_like
Discrete probability distribution of random variable X
py : array_like
Discrete probability distribution of random variable Y
pxpy : 2d array_like
The joint probability distribution of random variables X and Y.
Note that if X and Y are independent then the mutual information
is zero.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
shannonentropy(px) - condentropy(px,py,pxpy) | mutualinfo | python | statsmodels/statsmodels | statsmodels/sandbox/infotheo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/infotheo.py | BSD-3-Clause |
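A small worked example with a symmetric 2x2 joint distribution (values illustrative); the return statement encodes the identity I(X;Y) = H(X) - H(X|Y):
import numpy as np
from statsmodels.sandbox.infotheo import shannonentropy, condentropy, mutualinfo

px = np.array([0.5, 0.5])
py = np.array([0.5, 0.5])
pxpy = np.array([[0.4, 0.1],
                 [0.1, 0.4]])               # joint distribution with marginals px and py
print(shannonentropy(px))                   # H(X) = 1 bit
print(condentropy(px, py, pxpy))            # H(X|Y) ~ 0.722 bits
print(mutualinfo(px, py, pxpy))             # I(X;Y) ~ 0.278 bits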
def corrent(px,py,pxpy,logbase=2):
"""
An information theoretic correlation measure.
Reflects linear and nonlinear correlation between two random variables
X and Y, characterized by the discrete probability distributions px and py
respectively.
Parameters
----------
px : array_like
Discrete probability distribution of random variable X
py : array_like
Discrete probability distribution of random variable Y
pxpy : 2d array_like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
mutualinfo(px,py,pxpy,logbase=logbase)/shannonentropy(py,logbase=logbase)
Notes
-----
This is also equivalent to
corrent(px,py,pxpy) = 1 - condent(px,py,pxpy)/shannonentropy(py)
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
if pxpy is not None and not _isproperdist(pxpy):
raise ValueError("pxpy is not a proper joint distribtion")
if pxpy is None:
pxpy = np.outer(py,px)
return mutualinfo(px,py,pxpy,logbase=logbase)/shannonentropy(py,
logbase=logbase) | An information theoretic correlation measure.
Reflects linear and nonlinear correlation between two random variables
X and Y, characterized by the discrete probability distributions px and py
respectively.
Parameters
----------
px : array_like
Discrete probability distribution of random variable X
py : array_like
Discrete probability distribution of random variable Y
pxpy : 2d array_like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
mutualinfo(px,py,pxpy,logbase=logbase)/shannonentropy(py,logbase=logbase)
Notes
-----
This is also equivalent to
corrent(px,py,pxpy) = 1 - condent(px,py,pxpy)/shannonentropy(py) | corrent | python | statsmodels/statsmodels | statsmodels/sandbox/infotheo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/infotheo.py | BSD-3-Clause |
def covent(px,py,pxpy,logbase=2):
"""
An information theoretic covariance measure.
Reflects linear and nonlinear correlation between two random variables
X and Y, characterized by the discrete probability distributions px and py
respectively.
Parameters
----------
px : array_like
Discrete probability distribution of random variable X
py : array_like
Discrete probability distribution of random variable Y
pxpy : 2d array_like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
condent(px,py,pxpy,logbase=logbase) + condent(py,px,pxpy,
logbase=logbase)
Notes
-----
This is also equivalent to
covent(px,py,pxpy) = condent(px,py,pxpy) + condent(py,px,pxpy)
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
if pxpy is not None and not _isproperdist(pxpy):
raise ValueError("pxpy is not a proper joint distribtion")
if pxpy is None:
pxpy = np.outer(py,px)
# FIXME: these should be `condentropy`, not `condent`
return (condent(px, py, pxpy, logbase=logbase) # noqa:F821 See GH#5756
+ condent(py, px, pxpy, logbase=logbase)) # noqa:F821 See GH#5756 | An information theoretic covariance measure.
Reflects linear and nonlinear correlation between two random variables
X and Y, characterized by the discrete probability distributions px and py
respectively.
Parameters
----------
px : array_like
Discrete probability distribution of random variable X
py : array_like
Discrete probability distribution of random variable Y
pxpy : 2d array_like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
condent(px,py,pxpy,logbase=logbase) + condent(py,px,pxpy,
logbase=logbase)
Notes
-----
This is also equivalent to
covent(px,py,pxpy) = condent(px,py,pxpy) + condent(py,px,pxpy) | covent | python | statsmodels/statsmodels | statsmodels/sandbox/infotheo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/infotheo.py | BSD-3-Clause |
def renyientropy(px,alpha=1,logbase=2,measure='R'):
"""
Renyi's generalized entropy
Parameters
----------
px : array_like
Discrete probability distribution of random variable X. Note that
px is assumed to be a proper probability distribution.
logbase : int or np.e, optional
Default is 2 (bits)
alpha : float or inf
The order of the entropy. The default is 1, which in the limit
is just Shannon's entropy. 2 is Renyi (Collision) entropy. If
the string "inf" or numpy.inf is specified the min-entropy is returned.
measure : str, optional
The type of entropy measure desired. 'R' returns Renyi entropy
measure. 'T' returns the Tsallis entropy measure.
Returns
-------
1/(1-alpha)*log(sum(px**alpha))
In the limit as alpha -> 1, Shannon's entropy is returned.
In the limit as alpha -> inf, min-entropy is returned.
"""
#TODO:finish returns
#TODO:add checks for measure
if not _isproperdist(px):
raise ValueError("px is not a proper probability distribution")
alpha = float(alpha)
if alpha == 1:
genent = shannonentropy(px)
if logbase != 2:
return logbasechange(2, logbase) * genent
return genent
elif 'inf' in str(alpha).lower() or alpha == np.inf:
return -np.log(np.max(px))
# gets here if alpha != (1 or inf)
px = px**alpha
genent = np.log(px.sum())
if logbase == 2:
return 1/(1-alpha) * genent
else:
return 1/(1-alpha) * logbasechange(2, logbase) * genent | Renyi's generalized entropy
Parameters
----------
px : array_like
Discrete probability distribution of random variable X. Note that
px is assumed to be a proper probability distribution.
logbase : int or np.e, optional
Default is 2 (bits)
alpha : float or inf
The order of the entropy. The default is 1, which in the limit
is just Shannon's entropy. 2 is Renyi (Collision) entropy. If
the string "inf" or numpy.inf is specified the min-entropy is returned.
measure : str, optional
The type of entropy measure desired. 'R' returns Renyi entropy
measure. 'T' returns the Tsallis entropy measure.
Returns
-------
1/(1-alpha)*log(sum(px**alpha))
In the limit as alpha -> 1, Shannon's entropy is returned.
In the limit as alpha -> inf, min-entropy is returned. | renyientropy | python | statsmodels/statsmodels | statsmodels/sandbox/infotheo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/infotheo.py | BSD-3-Clause |
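A sketch of the limiting cases described above (distribution illustrative):
import numpy as np
from statsmodels.sandbox.infotheo import renyientropy, shannonentropy

px = np.array([0.7, 0.2, 0.1])
print(renyientropy(px, alpha=1), shannonentropy(px))  # alpha=1 reduces to Shannon entropy
print(renyientropy(px, alpha=2))                      # Renyi (collision) entropy of order 2
print(renyientropy(px, alpha=np.inf))                 # min-entropy, -log(max(px))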
def gencrossentropy(px,py,pxpy,alpha=1,logbase=2, measure='T'):
"""
Generalized cross-entropy measures.
Parameters
----------
px : array_like
Discrete probability distribution of random variable X
py : array_like
Discrete probability distribution of random variable Y
pxpy : 2d array_like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
measure : str, optional
The measure is the type of generalized cross-entropy desired. 'T' is
the cross-entropy version of the Tsallis measure. 'CR' is Cressie-Read
measure.
""" | Generalized cross-entropy measures.
Parameters
----------
px : array_like
Discrete probability distribution of random variable X
py : array_like
Discrete probability distribution of random variable Y
pxpy : 2d array_like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
measure : str, optional
The measure is the type of generalized cross-entropy desired. 'T' is
the cross-entropy version of the Tsallis measure. 'CR' is Cressie-Read
measure. | gencrossentropy | python | statsmodels/statsmodels | statsmodels/sandbox/infotheo.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/infotheo.py | BSD-3-Clause |
def _compute_sigma(self, resids):
"""
        Computes the sigma matrix and updates the Cholesky decomposition.
"""
M = self._M
nobs = self.nobs
sig = np.dot(resids, resids.T) # faster way to do this?
if not self._dfk:
div = nobs
elif self._dfk.lower() == 'dfk1':
            div = np.zeros((M, M))
            for i in range(M):
                for j in range(M):
                    div[i, j] = ((self.df_model[i]+1) *
                                 (self.df_model[j]+1))**(1/2)
        else:  # 'dfk2' error checking is done earlier
            div = np.zeros((M, M))
            for i in range(M):
                for j in range(M):
                    div[i, j] = nobs - max(self.df_model[i]+1,
                                           self.df_model[j]+1)
# does not handle (#,)
self.cholsigmainv = np.linalg.cholesky(np.linalg.pinv(sig/div)).T
        return sig/div | Computes the sigma matrix and updates the Cholesky decomposition. | _compute_sigma | python | statsmodels/statsmodels | statsmodels/sandbox/sysreg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/sysreg.py | BSD-3-Clause
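A small numpy sketch of the default branch above (div = nobs), run on synthetic residuals; shapes and names are illustrative, not the statsmodels API:

import numpy as np

# toy residuals for an M-equation system, shape (M, nobs)
rng = np.random.default_rng(0)
M, nobs = 3, 50
resids = rng.standard_normal((M, nobs))

sig = resids @ resids.T                    # cross-equation residual products
sigma = sig / nobs                         # M x M residual covariance estimate

# whitening factor analogous to cholsigmainv above
cholsigmainv = np.linalg.cholesky(np.linalg.pinv(sigma)).T
print(sigma.shape, cholsigmainv.shape)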
def whiten(self, X):
"""
SUR whiten method.
Parameters
----------
X : list of arrays
Data to be whitened.
Returns
-------
If X is the exogenous RHS of the system.
``np.dot(np.kron(cholsigmainv,np.eye(M)),np.diag(X))``
If X is the endogenous LHS of the system.
"""
nobs = self.nobs
if X is self.endog: # definitely not a robust check
return np.dot(np.kron(self.cholsigmainv,np.eye(nobs)),
X.reshape(-1,1))
elif X is self.sp_exog:
return (sparse.kron(self.cholsigmainv,
sparse.eye(nobs,nobs))*X).toarray()#*=dot until cast to array | SUR whiten method.
Parameters
----------
X : list of arrays
Data to be whitened.
Returns
-------
        If X is the exogenous RHS of the system:
        ``(sparse.kron(cholsigmainv, sparse.eye(nobs, nobs)) * X).toarray()``
        If X is the endogenous LHS of the system:
        ``np.dot(np.kron(cholsigmainv, np.eye(nobs)), X.reshape(-1, 1))`` | whiten | python | statsmodels/statsmodels | statsmodels/sandbox/sysreg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/sysreg.py | BSD-3-Clause
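The whitening step above is a Kronecker product applied to the stacked system; a tiny shape check, with made-up inputs:

import numpy as np

M, nobs = 2, 4
rng = np.random.default_rng(1)
cholsigmainv = np.triu(rng.standard_normal((M, M)))   # stand-in for the factor above
y = rng.standard_normal((M * nobs, 1))                # stacked endogenous variable

# np.kron(cholsigmainv, I_nobs) applies the cross-equation transform to every block
wy = np.kron(cholsigmainv, np.eye(nobs)) @ y
print(wy.shape)   # (M * nobs, 1)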
def fit(self, igls=False, tol=1e-5, maxiter=100):
"""
igls : bool
Iterate until estimates converge if sigma is None instead of
            two-step GLS, which is the default if sigma is None.
tol : float
maxiter : int
Notes
-----
        This is a naive implementation that does not exploit the block
diagonal structure. It should work for ill-conditioned `sigma`
but this is untested.
"""
if not np.any(self.sigma):
self.sigma = self._compute_sigma(self.endog, self.exog)
M = self._M
beta = np.dot(self.pinv_wexog, self.wendog)
self._update_history(beta)
self.iterations += 1
if not igls:
sur_fit = SysResults(self, beta, self.normalized_cov_params)
return sur_fit
conv = self.history['params']
while igls and (np.any(np.abs(conv[-2] - conv[-1]) > tol)) and \
(self.iterations < maxiter):
fittedvalues = (self.sp_exog*beta).reshape(M,-1)
resids = self.endog - fittedvalues # do not attach results yet
self.sigma = self._compute_sigma(resids) # need to attach for compute?
self.wendog = self.whiten(self.endog)
self.wexog = self.whiten(self.sp_exog)
self.pinv_wexog = np.linalg.pinv(self.wexog)
self.normalized_cov_params = np.dot(self.pinv_wexog,
np.transpose(self.pinv_wexog))
beta = np.dot(self.pinv_wexog, self.wendog)
self._update_history(beta)
self.iterations += 1
sur_fit = SysResults(self, beta, self.normalized_cov_params)
return sur_fit | igls : bool
Iterate until estimates converge if sigma is None instead of
            two-step GLS, which is the default if sigma is None.
tol : float
maxiter : int
Notes
-----
        This is a naive implementation that does not exploit the block
diagonal structure. It should work for ill-conditioned `sigma`
but this is untested. | fit | python | statsmodels/statsmodels | statsmodels/sandbox/sysreg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/sysreg.py | BSD-3-Clause |
def whiten(self, Y):
"""
Runs the first stage of the 2SLS.
Returns the RHS variables that include the instruments.
"""
wexog = []
indep_endog = self._indep_endog # this has the col mapping
# fullexog = self.fullexog
for eq in range(self._M): # need to go through all equations regardless
instr_eq = Y.get(eq, None) # Y has the eq to ind endog array map
newRHS = self.exog[eq].copy()
if instr_eq:
for i,LHS in enumerate(instr_eq):
yhat = GLS(LHS, self.instruments).fit().fittedvalues
newRHS[:,indep_endog[eq][i]] = yhat
# this might fail if there is a one variable column (nobs,)
# in exog
wexog.append(newRHS)
return wexog | Runs the first stage of the 2SLS.
Returns the RHS variables that include the instruments. | whiten | python | statsmodels/statsmodels | statsmodels/sandbox/sysreg.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/sysreg.py | BSD-3-Clause |
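The first stage sketched with plain least squares instead of the GLS call used above; data and names are synthetic:

import numpy as np

rng = np.random.default_rng(2)
nobs = 100
instruments = np.column_stack([np.ones(nobs), rng.standard_normal((nobs, 2))])
endog_col = instruments @ np.array([1.0, 0.5, -0.3]) + rng.standard_normal(nobs)

# project the endogenous regressor onto the instruments
coef, *_ = np.linalg.lstsq(instruments, endog_col, rcond=None)
yhat = instruments @ coef          # fitted values that replace endog_col in the RHS
print(yhat.shape)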
def descstats(data, cols=None, axis=0):
'''
Prints descriptive statistics for one or multiple variables.
Parameters
----------
data: numpy array
`x` is the data
    cols : list, optional
        A list of the column numbers of the variables to describe.
Default is all columns.
axis: 1 or 0
axis order of data. Default is 0 for column-ordered data.
Examples
--------
    >>> descstats(data.exog, cols=['x_1','x_2','x_3'])
'''
x = np.array(data) # or rather, the data we're interested in
    if cols is None and x.ndim == 1:
        x = x[:, None]
if x.shape[1] == 1:
desc = '''
---------------------------------------------
Univariate Descriptive Statistics
---------------------------------------------
Var. Name %(name)12s
----------
Obs. %(nobs)22i Range %(range)22s
Sum of Wts. %(sum)22s Coeff. of Variation %(coeffvar)22.4g
Mode %(mode)22.4g Skewness %(skewness)22.4g
Repeats %(nmode)22i Kurtosis %(kurtosis)22.4g
Mean %(mean)22.4g Uncorrected SS %(uss)22.4g
Median %(median)22.4g Corrected SS %(ss)22.4g
Variance %(variance)22.4g Sum Observations %(sobs)22.4g
Std. Dev. %(stddev)22.4g
''' % {'name': cols, 'sum': 'N/A', 'nobs': len(x), 'mode': \
stats.mode(x)[0][0], 'nmode': stats.mode(x)[1][0], \
'mean': x.mean(), 'median': np.median(x), 'range': \
'('+str(x.min())+', '+str(x.max())+')', 'variance': \
x.var(), 'stddev': x.std(), 'coeffvar': \
stats.variation(x), 'skewness': stats.skew(x), \
'kurtosis': stats.kurtosis(x), 'uss': np.sum(x**2, axis=0),\
'ss': np.sum((x-x.mean())**2, axis=0), 'sobs': np.sum(x)}
desc+= '''
Percentiles
-------------
1 %% %12.4g
5 %% %12.4g
10 %% %12.4g
25 %% %12.4g
50 %% %12.4g
75 %% %12.4g
90 %% %12.4g
95 %% %12.4g
99 %% %12.4g
''' % tuple([stats.scoreatpercentile(x,per) for per in (1,5,10,25,
50,75,90,95,99)])
t,p_t=stats.ttest_1samp(x,0)
M,p_M=sign_test(x)
S,p_S=stats.wilcoxon(np.squeeze(x))
desc+= '''
Tests of Location (H0: Mu0=0)
-----------------------------
Test Statistic Two-tailed probability
-----------------+-----------------------------------------
Student's t | t {:7.5f} Pr > |t| <{:.4f}
Sign | M {:8.2f} Pr >= |M| <{:.4f}
Signed Rank | S {:8.2f} Pr >= |S| <{:.4f}
'''.format(t,p_t,M,p_M,S,p_S)
# Should this be part of a 'descstats'
# in any event these should be split up, so that they can be called
# individually and only returned together if someone calls summary
# or something of the sort
elif x.shape[1] > 1:
desc ='''
Var. Name | Obs. Mean Std. Dev. Range
------------+--------------------------------------------------------'''+\
os.linesep
for var in range(x.shape[1]):
xv = x[:, var]
kwargs = {
'name': var,
'obs': len(xv),
'mean': xv.mean(),
'stddev': xv.std(),
'range': '('+str(xv.min())+', '+str(xv.max())+')'+os.linesep
}
desc += ("%(name)15s %(obs)9i %(mean)12.4g %(stddev)12.4g "
"%(range)20s" % kwargs)
else:
raise ValueError("data not understood")
return desc | Prints descriptive statistics for one or multiple variables.
Parameters
----------
data: numpy array
`x` is the data
    cols : list, optional
        A list of the column numbers of the variables to describe.
Default is all columns.
axis: 1 or 0
axis order of data. Default is 0 for column-ordered data.
Examples
--------
    >>> descstats(data.exog, cols=['x_1','x_2','x_3']) | descstats | python | statsmodels/statsmodels | statsmodels/sandbox/descstats.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/descstats.py | BSD-3-Clause
def _model2dataframe(model_endog, model_exog, model_type=OLS, **kwargs):
"""return a series containing the summary of a linear model
All the exceding parameters will be redirected to the linear model
"""
# create the linear model and perform the fit
model_result = model_type(model_endog, model_exog, **kwargs).fit()
# keeps track of some global statistics
statistics = pd.Series(
{"r2": model_result.rsquared, "adj_r2": model_result.rsquared_adj}
)
    # put them together with the result for each term
result_df = pd.DataFrame(
{
"params": model_result.params,
"pvals": model_result.pvalues,
"std": model_result.bse,
"statistics": statistics,
}
)
    # add the overall results for the f-value and the total p-value
fisher_df = pd.DataFrame(
{
"params": {"_f_test": model_result.fvalue},
"pvals": {"_f_test": model_result.f_pvalue},
}
)
# merge them and unstack to obtain a hierarchically indexed series
res_series = pd.concat([result_df, fisher_df]).unstack()
    return res_series.dropna() | Return a series containing the summary of a linear model.
    All the remaining parameters are redirected to the linear model. | _model2dataframe | python | statsmodels/statsmodels | statsmodels/sandbox/multilinear.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/multilinear.py | BSD-3-Clause
def multiOLS(model, dataframe, column_list=None, method='fdr_bh',
alpha=0.05, subset=None, model_type=OLS, **kwargs):
"""apply a linear model to several endogenous variables on a dataframe
Take a linear model definition via formula and a dataframe that will be
the environment of the model, and apply the linear model to a subset
(or all) of the columns of the dataframe. It will return a dataframe
with part of the information from the linear model summary.
Parameters
----------
model : str
formula description of the model
dataframe : pandas.dataframe
dataframe where the model will be evaluated
column_list : list[str], optional
Names of the columns to analyze with the model.
If None (Default) it will perform the function on all the
eligible columns (numerical type and not in the model definition)
model_type : model class, optional
The type of model to be used. The default is the linear model.
Can be any linear model (OLS, WLS, GLS, etc..)
method : str, optional
the method used to perform the pvalue correction for multiple testing.
default is the Benjamini/Hochberg, other available methods are:
`bonferroni` : one-step correction
        `sidak` : one-step correction
`holm-sidak` :
`holm` :
`simes-hochberg` :
`hommel` :
`fdr_bh` : Benjamini/Hochberg
`fdr_by` : Benjamini/Yekutieli
alpha : float, optional
the significance level used for the pvalue correction (default 0.05)
subset : bool array
the selected rows to be used in the regression
all the other parameters will be directed to the model creation.
Returns
-------
summary : pandas.DataFrame
        a dataframe containing an extract from the summary of the model
        obtained for each column. It will give the model's overall f test
        result and p-value, and the regression value and standard deviation
        for each of the regressors. The DataFrame has a hierarchical column
        structure, divided as:
        - params: contains the parameters resulting from the models. Has
            an additional column named _f_test containing the result of the
            F test.
        - pval: the pvalue results of the models. Has the _f_test column
            for the significance of the whole test.
- adj_pval: the corrected pvalues via the multitest function.
- std: uncertainties of the model parameters
- statistics: contains the r squared statistics and the adjusted
r squared.
Notes
-----
    The main application of this function is in systems biology, to perform
    linear model testing on a large number of parameters, such as the
    differential genetic expression of several genes.
See Also
--------
statsmodels.stats.multitest
contains several functions to perform the multiple p-value correction
Examples
--------
Using the longley data as dataframe example
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load_pandas()
>>> df = data.exog
>>> df['TOTEMP'] = data.endog
This will perform the specified linear model on all the
other columns of the dataframe
>>> multiOLS('GNP + 1', df)
    This selects only a certain subset of the columns
    >>> multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])
    It is possible to specify a transformation also on the target column,
conforming to the patsy formula specification
>>> multiOLS('GNP + 0', df, ['I(GNPDEFL**2)', 'center(TOTEMP)'])
It is possible to specify the subset of the dataframe
on which perform the analysis
>> multiOLS('GNP + 1', df, subset=df.GNPDEFL > 90)
Even a single column name can be given without enclosing it in a list
>>> multiOLS('GNP + 0', df, 'GNPDEFL')
"""
# data normalization
# if None take all the numerical columns that are not present in the model
# it's not waterproof but is a good enough criterion for everyday use
if column_list is None:
column_list = [
name
for name in dataframe.columns
if dataframe[name].dtype != object and name not in model
]
# if it's a single string transform it in a single element list
if isinstance(column_list, str):
column_list = [column_list]
if subset is not None:
dataframe = dataframe.loc[subset]
# perform each model and retrieve the statistics
col_results = {}
# as the model will use always the same endogenous variables
# we can create them once and reuse
mgr = FormulaManager()
model_exog = mgr.get_matrices(model, dataframe, pandas=True)
for col_name in column_list:
# it will try to interpret the column name as a valid dataframe
# index as it can be several times faster. If it fails it
        # interprets it as a patsy formula (for example, for centering)
try:
model_endog = dataframe[col_name]
except KeyError:
model_endog = mgr.get_matrices(
col_name + " + 0", data=dataframe, pandas=True
)
# retrieve the result and store them
res = _model2dataframe(model_endog, model_exog, model_type, **kwargs)
col_results[col_name] = res
    # merge them together and sort by overall p-value
summary = pd.DataFrame(col_results)
# order by the p-value: the most useful model first!
summary = summary.T.sort_values([('pvals', '_f_test')])
summary.index.name = 'endogenous vars'
# implementing the pvalue correction method
smt = stats.multipletests
for (key1, key2) in summary:
if key1 != 'pvals':
continue
p_values = summary[key1, key2]
corrected = smt(p_values, method=method, alpha=alpha)[1]
# extend the dataframe of results with the column
# of the corrected p_values
summary['adj_' + key1, key2] = corrected
return summary | apply a linear model to several endogenous variables on a dataframe
Take a linear model definition via formula and a dataframe that will be
the environment of the model, and apply the linear model to a subset
(or all) of the columns of the dataframe. It will return a dataframe
with part of the information from the linear model summary.
Parameters
----------
model : str
formula description of the model
dataframe : pandas.dataframe
dataframe where the model will be evaluated
column_list : list[str], optional
Names of the columns to analyze with the model.
If None (Default) it will perform the function on all the
eligible columns (numerical type and not in the model definition)
model_type : model class, optional
The type of model to be used. The default is the linear model.
Can be any linear model (OLS, WLS, GLS, etc..)
method : str, optional
the method used to perform the pvalue correction for multiple testing.
default is the Benjamini/Hochberg, other available methods are:
`bonferroni` : one-step correction
        `sidak` : one-step correction
`holm-sidak` :
`holm` :
`simes-hochberg` :
`hommel` :
`fdr_bh` : Benjamini/Hochberg
`fdr_by` : Benjamini/Yekutieli
alpha : float, optional
the significance level used for the pvalue correction (default 0.05)
subset : bool array
the selected rows to be used in the regression
all the other parameters will be directed to the model creation.
Returns
-------
summary : pandas.DataFrame
        a dataframe containing an extract from the summary of the model
        obtained for each column. It will give the model's overall f test
        result and p-value, and the regression value and standard deviation
        for each of the regressors. The DataFrame has a hierarchical column
        structure, divided as:
        - params: contains the parameters resulting from the models. Has
            an additional column named _f_test containing the result of the
            F test.
        - pval: the pvalue results of the models. Has the _f_test column
            for the significance of the whole test.
- adj_pval: the corrected pvalues via the multitest function.
- std: uncertainties of the model parameters
- statistics: contains the r squared statistics and the adjusted
r squared.
Notes
-----
    The main application of this function is in systems biology, to perform
    linear model testing on a large number of parameters, such as the
    differential genetic expression of several genes.
See Also
--------
statsmodels.stats.multitest
contains several functions to perform the multiple p-value correction
Examples
--------
Using the longley data as dataframe example
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load_pandas()
>>> df = data.exog
>>> df['TOTEMP'] = data.endog
This will perform the specified linear model on all the
other columns of the dataframe
>>> multiOLS('GNP + 1', df)
    This selects only a certain subset of the columns
    >>> multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])
    It is possible to specify a transformation also on the target column,
conforming to the patsy formula specification
>>> multiOLS('GNP + 0', df, ['I(GNPDEFL**2)', 'center(TOTEMP)'])
It is possible to specify the subset of the dataframe
on which perform the analysis
>> multiOLS('GNP + 1', df, subset=df.GNPDEFL > 90)
Even a single column name can be given without enclosing it in a list
>>> multiOLS('GNP + 0', df, 'GNPDEFL') | multiOLS | python | statsmodels/statsmodels | statsmodels/sandbox/multilinear.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/multilinear.py | BSD-3-Clause |
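For reference, a minimal example of the p-value correction step that multiOLS delegates to statsmodels.stats.multitest, on toy p-values:

import numpy as np
from statsmodels.stats.multitest import multipletests

pvals = np.array([0.001, 0.02, 0.04, 0.30, 0.70])
reject, pvals_corrected, _, _ = multipletests(pvals, alpha=0.05, method='fdr_bh')
print(reject)
print(pvals_corrected)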
def _test_group(pvalues, group_name, group, exact=True):
"""test if the objects in the group are different from the general set.
    The test is performed on the pvalues set (as a pandas series) over
the group specified via a fisher exact test.
"""
from scipy.stats import chi2_contingency, fisher_exact
totals = 1.0 * len(pvalues)
total_significant = 1.0 * np.sum(pvalues)
cross_index = [c for c in group if c in pvalues.index]
missing = [c for c in group if c not in pvalues.index]
if missing:
s = ('the test is not well defined if the group '
             'has elements not present in the significance '
'array. group name: {}, missing elements: {}')
logging.warning(s.format(group_name, missing))
# how many are significant and not in the group
group_total = 1.0 * len(cross_index)
group_sign = 1.0 * len([c for c in cross_index if pvalues[c]])
group_nonsign = 1.0 * (group_total - group_sign)
# how many are significant and not outside the group
extern_sign = 1.0 * (total_significant - group_sign)
extern_nonsign = 1.0 * (totals - total_significant - group_nonsign)
# make the fisher test or the chi squared
test = fisher_exact if exact else chi2_contingency
table = [[extern_nonsign, extern_sign], [group_nonsign, group_sign]]
pvalue = test(np.array(table))[1]
# is the group more represented or less?
part = group_sign, group_nonsign, extern_sign, extern_nonsign
# increase = (group_sign / group_total) > (total_significant / totals)
increase = np.log((totals * group_sign)
/ (total_significant * group_total))
return pvalue, increase, part | test if the objects in the group are different from the general set.
    The test is performed on the pvalues set (as a pandas series) over
the group specified via a fisher exact test. | _test_group | python | statsmodels/statsmodels | statsmodels/sandbox/multilinear.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/multilinear.py | BSD-3-Clause |
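A toy version of the 2x2 table and test above, with made-up counts (the layout mirrors the function's table variable):

import numpy as np
from scipy.stats import fisher_exact

# [[outside & non-significant, outside & significant],
#  [inside  & non-significant, inside  & significant]]
table = np.array([[40, 10],
                  [5, 15]])
oddsratio, pvalue = fisher_exact(table)

# log odds of the in-group significant fraction versus the overall fraction
group_sign, group_total = 15.0, 20.0
total_sign, totals = 25.0, 70.0
increase = np.log((totals * group_sign) / (total_sign * group_total))
print(pvalue, increase)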
def multigroup(pvals, groups, exact=True, keep_all=True, alpha=0.05):
"""Test if the given groups are different from the total partition.
    Given a boolean array, test whether each group has a proportion of positives
    different from the overall proportion.
The test can be done as an exact Fisher test or approximated as a
Chi squared test for more speed.
Parameters
----------
pvals : pandas series of boolean
        the significance of the variables under analysis
groups : dict of list
the name of each category of variables under exam.
each one is a list of the variables included
exact : bool, optional
If True (default) use the fisher exact test, otherwise
use the chi squared test for contingencies tables.
For high number of elements in the array the fisher test can
be significantly slower than the chi squared.
keep_all : bool, optional
if False it will drop those groups where the fraction
of positive is below the expected result. If True (default)
it will keep all the significant results.
alpha : float, optional
        the significance level for the pvalue correction
on the whole set of groups (not inside the groups themselves).
Returns
-------
result_df: pandas dataframe
for each group returns:
pvals - the fisher p value of the test
adj_pvals - the adjusted pvals
increase - the log of the odd ratio between the
internal significant ratio versus the external one
            _in_sign - significant elements inside the group
            _in_non - non-significant elements inside the group
            _out_sign - significant elements outside the group
            _out_non - non-significant elements outside the group
Notes
-----
    This test allows one to see whether a category of variables is generally
    better described by the model. For example, it can show whether a predictor
    gives more information on demographic or economical parameters,
    by creating two groups containing the endogenous variables of each
    category.
    This function is conceived for medical datasets with many variables
    that can be easily grouped into functional groups. This is because
    the significance of a group requires a rather large number of
    composing elements.
Examples
--------
A toy example on a real dataset, the Guerry dataset from R
>>> url = "https://raw.githubusercontent.com/vincentarelbundock/"
>>> url = url + "Rdatasets/csv/HistData/Guerry.csv"
>>> df = pd.read_csv(url, index_col='dept')
    evaluate the relationship between the various parameters and Wealth
>>> pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']
define the groups
>>> groups = {}
>>> groups['crime'] = ['Crime_prop', 'Infanticide',
... 'Crime_parents', 'Desertion', 'Crime_pers']
>>> groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']
>>> groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']
    do the significance analysis
>>> multigroup(pvals < 0.05, groups)
"""
pvals = pd.Series(pvals)
if not (set(pvals.unique()) <= {False, True}):
raise ValueError("the series should be binary")
if hasattr(pvals.index, "is_unique") and not pvals.index.is_unique:
raise ValueError("series with duplicated index is not accepted")
results = {
"pvals": {},
"increase": {},
"_in_sign": {},
"_in_non": {},
"_out_sign": {},
"_out_non": {},
}
for group_name, group_list in groups.items():
res = _test_group(pvals, group_name, group_list, exact)
results['pvals'][group_name] = res[0]
results['increase'][group_name] = res[1]
results['_in_sign'][group_name] = res[2][0]
results['_in_non'][group_name] = res[2][1]
results['_out_sign'][group_name] = res[2][2]
results['_out_non'][group_name] = res[2][3]
result_df = pd.DataFrame(results).sort_values('pvals')
if not keep_all:
result_df = result_df[result_df.increase]
smt = stats.multipletests
corrected = smt(result_df['pvals'], method='fdr_bh', alpha=alpha)[1]
result_df['adj_pvals'] = corrected
return result_df | Test if the given groups are different from the total partition.
    Given a boolean array, test whether each group has a proportion of positives
    different from the overall proportion.
The test can be done as an exact Fisher test or approximated as a
Chi squared test for more speed.
Parameters
----------
pvals : pandas series of boolean
        the significance of the variables under analysis
groups : dict of list
the name of each category of variables under exam.
each one is a list of the variables included
exact : bool, optional
If True (default) use the fisher exact test, otherwise
use the chi squared test for contingencies tables.
For high number of elements in the array the fisher test can
be significantly slower than the chi squared.
keep_all : bool, optional
if False it will drop those groups where the fraction
of positive is below the expected result. If True (default)
it will keep all the significant results.
alpha : float, optional
        the significance level for the pvalue correction
on the whole set of groups (not inside the groups themselves).
Returns
-------
result_df: pandas dataframe
for each group returns:
pvals - the fisher p value of the test
adj_pvals - the adjusted pvals
increase - the log of the odd ratio between the
internal significant ratio versus the external one
            _in_sign - significant elements inside the group
            _in_non - non-significant elements inside the group
            _out_sign - significant elements outside the group
            _out_non - non-significant elements outside the group
Notes
-----
    This test allows one to see whether a category of variables is generally
    better described by the model. For example, it can show whether a predictor
    gives more information on demographic or economical parameters,
    by creating two groups containing the endogenous variables of each
    category.
    This function is conceived for medical datasets with many variables
    that can be easily grouped into functional groups. This is because
    the significance of a group requires a rather large number of
    composing elements.
Examples
--------
A toy example on a real dataset, the Guerry dataset from R
>>> url = "https://raw.githubusercontent.com/vincentarelbundock/"
>>> url = url + "Rdatasets/csv/HistData/Guerry.csv"
>>> df = pd.read_csv(url, index_col='dept')
    evaluate the relationship between the various parameters and Wealth
>>> pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']
define the groups
>>> groups = {}
>>> groups['crime'] = ['Crime_prop', 'Infanticide',
... 'Crime_parents', 'Desertion', 'Crime_pers']
>>> groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']
>>> groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']
    do the significance analysis
>>> multigroup(pvals < 0.05, groups) | multigroup | python | statsmodels/statsmodels | statsmodels/sandbox/multilinear.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/multilinear.py | BSD-3-Clause |
def rwexog(self):
"""Whitened exogenous variables augmented with restrictions"""
if self._rwexog is None:
P = self.ncoeffs
K = self.nconstraint
design = np.zeros((P + K, P + K))
design[:P, :P] = np.dot(self.wexog.T, self.wexog) #top left
constr = np.reshape(self.constraint, (K, P))
design[:P, P:] = constr.T #top right partition
design[P:, :P] = constr #bottom left partition
design[P:, P:] = np.zeros((K, K)) #bottom right partition
self._rwexog = design
return self._rwexog | Whitened exogenous variables augmented with restrictions | rwexog | python | statsmodels/statsmodels | statsmodels/sandbox/rls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/rls.py | BSD-3-Clause |
def inv_rwexog(self):
"""Inverse of self.rwexog"""
if self._inv_rwexog is None:
self._inv_rwexog = np.linalg.inv(self.rwexog)
return self._inv_rwexog | Inverse of self.rwexog | inv_rwexog | python | statsmodels/statsmodels | statsmodels/sandbox/rls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/rls.py | BSD-3-Clause |
def rwendog(self):
"""Whitened endogenous variable augmented with restriction parameters"""
if self._rwendog is None:
P = self.ncoeffs
K = self.nconstraint
response = np.zeros((P + K,))
response[:P] = np.dot(self.wexog.T, self.wendog)
response[P:] = self.param
self._rwendog = response
return self._rwendog | Whitened endogenous variable augmented with restriction parameters | rwendog | python | statsmodels/statsmodels | statsmodels/sandbox/rls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/rls.py | BSD-3-Clause |
def rnorm_cov_params(self):
"""Parameter covariance under restrictions"""
if self._ncp is None:
P = self.ncoeffs
self._ncp = self.inv_rwexog[:P, :P]
return self._ncp | Parameter covariance under restrictions | rnorm_cov_params | python | statsmodels/statsmodels | statsmodels/sandbox/rls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/rls.py | BSD-3-Clause |
def wrnorm_cov_params(self):
"""
Heteroskedasticity-consistent parameter covariance
Used to calculate White standard errors.
"""
if self._wncp is None:
df = self.df_resid
pred = np.dot(self.wexog, self.coeffs)
eps = np.diag((self.wendog - pred) ** 2)
sigmaSq = np.sum(eps)
pinvX = np.dot(self.rnorm_cov_params, self.wexog.T)
self._wncp = np.dot(np.dot(pinvX, eps), pinvX.T) * df / sigmaSq
return self._wncp | Heteroskedasticity-consistent parameter covariance
Used to calculate White standard errors. | wrnorm_cov_params | python | statsmodels/statsmodels | statsmodels/sandbox/rls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/rls.py | BSD-3-Clause |
def coeffs(self):
"""Estimated parameters"""
if self._coeffs is None:
betaLambda = np.dot(self.inv_rwexog, self.rwendog)
self._coeffs = betaLambda[:self.ncoeffs]
return self._coeffs | Estimated parameters | coeffs | python | statsmodels/statsmodels | statsmodels/sandbox/rls.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/rls.py | BSD-3-Clause |
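The rwexog/rwendog/coeffs properties above solve a bordered (KKT) system for least squares under linear restrictions; a self-contained numpy sketch on synthetic data:

import numpy as np

# minimize ||y - X b||^2 subject to R b = q
rng = np.random.default_rng(3)
nobs, P = 30, 3
X = rng.standard_normal((nobs, P))
y = rng.standard_normal(nobs)
R = np.array([[1.0, 1.0, 1.0]])      # one constraint: coefficients sum to q
q = np.array([1.0])
K = R.shape[0]

design = np.zeros((P + K, P + K))
design[:P, :P] = X.T @ X             # top left
design[:P, P:] = R.T                 # top right
design[P:, :P] = R                   # bottom left
response = np.concatenate([X.T @ y, q])

beta_lambda = np.linalg.solve(design, response)
coeffs = beta_lambda[:P]             # restricted estimates
print(coeffs, coeffs.sum())          # the sum should equal 1.0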
def __init__(self,data,names=None):
"""
p X N matrix input
"""
A = np.array(data).T
n,p = A.shape
self.n,self.p = n,p
if p > n:
from warnings import warn
warn('p > n - intentional?', RuntimeWarning)
self.A = A
self._origA=A.copy()
self.__calc()
self._colors= np.tile(self._colors,int((p-1)/len(self._colors))+1)[:p]
if names is not None and len(names) != p:
raise ValueError('names must match data dimension')
self.names = None if names is None else tuple([str(x) for x in names]) | p X N matrix input | __init__ | python | statsmodels/statsmodels | statsmodels/sandbox/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/pca.py | BSD-3-Clause |
def getCovarianceMatrix(self):
"""
returns the covariance matrix for the dataset
"""
return np.cov(self.N.T) | returns the covariance matrix for the dataset | getCovarianceMatrix | python | statsmodels/statsmodels | statsmodels/sandbox/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/pca.py | BSD-3-Clause |
def getEigensystem(self):
"""
returns a tuple of (eigenvalues,eigenvectors) for the data set.
"""
if self._eig is None:
res = np.linalg.eig(self.getCovarianceMatrix())
sorti=np.argsort(res[0])[::-1]
res=(res[0][sorti],res[1][:,sorti])
self._eig=res
return self._eig | returns a tuple of (eigenvalues,eigenvectors) for the data set. | getEigensystem | python | statsmodels/statsmodels | statsmodels/sandbox/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/pca.py | BSD-3-Clause |
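The same eigen-decomposition and descending sort, as a standalone numpy sketch on random data (values are illustrative):

import numpy as np

rng = np.random.default_rng(4)
data = rng.standard_normal((200, 3))           # n observations x p variables
cov = np.cov(data.T)

evals, evecs = np.linalg.eig(cov)
order = np.argsort(evals)[::-1]                # sort eigenvalues descending
evals, evecs = evals[order], evecs[:, order]

energies = evals / evals.sum()                 # normalized eigenvalues ("energies")
print(energies)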
def getEnergies(self):
"""
"energies" are just normalized eigenvectors
"""
v=self.getEigenvalues()
        return v/np.sum(v) | "energies" are just normalized eigenvalues | getEnergies | python | statsmodels/statsmodels | statsmodels/sandbox/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/sandbox/pca.py | BSD-3-Clause