# Source: statsmodels/statsmodels, statsmodels/multivariate
# (pca.py, factor.py, manova.py, plots.py, and related modules)
# License: BSD-3-Clause

def _compute_using_svd(self):
"""SVD method to compute eigenvalues and eigenvecs"""
x = self.transformed_data
u, s, v = np.linalg.svd(x, full_matrices=self._svd_full_matrices)
self.eigenvals = s ** 2.0
    self.eigenvecs = v.T

def _compute_using_eig(self):
"""
Eigenvalue decomposition method to compute eigenvalues and eigenvectors
"""
x = self.transformed_data
    self.eigenvals, self.eigenvecs = np.linalg.eigh(x.T.dot(x))

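# Illustrative check (not part of statsmodels): the two paths above agree
# because the eigenvalues of x.T @ x are the squared singular values of x.
import numpy as np

_x = np.random.default_rng(0).standard_normal((20, 4))
_u, _s, _v = np.linalg.svd(_x, full_matrices=False)
_evals = np.linalg.eigvalsh(_x.T @ _x)[::-1]  # eigh sorts ascending; reverse
assert np.allclose(_s ** 2, _evals)
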
def _compute_using_nipals(self):
"""
    NIPALS implementation to compute a small number of eigenvalues
    and eigenvectors
"""
x = self.transformed_data
if self._ncomp > 1:
x = x + 0.0 # Copy
tol, max_iter, ncomp = self._tol, self._max_iter, self._ncomp
vals = np.zeros(self._ncomp)
vecs = np.zeros((self._nvar, self._ncomp))
for i in range(ncomp):
max_var_ind = np.argmax(x.var(0))
factor = x[:, [max_var_ind]]
_iter = 0
diff = 1.0
while diff > tol and _iter < max_iter:
vec = x.T.dot(factor) / (factor.T.dot(factor))
vec = vec / np.sqrt(vec.T.dot(vec))
factor_last = factor
factor = x.dot(vec) / (vec.T.dot(vec))
diff = _norm(factor - factor_last) / _norm(factor)
_iter += 1
vals[i] = (factor ** 2).sum()
vecs[:, [i]] = vec
if ncomp > 1:
x -= factor.dot(vec.T)
self.eigenvals = vals
    self.eigenvecs = vecs

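# Usage sketch (assumes the statsmodels PCA class that wraps these methods):
# NIPALS extracts one component at a time, so it is preferred when only a
# few components of a wide data set are needed.
import numpy as np
from statsmodels.multivariate.pca import PCA

_data = np.random.default_rng(1).standard_normal((100, 20))
_pc = PCA(_data, ncomp=2, method='nipals')
# The two extracted eigenvalues match the leading ones from method='svd'.
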
def _fill_missing_em(self):
"""
EM algorithm to fill missing values
"""
non_missing = np.logical_not(np.isnan(self.data))
# If nothing missing, return without altering the data
if np.all(non_missing):
return self.data
    # 1. Standardize data as needed
data = self.transformed_data = np.asarray(self._prepare_data())
ncomp = self._ncomp
# 2. Check for all nans
col_non_missing = np.sum(non_missing, 1)
row_non_missing = np.sum(non_missing, 0)
if np.any(col_non_missing < ncomp) or np.any(row_non_missing < ncomp):
raise ValueError('Implementation requires that all columns and '
'all rows have at least ncomp non-missing values')
# 3. Get mask
mask = np.isnan(data)
# 4. Compute mean
mu = np.nanmean(data, 0)
# 5. Replace missing with mean
projection = np.ones((self._nobs, 1)) * mu
projection_masked = projection[mask]
data[mask] = projection_masked
# 6. Compute eigenvalues and fit
diff = 1.0
_iter = 0
while diff > self._tol_em and _iter < self._max_em_iter:
last_projection_masked = projection_masked
# Set transformed data to compute eigenvalues
self.transformed_data = data
# Call correct eig function here
self._compute_eig()
# Call function to compute factors and projection
self._compute_pca_from_eig()
projection = np.asarray(self.project(transform=False,
unweight=False))
projection_masked = projection[mask]
data[mask] = projection_masked
delta = last_projection_masked - projection_masked
diff = _norm(delta) / _norm(projection_masked)
_iter += 1
# Must copy to avoid overwriting original data since replacing values
data = self._adjusted_data + 0.0
projection = np.asarray(self.project())
data[mask] = projection[mask]
    return data

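# Usage sketch: this EM fill is reached through PCA's `missing` option
# (assuming the statsmodels interface, where missing='fill-em' selects it).
import numpy as np
from statsmodels.multivariate.pca import PCA

_rng = np.random.default_rng(2)
_data = _rng.standard_normal((100, 5))
_data[_rng.random(_data.shape) < 0.05] = np.nan  # knock out ~5% of entries
_pc = PCA(_data, ncomp=2, missing='fill-em')
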
def _compute_pca_from_eig(self):
"""
Compute relevant statistics after eigenvalues have been computed
"""
# Ensure sorted largest to smallest
vals, vecs = self.eigenvals, self.eigenvecs
indices = np.argsort(vals)
indices = indices[::-1]
vals = vals[indices]
vecs = vecs[:, indices]
if (vals <= 0).any():
# Discard and warn
num_good = vals.shape[0] - (vals <= 0).sum()
if num_good < self._ncomp:
import warnings
warnings.warn('Only {num:d} eigenvalues are positive. '
'This is the maximum number of components '
'that can be extracted.'.format(num=num_good),
EstimationWarning)
self._ncomp = num_good
vals[num_good:] = np.finfo(np.float64).tiny
# Use ncomp for the remaining calculations
vals = vals[:self._ncomp]
vecs = vecs[:, :self._ncomp]
self.eigenvals, self.eigenvecs = vals, vecs
# Select correct number of components to return
self.scores = self.factors = self.transformed_data.dot(vecs)
self.loadings = vecs
self.coeff = vecs.T
if self._normalize:
self.coeff = (self.coeff.T * np.sqrt(vals)).T
self.factors /= np.sqrt(vals)
    self.scores = self.factors

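# Sketch of the `normalize` convention set above: with normalize=True the
# factors have unit inner product (up to floating point) and the scale is
# carried by `coeff` instead.
import numpy as np
from statsmodels.multivariate.pca import PCA

_x = np.random.default_rng(3).standard_normal((50, 6))
_pc = PCA(_x, ncomp=3, normalize=True)
_f = np.asarray(_pc.factors)
assert np.allclose(_f.T @ _f, np.eye(3))
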
def _compute_rsquare_and_ic(self):
"""
Final statistics to compute
"""
# TSS and related calculations
# TODO: This needs careful testing, with and without weights,
# gls, standardized and demean
weights = self.weights
ss_data = self.transformed_data * np.sqrt(weights)
self._tss_indiv = np.sum(ss_data ** 2, 0)
self._tss = np.sum(self._tss_indiv)
self._ess = np.zeros(self._ncomp + 1)
self._ess_indiv = np.zeros((self._ncomp + 1, self._nvar))
for i in range(self._ncomp + 1):
# Projection in the same space as transformed_data
projection = self.project(ncomp=i, transform=False, unweight=False)
indiv_rss = (projection ** 2).sum(axis=0)
rss = indiv_rss.sum()
self._ess[i] = self._tss - rss
self._ess_indiv[i, :] = self._tss_indiv - indiv_rss
self.rsquare = 1.0 - self._ess / self._tss
# Information Criteria
ess = self._ess
invalid = ess <= 0 # Prevent log issues of 0
if invalid.any():
last_obs = (np.where(invalid)[0]).min()
ess = ess[:last_obs]
log_ess = np.log(ess)
r = np.arange(ess.shape[0])
nobs, nvar = self._nobs, self._nvar
sum_to_prod = (nobs + nvar) / (nobs * nvar)
min_dim = min(nobs, nvar)
penalties = np.array([sum_to_prod * np.log(1.0 / sum_to_prod),
sum_to_prod * np.log(min_dim),
np.log(min_dim) / min_dim])
penalties = penalties[:, None]
ic = log_ess + r * penalties
    self.ic = ic.T

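# Usage sketch: the information criteria assembled above can be used to
# choose the number of factors by minimising each column of `ic`.
import numpy as np
from statsmodels.multivariate.pca import PCA

_x = np.random.default_rng(4).standard_normal((200, 15))
_pc = PCA(_x, ncomp=10)
# Row r of ic corresponds to r included factors; pick the argmin per column.
print(np.asarray(_pc.ic).argmin(axis=0))
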
def project(self, ncomp=None, transform=True, unweight=True):
"""
Project series onto a specific number of factors.
Parameters
----------
ncomp : int, optional
Number of components to use. If omitted, all components
initially computed are used.
transform : bool, optional
Flag indicating whether to return the projection in the original
space of the data (True, default) or in the space of the
standardized/demeaned data.
unweight : bool, optional
Flag indicating whether to undo the effects of the estimation
weights.
Returns
-------
array_like
The nobs by nvar array of the projection onto ncomp factors.
    """
# Projection needs to be scaled/shifted based on inputs
ncomp = self._ncomp if ncomp is None else ncomp
if ncomp > self._ncomp:
raise ValueError('ncomp must be smaller than the number of '
'components computed.')
factors = np.asarray(self.factors)
coeff = np.asarray(self.coeff)
projection = factors[:, :ncomp].dot(coeff[:ncomp, :])
if transform or unweight:
projection *= np.sqrt(self.weights)
if transform:
# Remove the weights, which do not depend on transformation
if self._standardize:
projection *= self._sigma
if self._standardize or self._demean:
projection += self._mu
if self._index is not None:
projection = pd.DataFrame(projection,
columns=self._columns,
index=self._index)
    return projection

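# Usage sketch for `project`: reconstruct the data from the leading factors.
import numpy as np
from statsmodels.multivariate.pca import PCA

_x = np.random.default_rng(5).standard_normal((60, 8))
_pc = PCA(_x, ncomp=8)
_approx = _pc.project(ncomp=3)  # rank-3 reconstruction in the original units
_resid = _x - np.asarray(_approx)
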
def _to_pandas(self):
"""
Returns pandas DataFrames for all values
"""
index = self._index
# Principal Components
num_zeros = np.ceil(np.log10(self._ncomp))
comp_str = 'comp_{0:0' + str(int(num_zeros)) + 'd}'
cols = [comp_str.format(i) for i in range(self._ncomp)]
df = pd.DataFrame(self.factors, columns=cols, index=index)
self.scores = self.factors = df
# Projections
df = pd.DataFrame(self.projection,
columns=self._columns,
index=index)
self.projection = df
# Weights
df = pd.DataFrame(self.coeff, index=cols,
columns=self._columns)
self.coeff = df
# Loadings
df = pd.DataFrame(self.loadings,
index=self._columns, columns=cols)
self.loadings = df
# eigenvals
self.eigenvals = pd.Series(self.eigenvals)
self.eigenvals.name = 'eigenvals'
# eigenvecs
vec_str = comp_str.replace('comp', 'eigenvec')
cols = [vec_str.format(i) for i in range(self.eigenvecs.shape[1])]
self.eigenvecs = pd.DataFrame(self.eigenvecs, columns=cols)
# R2
self.rsquare = pd.Series(self.rsquare)
self.rsquare.index.name = 'ncomp'
self.rsquare.name = 'rsquare'
# IC
self.ic = pd.DataFrame(self.ic, columns=['IC_p1', 'IC_p2', 'IC_p3'])
self.ic.index.name = 'ncomp' | Returns pandas DataFrames for all values | _to_pandas | python | statsmodels/statsmodels | statsmodels/multivariate/pca.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/pca.py | BSD-3-Clause |
def plot_scree(self, ncomp=None, log_scale=True,
cumulative=False, ax=None):
"""
Plot of the ordered eigenvalues
Parameters
----------
    ncomp : int, optional
        Number of components to include in the plot. If None, will
        include the same number as the number of components computed.
    log_scale : bool, optional
        Flag indicating whether to use a log scale for the y-axis.
    cumulative : bool, optional
        Flag indicating whether to plot the eigenvalues or cumulative
        eigenvalues.
    ax : AxesSubplot, optional
        An axes on which to draw the graph. If omitted, a new figure
        is created.
Returns
-------
matplotlib.figure.Figure
The handle to the figure.
"""
import statsmodels.graphics.utils as gutils
fig, ax = gutils.create_mpl_ax(ax)
ncomp = self._ncomp if ncomp is None else ncomp
vals = np.asarray(self.eigenvals)
vals = vals[:self._ncomp]
if cumulative:
vals = np.cumsum(vals)
if log_scale:
ax.set_yscale('log')
ax.plot(np.arange(ncomp), vals[: ncomp], 'bo')
ax.autoscale(tight=True)
xlim = np.array(ax.get_xlim())
sp = xlim[1] - xlim[0]
xlim += 0.02 * np.array([-sp, sp])
ax.set_xlim(xlim)
ylim = np.array(ax.get_ylim())
scale = 0.02
if log_scale:
sp = np.log(ylim[1] / ylim[0])
ylim = np.exp(np.array([np.log(ylim[0]) - scale * sp,
np.log(ylim[1]) + scale * sp]))
else:
sp = ylim[1] - ylim[0]
ylim += scale * np.array([-sp, sp])
ax.set_ylim(ylim)
ax.set_title('Scree Plot')
ax.set_ylabel('Eigenvalue')
ax.set_xlabel('Component Number')
fig.tight_layout()
    return fig

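# Usage sketch for the scree plot (requires matplotlib):
import numpy as np
from statsmodels.multivariate.pca import PCA

_x = np.random.default_rng(6).standard_normal((100, 10))
_fig = PCA(_x, ncomp=10).plot_scree(log_scale=False)
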
def plot_rsquare(self, ncomp=None, ax=None):
"""
Box plots of the individual series R-square against the number of PCs.
Parameters
----------
    ncomp : int, optional
        Number of components to include in the plot. If None, will
        plot the minimum of 10 or the number of computed components.
    ax : AxesSubplot, optional
        An axes on which to draw the graph. If omitted, a new figure
        is created.
Returns
-------
matplotlib.figure.Figure
The handle to the figure.
"""
import statsmodels.graphics.utils as gutils
fig, ax = gutils.create_mpl_ax(ax)
ncomp = 10 if ncomp is None else ncomp
ncomp = min(ncomp, self._ncomp)
# R2s in rows, series in columns
r2s = 1.0 - self._ess_indiv / self._tss_indiv
r2s = r2s[1:]
r2s = r2s[:ncomp]
ax.boxplot(r2s.T)
ax.set_title('Individual Input $R^2$')
ax.set_ylabel('$R^2$')
ax.set_xlabel('Number of Included Principal Components')
    return fig

def pca(data, ncomp=None, standardize=True, demean=True, normalize=True,
gls=False, weights=None, method='svd'):
"""
Perform Principal Component Analysis (PCA).
Parameters
----------
data : ndarray
Variables in columns, observations in rows.
    ncomp : int, optional
        Number of components to return. If None, returns as many as the
        smaller of the number of rows or columns of data.
    standardize : bool, optional
        Flag indicating whether to use standardized data with mean 0 and
        unit variance. standardize being True implies demean.
    demean : bool, optional
        Flag indicating whether to demean data before computing principal
        components. demean is ignored if standardize is True.
    normalize : bool, optional
        Indicates whether to normalize the factors to have unit inner
        product. If False, the loadings will have unit inner product.
gls : bool, optional
Flag indicating to implement a two-step GLS estimator where
in the first step principal components are used to estimate residuals,
and then the inverse residual variance is used as a set of weights to
estimate the final principal components
weights : ndarray, optional
Series weights to use after transforming data according to standardize
or demean when computing the principal components.
    method : str, optional
        Determines the linear algebra routine used. 'svd', the default,
        uses a singular value decomposition. 'eig' uses an eigenvalue
        decomposition.
Returns
-------
factors : {ndarray, DataFrame}
Array (nobs, ncomp) of principal components (also known as scores).
loadings : {ndarray, DataFrame}
Array (ncomp, nvar) of principal component loadings for constructing
the factors.
projection : {ndarray, DataFrame}
Array (nobs, nvar) containing the projection of the data onto the ncomp
estimated factors.
rsquare : {ndarray, Series}
        Array (ncomp,) where the element in the ith position is the R-square
        of including the first i principal components. The values are
        calculated on the transformed data, not the original data.
ic : {ndarray, DataFrame}
Array (ncomp, 3) containing the Bai and Ng (2003) Information
        criteria. Each column is a different criterion, and each row
represents the number of included factors.
eigenvals : {ndarray, Series}
Array of eigenvalues (nvar,).
eigenvecs : {ndarray, DataFrame}
Array of eigenvectors. (nvar, nvar).
Notes
-----
This is a simple function wrapper around the PCA class. See PCA for
more information and additional methods.
"""
pc = PCA(data, ncomp=ncomp, standardize=standardize, demean=demean,
normalize=normalize, gls=gls, weights=weights, method=method)
return (pc.factors, pc.loadings, pc.projection, pc.rsquare, pc.ic,
            pc.eigenvals, pc.eigenvecs)

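# Usage sketch for the `pca` convenience wrapper defined above:
import numpy as np
from statsmodels.multivariate.pca import pca

_x = np.random.default_rng(7).standard_normal((100, 10))
_factors, _loadings, _proj, _rsquare, _ic, _evals, _evecs = pca(_x, ncomp=3)
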
def endog_names(self):
"""Names of endogenous variables"""
if self._endog_names is not None:
return self._endog_names
else:
if self.endog is not None:
return self.data.ynames
else:
d = 0
n = self.corr.shape[0] - 1
while n > 0:
d += 1
n //= 10
return [('var%0' + str(d) + 'd') % i
                    for i in range(self.corr.shape[0])]

def fit(self, maxiter=50, tol=1e-8, start=None, opt_method='BFGS',
opt=None, em_iter=3):
"""
Estimate factor model parameters.
Parameters
----------
maxiter : int
Maximum number of iterations for iterative estimation algorithms
tol : float
Stopping criteria (error tolerance) for iterative estimation
algorithms
start : array_like
Starting values, currently only used for ML estimation
opt_method : str
Optimization method for ML estimation
opt : dict-like
Keyword arguments passed to optimizer, only used for ML estimation
em_iter : int
The number of EM iterations before starting gradient optimization,
only used for ML estimation.
Returns
-------
FactorResults
Results class instance.
"""
method = self.method.lower()
if method == 'pa':
return self._fit_pa(maxiter=maxiter, tol=tol)
elif method == 'ml':
return self._fit_ml(start, em_iter, opt_method, opt)
else:
msg = "Unknown factor extraction approach '%s'" % self.method
            raise ValueError(msg)

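# Usage sketch (assumes the statsmodels Factor class these methods belong to):
import numpy as np
from statsmodels.multivariate.factor import Factor

_endog = np.random.default_rng(8).standard_normal((200, 6))
_res = Factor(_endog, n_factor=2, method='pa').fit()     # principal axis
_res_ml = Factor(_endog, n_factor=2, method='ml').fit()  # maximum likelihood
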
def _fit_pa(self, maxiter=50, tol=1e-8):
"""
Extract factors using the iterative principal axis method
Parameters
----------
maxiter : int
Maximum number of iterations for communality estimation
tol : float
If `norm(communality - last_communality) < tolerance`,
estimation stops
Returns
-------
results : FactorResults instance
"""
R = self.corr.copy() # inplace modification below
# Parameter validation
self.n_comp = matrix_rank(R)
if self.n_factor > self.n_comp:
raise ValueError('n_factor must be smaller or equal to the rank'
' of endog! %d > %d' %
(self.n_factor, self.n_comp))
    if maxiter <= 0:
        raise ValueError('maxiter must be larger than 0! Got %d instead' %
                         (maxiter))
if tol <= 0 or tol > 0.01:
raise ValueError('tolerance must be larger than 0 and smaller than'
' 0.01! Got %f instead' % (tol))
# Initial communality estimation
if self.smc:
c = 1 - 1 / np.diag(inv(R))
else:
c = np.ones(len(R))
# Iterative communality estimation
eigenvals = None
for i in range(maxiter):
# Get eigenvalues/eigenvectors of R with diag replaced by
# communality
for j in range(len(R)):
R[j, j] = c[j]
L, V = eigh(R, UPLO='U')
c_last = np.array(c)
ind = np.argsort(L)
ind = ind[::-1]
L = L[ind]
n_pos = (L > 0).sum()
V = V[:, ind]
eigenvals = np.array(L)
# Select eigenvectors with positive eigenvalues
n = np.min([n_pos, self.n_factor])
sL = np.diag(np.sqrt(L[:n]))
V = V[:, :n]
# Calculate new loadings and communality
A = V.dot(sL)
c = np.power(A, 2).sum(axis=1)
if norm(c_last - c) < tol:
break
self.eigenvals = eigenvals
self.communality = c
self.uniqueness = 1 - c
self.loadings = A
    return FactorResults(self)

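# Sketch of the SMC start used above: for a correlation matrix R, the squared
# multiple correlation of variable j on the others is 1 - 1/inv(R)[j, j],
# which seeds the communalities when smc=True.
import numpy as np

_R = np.array([[1.0, 0.6, 0.3],
               [0.6, 1.0, 0.4],
               [0.3, 0.4, 1.0]])
_smc = 1 - 1 / np.diag(np.linalg.inv(_R))
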
def loglike(self, par):
"""
Evaluate the log-likelihood function.
Parameters
----------
par : ndarray or tuple of 2 ndarray's
The model parameters, either a packed representation of
the model parameters or a 2-tuple containing a `k_endog x
n_factor` matrix of factor loadings and a `k_endog` vector
of uniquenesses.
Returns
-------
float
The value of the log-likelihood evaluated at par.
"""
if type(par) is np.ndarray:
uniq, load = self._unpack(par)
else:
load, uniq = par[0], par[1]
loadu = load / uniq[:, None]
lul = np.dot(load.T, loadu)
# log|GG' + S|
# Using matrix determinant lemma:
# |GG' + S| = |I + G'S^{-1}G|*|S|
lul.flat[::lul.shape[0]+1] += 1
_, ld = np.linalg.slogdet(lul)
v = np.sum(np.log(uniq)) + ld
# tr((GG' + S)^{-1}C)
# Using Sherman-Morrison-Woodbury
w = np.sum(1 / uniq)
b = np.dot(load.T, self.corr / uniq[:, None])
b = np.linalg.solve(lul, b)
b = np.dot(loadu, b)
w -= np.trace(b)
# Scaled log-likelihood
    return -(v + w) / (2*self.k_endog)

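# Numeric check of the determinant-lemma step used above:
# |L L' + diag(u)| = |I + L' diag(1/u) L| * prod(u).
import numpy as np

_rng = np.random.default_rng(9)
_L = _rng.standard_normal((5, 2))
_u = _rng.uniform(0.5, 1.5, 5)
_lhs = np.linalg.slogdet(_L @ _L.T + np.diag(_u))[1]
_rhs = (np.linalg.slogdet(np.eye(2) + _L.T @ (_L / _u[:, None]))[1]
        + np.log(_u).sum())
assert np.allclose(_lhs, _rhs)
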
def score(self, par):
"""
Evaluate the score function (first derivative of loglike).
Parameters
----------
par : ndarray or tuple of 2 ndarray's
The model parameters, either a packed representation of
the model parameters or a 2-tuple containing a `k_endog x
n_factor` matrix of factor loadings and a `k_endog` vector
of uniquenesses.
Returns
-------
ndarray
The score function evaluated at par.
"""
if type(par) is np.ndarray:
uniq, load = self._unpack(par)
else:
load, uniq = par[0], par[1]
# Center term of SMW
loadu = load / uniq[:, None]
c = np.dot(load.T, loadu)
c.flat[::c.shape[0]+1] += 1
d = np.linalg.solve(c, load.T)
# Precompute these terms
lud = np.dot(loadu, d)
cu = (self.corr / uniq) / uniq[:, None]
r = np.dot(cu, load)
lul = np.dot(lud.T, load)
luz = np.dot(cu, lul)
# First term
du = 2*np.sqrt(uniq) * (1/uniq - (d * load.T).sum(0) / uniq**2)
dl = 2*(loadu - np.dot(lud, loadu))
# Second term
h = np.dot(lud, cu)
f = np.dot(h, lud.T)
du -= 2*np.sqrt(uniq) * (np.diag(cu) - 2*np.diag(h) + np.diag(f))
dl -= 2*r
dl += 2*np.dot(lud, r)
dl += 2*luz
dl -= 2*np.dot(lud, luz)
# Cannot use _pack because we are working with the square root
# uniquenesses directly.
    return -np.concatenate((du, dl.T.flat)) / (2*self.k_endog)

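# Sketch of a finite-difference check of `score` against `loglike`, assuming
# the packed parameterisation (square-root uniquenesses followed by the
# flattened loadings):
import numpy as np
from scipy.optimize import check_grad
from statsmodels.multivariate.factor import Factor

_x = np.random.default_rng(10).standard_normal((300, 5))
_mod = Factor(_x, n_factor=2, method='ml')
_par = np.random.default_rng(11).uniform(0.5, 1.0, 5 + 2 * 5)
print(check_grad(_mod.loglike, _mod.score, _par))  # should be near zero
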
def _fit_ml(self, start, em_iter, opt_method, opt):
"""estimate Factor model using Maximum Likelihood
"""
# Starting values
if start is None:
load, uniq = self._fit_ml_em(em_iter)
start = self._pack(load, uniq)
elif len(start) == 2:
if len(start[1]) != start[0].shape[0]:
msg = "Starting values have incompatible dimensions"
raise ValueError(msg)
start = self._pack(start[0], start[1])
else:
raise ValueError("Invalid starting values")
def nloglike(par):
return -self.loglike(par)
def nscore(par):
return -self.score(par)
# Do the optimization
if opt is None:
opt = _opt_defaults
r = minimize(nloglike, start, jac=nscore, method=opt_method,
options=opt)
if not r.success:
warnings.warn("Fitting did not converge")
par = r.x
uniq, load = self._unpack(par)
if uniq.min() < 1e-10:
warnings.warn("Some uniquenesses are nearly zero")
# Rotate solution to satisfy IC3 of Bai and Li
load = self._rotate(load, uniq)
self.uniqueness = uniq
self.communality = 1 - uniq
self.loadings = load
self.mle_retvals = r
    return FactorResults(self)

def _fit_ml_em(self, iter, random_state=None):
"""estimate Factor model using EM algorithm
"""
# Starting values
if random_state is None:
random_state = np.random.RandomState(3427)
load = 0.1 * random_state.standard_normal(size=(self.k_endog, self.n_factor))
uniq = 0.5 * np.ones(self.k_endog)
for k in range(iter):
loadu = load / uniq[:, None]
f = np.dot(load.T, loadu)
f.flat[::f.shape[0]+1] += 1
r = np.linalg.solve(f, loadu.T)
q = np.dot(loadu.T, load)
h = np.dot(r, load)
c = load - np.dot(load, h)
c /= uniq[:, None]
g = np.dot(q, r)
e = np.dot(g, self.corr)
d = np.dot(loadu.T, self.corr) - e
a = np.dot(d, c)
a -= np.dot(load.T, c)
a.flat[::a.shape[0]+1] += 1
b = np.dot(self.corr, c)
load = np.linalg.solve(a, b.T).T
uniq = np.diag(self.corr) - (load * d.T).sum(1)
    return load, uniq

def _rotate(self, load, uniq):
"""rotate loadings for MLE
"""
# Rotations used in ML estimation.
load, s, _ = np.linalg.svd(load, 0)
load *= s
if self.nobs is None:
nobs = 1
else:
nobs = self.nobs
cm = np.dot(load.T, load / uniq[:, None]) / nobs
_, f = np.linalg.eig(cm)
load = np.dot(load, f)
    return load

def rotate(self, method):
"""
Apply rotation, inplace modification of this Results instance
Parameters
----------
method : str
Rotation to be applied. Allowed methods are varimax,
quartimax, biquartimax, equamax, oblimin, parsimax,
parsimony, biquartimin, promax.
Returns
-------
None : nothing returned, modifications are inplace
Notes
-----
Warning: 'varimax', 'quartimax' and 'oblimin' are verified against R or
Stata. Some rotation methods such as promax do not produce the same
results as the R or Stata default functions.
See Also
--------
factor_rotation : subpackage that implements rotation methods
"""
self.rotation_method = method
if method not in ['varimax', 'quartimax', 'biquartimax',
'equamax', 'oblimin', 'parsimax', 'parsimony',
'biquartimin', 'promax']:
raise ValueError('Unknown rotation method %s' % (method))
if method in ['varimax', 'quartimax', 'biquartimax', 'equamax',
'parsimax', 'parsimony', 'biquartimin']:
self.loadings, T = rotate_factors(self.loadings_no_rot, method)
elif method == 'oblimin':
self.loadings, T = rotate_factors(self.loadings_no_rot,
'quartimin')
elif method == 'promax':
self.loadings, T = promax(self.loadings_no_rot)
else:
raise ValueError('rotation method not recognized')
    self.rotation_matrix = T

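# Usage sketch: `rotate` modifies the FactorResults instance in place.
import numpy as np
from statsmodels.multivariate.factor import Factor

_x = np.random.default_rng(12).standard_normal((200, 6))
_res = Factor(_x, n_factor=2).fit()
_res.rotate('varimax')
# _res.loadings now holds the varimax-rotated pattern; the rotation itself
# is available in _res.rotation_matrix.
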
def _corr_factors(self):
"""correlation of factors implied by rotation
If the rotation is oblique, then the factors are correlated.
currently not cached
Returns
-------
corr_f : ndarray
correlation matrix of rotated factors, assuming initial factors are
orthogonal
"""
T = self.rotation_matrix
corr_f = T.T.dot(T)
    return corr_f

def factor_score_params(self, method='bartlett'):
"""
Compute factor scoring coefficient matrix
The coefficient matrix is not cached.
Parameters
----------
method : 'bartlett' or 'regression'
Method to use for factor scoring.
'regression' can be abbreviated to `reg`
Returns
-------
coeff_matrix : ndarray
matrix s to compute factors f from a standardized endog ys.
``f = ys dot s``
Notes
-----
    The `regression` method follows the Stata definition.
    Methods bartlett and regression are verified against Stata.
    Two unofficial methods, 'ols' and 'gls', produce similar factor scores
    but are not verified.
See Also
--------
statsmodels.multivariate.factor.FactorResults.factor_scoring
"""
L = self.loadings
#TODO: check row versus column convention for T
uni = 1 - self.communality #self.uniqueness
if method == 'bartlett':
s_mat = np.linalg.inv(L.T.dot(L/(uni[:,None]))).dot(L.T / uni).T
elif method.startswith('reg'):
corr = self.model.corr
corr_f = self._corr_factors()
# if orthogonal then corr_f is just eye
s_mat = corr_f.dot(L.T.dot(np.linalg.inv(corr))).T
elif method == 'ols':
# not verified
corr = self.model.corr
corr_f = self._corr_factors()
s_mat = corr_f.dot(np.linalg.pinv(L)).T
elif method == 'gls':
# not verified
#s_mat = np.linalg.inv(1*np.eye(L.shape[1]) + L.T.dot(L/(uni[:,None])))
corr = self.model.corr
corr_f = self._corr_factors()
s_mat = np.linalg.inv(np.linalg.inv(corr_f) + L.T.dot(L/(uni[:,None])))
s_mat = s_mat.dot(L.T / uni).T
else:
        raise ValueError('method not available, use "bartlett" '
                         'or "regression"')
    return s_mat

def factor_scoring(self, endog=None, method='bartlett', transform=True):
"""
factor scoring: compute factors for endog
    If endog was not provided when creating the factor class, then
    a standardized endog needs to be provided here.
Parameters
----------
method : 'bartlett' or 'regression'
Method to use for factor scoring.
'regression' can be abbreviated to `reg`
transform : bool
If transform is true and endog is provided, then it will be
standardized using mean and scale of original data, which has to
be available in this case.
If transform is False, then a provided endog will be used unchanged.
The original endog in the Factor class will
always be standardized if endog is None, independently of `transform`.
Returns
-------
factor_score : ndarray
        estimated factors using scoring matrix s and standardized endog ys
``f = ys dot s``
Notes
-----
Status: transform option is experimental and might change.
See Also
--------
statsmodels.multivariate.factor.FactorResults.factor_score_params
"""
if transform is False and endog is not None:
# no transformation in this case
endog = np.asarray(endog)
else:
# we need to standardize with the original mean and scale
if self.model.endog is not None:
m = self.model.endog.mean(0)
s = self.model.endog.std(ddof=1, axis=0)
if endog is None:
endog = self.model.endog
else:
endog = np.asarray(endog)
else:
raise ValueError('If transform is True, then `endog` needs ' +
'to be available in the Factor instance.')
endog = (endog - m) / s
s_mat = self.factor_score_params(method=method)
factors = endog.dot(s_mat)
    return factors

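# Usage sketch tying the two scoring methods above together:
import numpy as np
from statsmodels.multivariate.factor import Factor

_x = np.random.default_rng(13).standard_normal((300, 6))
_res = Factor(_x, n_factor=2).fit()
_s = _res.factor_score_params(method='bartlett')  # coefficient matrix
_f = _res.factor_scoring(method='bartlett')       # standardized endog @ _s
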
def summary(self):
"""Summary"""
summ = summary2.Summary()
summ.add_title('Factor analysis results')
loadings_no_rot = pd.DataFrame(
self.loadings_no_rot,
columns=["factor %d" % (i)
for i in range(self.loadings_no_rot.shape[1])],
index=self.endog_names
)
if hasattr(self, "eigenvals"):
# eigenvals not available for ML method
eigenvals = pd.DataFrame(
[self.eigenvals], columns=self.endog_names, index=[''])
summ.add_dict({'': 'Eigenvalues'})
summ.add_df(eigenvals)
communality = pd.DataFrame([self.communality],
columns=self.endog_names, index=[''])
summ.add_dict({'': ''})
summ.add_dict({'': 'Communality'})
summ.add_df(communality)
summ.add_dict({'': ''})
summ.add_dict({'': 'Pre-rotated loadings'})
summ.add_df(loadings_no_rot)
summ.add_dict({'': ''})
if self.rotation_method is not None:
loadings = pd.DataFrame(
self.loadings,
columns=["factor %d" % (i)
for i in range(self.loadings.shape[1])],
index=self.endog_names
)
summ.add_dict({'': '%s rotated loadings' % (self.rotation_method)})
summ.add_df(loadings)
    return summ

def get_loadings_frame(self, style='display', sort_=True, threshold=0.3,
highlight_max=True, color_max='yellow',
decimals=None):
"""get loadings matrix as DataFrame or pandas Styler
Parameters
----------
style : 'display' (default), 'raw' or 'strings'
Style to use for display
        * 'raw' returns just a DataFrame of the loadings matrix; no options
          are applied
        * 'display' adds sorting and styling as defined by other keywords
        * 'strings' returns a DataFrame with string elements, with optional
          sorting and suppression of small loading coefficients.
    sort_ : bool
        If True, the rows of the DataFrame are sorted by the contribution of
        each factor. Applies if style is either 'display' or 'strings'.
    threshold : float
        If the threshold is larger than zero, then loading coefficients are
        either colored white (if style is 'display') or replaced by an empty
        string (if style is 'strings').
    highlight_max : bool
        This adds a background color to the largest coefficient in each row.
color_max : html color
default is 'yellow'. color for background of row maximum
decimals : None or int
If None, then pandas default precision applies. Otherwise values are
rounded to the specified decimals. If style is 'display', then the
underlying dataframe is not changed. If style is 'strings', then
values are rounded before conversion to strings.
Returns
-------
loadings : DataFrame or pandas Styler instance
The return is a pandas Styler instance, if style is 'display' and
at least one of highlight_max, threshold or decimals is applied.
Otherwise, the returned loadings is a DataFrame.
Examples
--------
>>> mod = Factor(df, 3, smc=True)
>>> res = mod.fit()
>>> res.get_loadings_frame(style='display', decimals=3, threshold=0.2)
To get a sorted DataFrame, all styling options need to be turned off:
>>> df_sorted = res.get_loadings_frame(style='display',
... highlight_max=False, decimals=None, threshold=0)
    Options except for highlighting are available for plain text or LaTeX
    usage:
>>> lds = res_u.get_loadings_frame(style='strings', decimals=3,
... threshold=0.3)
>>> print(lds.to_latex())
"""
loadings_df = pd.DataFrame(
self.loadings,
columns=["factor %d" % (i)
for i in range(self.loadings.shape[1])],
index=self.endog_names
)
if style not in ['raw', 'display', 'strings']:
msg = "style has to be one of 'raw', 'display', 'strings'"
raise ValueError(msg)
if style == 'raw':
return loadings_df
# add sorting and some formatting
if sort_ is True:
loadings_df2 = loadings_df.copy()
n_f = len(loadings_df2)
high = np.abs(loadings_df2.values).argmax(1)
loadings_df2['high'] = high
loadings_df2['largest'] = np.abs(loadings_df.values[np.arange(n_f), high])
loadings_df2.sort_values(by=['high', 'largest'], ascending=[True, False], inplace=True)
loadings_df = loadings_df2.drop(['high', 'largest'], axis=1)
if style == 'display':
sty = None
if threshold > 0:
def color_white_small(val):
"""
Takes a scalar and returns a string with
the css property `'color: white'` for small values, black otherwise.
takes threshold from outer scope
"""
color = 'white' if np.abs(val) < threshold else 'black'
return 'color: %s' % color
try:
sty = loadings_df.style.map(color_white_small)
except AttributeError:
# Deprecated in pandas 2.1
sty = loadings_df.style.applymap(color_white_small)
if highlight_max is True:
def highlight_max(s):
'''
highlight the maximum in a Series yellow.
'''
s = np.abs(s)
is_max = s == s.max()
return ['background-color: '+ color_max if v else '' for v in is_max]
if sty is None:
sty = loadings_df.style
sty = sty.apply(highlight_max, axis=1)
if decimals is not None:
if sty is None:
sty = loadings_df.style
sty.format("{:.%sf}" % decimals)
if sty is None:
return loadings_df
else:
return sty
if style == 'strings':
ld = loadings_df
if decimals is not None:
ld = ld.round(decimals)
ld = ld.astype(str)
if threshold > 0:
ld[loadings_df.abs() < threshold] = ''
            return ld

def plot_scree(self, ncomp=None):
"""
Plot of the ordered eigenvalues and variance explained for the loadings
Parameters
----------
    ncomp : int, optional
        Number of loadings to include in the plot. If None, will
        include as many as the maximum possible number of loadings.
Returns
-------
Figure
Handle to the figure.
"""
_import_mpl()
from .plots import plot_scree
    return plot_scree(self.eigenvals, self.n_comp, ncomp)

def plot_loadings(self, loading_pairs=None, plot_prerotated=False):
"""
Plot factor loadings in 2-d plots
Parameters
----------
    loading_pairs : None or a list of tuples
        Specify plots. Each tuple (i, j) represents one figure; i and j are
        the loading numbers for the x-axis and y-axis, respectively. If
        `None`, all combinations of the loadings will be plotted.
    plot_prerotated : bool
        If True, the loadings before rotation is applied will be plotted.
        If False, rotated loadings will be plotted.
Returns
-------
figs : a list of figure handles
"""
_import_mpl()
from .plots import plot_loadings
if self.rotation_method is None:
plot_prerotated = True
loadings = self.loadings_no_rot if plot_prerotated else self.loadings
if plot_prerotated:
title = 'Prerotated Factor Pattern'
else:
title = '%s Rotated Factor Pattern' % (self.rotation_method)
var_explained = self.eigenvals / self.n_comp * 100
return plot_loadings(loadings, loading_pairs=loading_pairs,
title=title, row_names=self.endog_names,
                         percent_variance=var_explained)

def fitted_cov(self):
"""
Returns the fitted covariance matrix.
"""
c = np.dot(self.loadings, self.loadings.T)
c.flat[::c.shape[0]+1] += self.uniqueness
    return c

def uniq_stderr(self, kurt=0):
"""
The standard errors of the uniquenesses.
Parameters
----------
kurt : float
Excess kurtosis
Notes
-----
If excess kurtosis is known, provide as `kurt`. Standard
errors are only available if the model was fit using maximum
likelihood. If `endog` is not provided, `nobs` must be
provided to obtain standard errors.
These are asymptotic standard errors. See Bai and Li (2012)
for conditions under which the standard errors are valid.
The standard errors are only applicable to the original,
unrotated maximum likelihood solution.
"""
if self.fa_method.lower() != "ml":
msg = "Standard errors only available under ML estimation"
raise ValueError(msg)
if self.nobs is None:
msg = "nobs is required to obtain standard errors."
raise ValueError(msg)
v = self.uniqueness**2 * (2 + kurt)
    return np.sqrt(v / self.nobs)

def load_stderr(self):
"""
The standard errors of the loadings.
Standard errors are only available if the model was fit using
maximum likelihood. If `endog` is not provided, `nobs` must be
provided to obtain standard errors.
These are asymptotic standard errors. See Bai and Li (2012)
for conditions under which the standard errors are valid.
The standard errors are only applicable to the original,
unrotated maximum likelihood solution.
"""
if self.fa_method.lower() != "ml":
msg = "Standard errors only available under ML estimation"
raise ValueError(msg)
if self.nobs is None:
msg = "nobs is required to obtain standard errors."
raise ValueError(msg)
v = np.outer(self.uniqueness, np.ones(self.loadings.shape[1]))
    return np.sqrt(v / self.nobs)

def mv_test(self, hypotheses=None, skip_intercept_test=False):
"""
Linear hypotheses testing
Parameters
----------
%(hypotheses_doc)s
skip_intercept_test : bool
If true, then testing the intercept is skipped, the model is not
changed.
        Note: If a term has a numerically insignificant effect, then
        an exception may be raised because of empty arrays. This can
        happen for the intercept if the data has been demeaned.
Returns
-------
results: MultivariateTestResults
Notes
-----
Testing the linear hypotheses
L * params * M = 0
where `params` is the regression coefficient matrix for the
linear model y = x * params
    If the model is not specified using the formula interface, then the
hypotheses test each included exogenous variable, one at a time. In
most applications with categorical variables, the ``from_formula``
interface should be preferred when specifying a model since it
provides knowledge about the model when specifying the hypotheses.
"""
if hypotheses is None:
if (
hasattr(self, "data")
and self.data is not None
and hasattr(self.data, "model_spec")
):
# TODO: patsy migration
mgr = FormulaManager()
terms = mgr.get_term_name_slices(self.data.model_spec)
hypotheses = []
for key in terms:
if skip_intercept_test and (
key == "Intercept" or key == mgr.intercept_term
):
continue
L_contrast = np.eye(self.exog.shape[1])[terms[key], :]
test_name = str(key)
if key == mgr.intercept_term:
test_name = "Intercept"
hypotheses.append([test_name, L_contrast, None])
else:
hypotheses = []
for i in range(self.exog.shape[1]):
name = "x%d" % (i)
L = np.zeros([1, self.exog.shape[1]])
L[0, i] = 1
hypotheses.append([name, L, None])
results = _multivariate_ols_test(
hypotheses, self._fittedmod, self.exog_names, self.endog_names
)
    return MultivariateTestResults(results, self.endog_names, self.exog_names)

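# Usage sketch (assumes the statsmodels MANOVA class that owns mv_test):
import numpy as np
import pandas as pd
from statsmodels.multivariate.manova import MANOVA

_rng = np.random.default_rng(14)
_df = pd.DataFrame({'y1': _rng.standard_normal(60),
                    'y2': _rng.standard_normal(60),
                    'g': np.repeat(['a', 'b', 'c'], 20)})
_mod = MANOVA.from_formula('y1 + y2 ~ g', data=_df)
print(_mod.mv_test())
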
def plot_scree(eigenvals, total_var, ncomp=None, x_label='factor'):
"""
Plot of the ordered eigenvalues and variance explained for the loadings
Parameters
----------
eigenvals : array_like
The eigenvalues
total_var : float
the total variance (for plotting percent variance explained)
    ncomp : int, optional
        Number of factors to include in the plot. If None, will
        include as many as the maximum possible number of loadings.
x_label : str
label of x-axis
Returns
-------
Figure
Handle to the figure.
"""
fig = plt.figure()
ncomp = len(eigenvals) if ncomp is None else ncomp
vals = eigenvals
vals = vals[:ncomp]
# vals = np.cumsum(vals)
ax = fig.add_subplot(121)
ax.plot(np.arange(ncomp), vals[: ncomp], 'b-o')
ax.autoscale(tight=True)
xlim = np.array(ax.get_xlim())
sp = xlim[1] - xlim[0]
xlim += 0.02 * np.array([-sp, sp])
ax.set_xticks(np.arange(ncomp))
ax.set_xlim(xlim)
ylim = np.array(ax.get_ylim())
scale = 0.02
sp = ylim[1] - ylim[0]
ylim += scale * np.array([-sp, sp])
ax.set_ylim(ylim)
ax.set_title('Scree Plot')
ax.set_ylabel('Eigenvalue')
ax.set_xlabel(x_label)
per_variance = vals / total_var
cumper_variance = np.cumsum(per_variance)
ax = fig.add_subplot(122)
ax.plot(np.arange(ncomp), per_variance[: ncomp], 'b-o')
ax.plot(np.arange(ncomp), cumper_variance[: ncomp], 'g--o')
ax.autoscale(tight=True)
xlim = np.array(ax.get_xlim())
sp = xlim[1] - xlim[0]
xlim += 0.02 * np.array([-sp, sp])
ax.set_xticks(np.arange(ncomp))
ax.set_xlim(xlim)
ylim = np.array(ax.get_ylim())
scale = 0.02
sp = ylim[1] - ylim[0]
ylim += scale * np.array([-sp, sp])
ax.set_ylim(ylim)
ax.set_title('Variance Explained')
ax.set_ylabel('Proportion')
ax.set_xlabel(x_label)
ax.legend(['Proportion', 'Cumulative'], loc=5)
fig.tight_layout()
    return fig

def plot_loadings(loadings, col_names=None, row_names=None,
loading_pairs=None, percent_variance=None,
title='Factor patterns'):
"""
Plot factor loadings in 2-d plots
Parameters
----------
loadings : array_like
Each column is a component (or factor)
col_names : a list of strings
column names of `loadings`
row_names : a list of strings
row names of `loadings`
loading_pairs : None or a list of tuples
Specify plots. Each tuple (i, j) represents one figure; i and j are
the loading numbers for the x-axis and y-axis, respectively. If `None`,
all combinations of the loadings will be plotted.
percent_variance : array_like
The percent variance explained by each factor.
Returns
-------
figs : a list of figure handles
"""
k_var, n_factor = loadings.shape
if loading_pairs is None:
loading_pairs = []
for i in range(n_factor):
for j in range(i + 1,n_factor):
loading_pairs.append([i, j])
if col_names is None:
col_names = ["factor %d" % i for i in range(n_factor)]
if row_names is None:
row_names = ["var %d" % i for i in range(k_var)]
figs = []
for item in loading_pairs:
i = item[0]
j = item[1]
fig = plt.figure(figsize=(7, 7))
figs.append(fig)
ax = fig.add_subplot(111)
for k in range(loadings.shape[0]):
plt.text(loadings[k, i], loadings[k, j],
row_names[k], fontsize=12)
ax.plot(loadings[:, i], loadings[:, j], 'bo')
ax.set_title(title)
if percent_variance is not None:
x_str = f'{col_names[i]} ({percent_variance[i]:.1f}%)'
y_str = f'{col_names[j]} ({percent_variance[j]:.1f}%)'
ax.set_xlabel(x_str)
ax.set_ylabel(y_str)
else:
ax.set_xlabel(col_names[i])
ax.set_ylabel(col_names[j])
v = 1.05
xlim = np.array([-v, v])
ylim = np.array([-v, v])
ax.plot(xlim, [0, 0], 'k--')
ax.plot([0, 0], ylim, 'k--')
ax.set_aspect('equal', 'datalim')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
fig.tight_layout()
return figs | Plot factor loadings in 2-d plots
Parameters
----------
loadings : array_like
Each column is a component (or factor)
col_names : a list of strings
column names of `loadings`
row_names : a list of strings
row names of `loadings`
loading_pairs : None or a list of tuples
Specify plots. Each tuple (i, j) represents one figure; i and j are
the loading numbers for the x-axis and y-axis, respectively. If `None`,
all combinations of the loadings will be plotted.
percent_variance : array_like
The percent variance explained by each factor.
Returns
-------
figs : a list of figure handles | plot_loadings | python | statsmodels/statsmodels | statsmodels/multivariate/plots.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/plots.py | BSD-3-Clause |
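A usage sketch with invented loadings and variance percentages (import path per the record above):
import numpy as np
from statsmodels.multivariate.plots import plot_loadings

rng = np.random.default_rng(0)
loadings = rng.uniform(-1, 1, size=(6, 3))            # 6 variables, 3 factors
figs = plot_loadings(loadings,
                     col_names=['f1', 'f2', 'f3'],
                     loading_pairs=[(0, 1), (0, 2)],  # plot two pairs only
                     percent_variance=[45.0, 30.0, 15.0])
print(len(figs))                                      # 2, one figure per pair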
def _fit(self, tolerance=1e-8):
"""Fit the model
A ValueError is raised if there are singular values smaller than the
tolerance. The treatment of singular arrays might change in future.
Parameters
----------
tolerance : float
eigenvalue tolerance; values smaller than this are considered 0
"""
nobs, k_yvar = self.endog.shape
nobs, k_xvar = self.exog.shape
k = np.min([k_yvar, k_xvar])
x = np.array(self.exog)
x = x - x.mean(0)
y = np.array(self.endog)
y = y - y.mean(0)
ux, sx, vx = svd(x, 0)
# vx_ds = vx.T divided by sx
vx_ds = vx.T
mask = sx > tolerance
if mask.sum() < len(mask):
raise ValueError('exog is collinear.')
vx_ds[:, mask] /= sx[mask]
uy, sy, vy = svd(y, 0)
# vy_ds = vy.T divided by sy
vy_ds = vy.T
mask = sy > tolerance
if mask.sum() < len(mask):
raise ValueError('endog is collinear.')
vy_ds[:, mask] /= sy[mask]
u, s, v = svd(ux.T.dot(uy), 0)
# Correct any roundoff
self.cancorr = np.array([max(0, min(s[i], 1)) for i in range(len(s))])
self.x_cancoef = vx_ds.dot(u[:, :k])
self.y_cancoef = vy_ds.dot(v.T[:, :k]) | Fit the model
A ValueError is raised if there are singular values smaller than the
tolerance. The treatment of singular arrays might change in future.
Parameters
----------
tolerance : float
eigenvalue tolerance; values smaller than this are considered 0 | _fit | python | statsmodels/statsmodels | statsmodels/multivariate/cancorr.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/cancorr.py | BSD-3-Clause |
def corr_test(self):
"""Approximate F test
Perform multivariate statistical tests of the hypothesis that
there is no canonical correlation between endog and exog.
For each canonical correlation, testing its significance based on
Wilks' lambda.
Returns
-------
CanCorrTestResults instance
"""
nobs, k_yvar = self.endog.shape
nobs, k_xvar = self.exog.shape
eigenvals = np.power(self.cancorr, 2)
stats = pd.DataFrame(columns=['Canonical Correlation', "Wilks' lambda",
'Num DF','Den DF', 'F Value','Pr > F'],
index=list(range(len(eigenvals) - 1, -1, -1)))
prod = 1
for i in range(len(eigenvals) - 1, -1, -1):
prod *= 1 - eigenvals[i]
p = k_yvar - i
q = k_xvar - i
r = (nobs - k_yvar - 1) - (p - q + 1) / 2
u = (p * q - 2) / 4
df1 = p * q
if p ** 2 + q ** 2 - 5 > 0:
t = np.sqrt(((p * q) ** 2 - 4) / (p ** 2 + q ** 2 - 5))
else:
t = 1
df2 = r * t - 2 * u
lmd = np.power(prod, 1 / t)
F = (1 - lmd) / lmd * df2 / df1
stats.loc[i, 'Canonical Correlation'] = self.cancorr[i]
stats.loc[i, "Wilks' lambda"] = prod
stats.loc[i, 'Num DF'] = df1
stats.loc[i, 'Den DF'] = df2
stats.loc[i, 'F Value'] = F
pval = scipy.stats.f.sf(F, df1, df2)
stats.loc[i, 'Pr > F'] = pval
'''
# Wilk's Chi square test of each canonical correlation
df = (p - i + 1) * (q - i + 1)
chi2 = a * np.log(prod)
pval = stats.chi2.sf(chi2, df)
stats.loc[i, 'Canonical correlation'] = self.cancorr[i]
stats.loc[i, 'Chi-square'] = chi2
stats.loc[i, 'DF'] = df
stats.loc[i, 'Pr > ChiSq'] = pval
'''
ind = stats.index.values[::-1]
stats = stats.loc[ind, :]
# Multivariate tests (remember x has mean removed)
stats_mv = multivariate_stats(eigenvals,
k_yvar, k_xvar, nobs - k_xvar - 1)
return CanCorrTestResults(stats, stats_mv) | Approximate F test
Perform multivariate statistical tests of the hypothesis that
there is no canonical correlation between endog and exog.
For each canonical correlation, testing its significance based on
Wilks' lambda.
Returns
-------
CanCorrTestResults instance | corr_test | python | statsmodels/statsmodels | statsmodels/multivariate/cancorr.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/cancorr.py | BSD-3-Clause |
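A sketch of the expected call pattern on synthetic data. It assumes CanCorr computes its fit on construction (as the `_fit` method above suggests) and that the result object exposes the per-correlation table as .stats; treat both as assumptions:
import numpy as np
from statsmodels.multivariate.cancorr import CanCorr

rng = np.random.default_rng(0)
x = rng.standard_normal((100, 3))
y = x @ rng.standard_normal((3, 2)) + 0.5 * rng.standard_normal((100, 2))
cc = CanCorr(y, x)    # endog first, then exog
res = cc.corr_test()
print(cc.cancorr)     # canonical correlations in [0, 1]
print(res.stats)      # assumed attribute: sequential Wilks' lambda F tests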
def _multivariate_ols_fit(endog, exog, method='svd', tolerance=1e-8):
"""
Solve multivariate linear model y = x * params
where y is dependent variables, x is independent variables
Parameters
----------
endog : array_like
each column is a dependent variable
exog : array_like
each column is an independent variable
method : str
'svd' - Singular value decomposition
'pinv' - Moore-Penrose pseudoinverse
tolerance : float, a small positive number
Tolerance for eigenvalues. Values smaller than tolerance are considered
zero.
Returns
-------
a tuple of matrices or values necessary for hypotheses testing
References
----------
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm
Notes
-----
Status: experimental and incomplete
"""
y = endog
x = exog
nobs, k_endog = y.shape #noqa: F841
nobs1, k_exog = x.shape
if nobs != nobs1:
raise ValueError('x(n=%d) and y(n=%d) should have the same number of '
'rows!' % (nobs1, nobs))
# Calculate the matrices necessary for hypotheses testing
df_resid = nobs - k_exog
if method == 'pinv':
# Regression coefficients matrix
pinv_x = pinv(x)
params = pinv_x.dot(y)
# inverse of x'x
inv_cov = pinv_x.dot(pinv_x.T)
if matrix_rank(inv_cov,tol=tolerance) < k_exog:
raise ValueError('Covariance of x singular!')
# Sums of squares and cross-products of residuals
# Y'Y - (X * params)'(X * params)
t = x.dot(params)
sscpr = np.subtract(y.T.dot(y), t.T.dot(t))
return (params, df_resid, inv_cov, sscpr)
elif method == 'svd':
u, s, v = svd(x, 0)
if (s > tolerance).sum() < len(s):
raise ValueError('Covariance of x singular!')
invs = 1. / s
params = v.T.dot(np.diag(invs)).dot(u.T).dot(y)
inv_cov = v.T.dot(np.diag(np.power(invs, 2))).dot(v)
t = np.diag(s).dot(v).dot(params)
sscpr = np.subtract(y.T.dot(y), t.T.dot(t))
return (params, df_resid, inv_cov, sscpr)
else:
raise ValueError('%s is not a supported method!' % method) | Solve multivariate linear model y = x * params
where y is dependent variables, x is independent variables
Parameters
----------
endog : array_like
each column is a dependent variable
exog : array_like
each column is an independent variable
method : str
'svd' - Singular value decomposition
'pinv' - Moore-Penrose pseudoinverse
tolerance : float, a small positive number
Tolerance for eigenvalues. Values smaller than tolerance are considered
zero.
Returns
-------
a tuple of matrices or values necessary for hypotheses testing
References
----------
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm
Notes
-----
Status: experimental and incomplete | _multivariate_ols_fit | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
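The 'svd' branch can be reproduced with plain numpy. A sketch on synthetic data; all names below are local to the example:
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((50, 3))
y = x @ np.array([[1.0, 0.5], [-0.3, 0.2], [0.0, 1.0]])
y += rng.standard_normal((50, 2))
u, s, v = np.linalg.svd(x, full_matrices=False)
params = v.T @ np.diag(1.0 / s) @ u.T @ y     # pinv(x) @ y via the SVD
inv_cov = v.T @ np.diag(1.0 / s**2) @ v       # inv(x'x)
t = np.diag(s) @ v @ params
sscpr = y.T @ y - t.T @ t                     # residual SSCP matrix
print(np.allclose(params, np.linalg.pinv(x) @ y))   # True: both routes agree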
def multivariate_stats(eigenvals,
r_err_sscp,
r_contrast, df_resid, tolerance=1e-8):
"""
For multivariate linear model Y = X * B
Testing hypotheses
L*B*M = 0
where L is the contrast matrix, B is the parameter matrix of the
multivariate linear model and M is the dependent variable transform matrix.
T = L*inv(X'X)*L'
H = M'B'L'*inv(T)*LBM
E = M'(Y'Y - B'X'XB)M
Parameters
----------
eigenvals : ndarray
The eigenvalues of inv(E + H)*H
r_err_sscp : int
Rank of E + H
r_contrast : int
Rank of T matrix
df_resid : int
Residual degrees of freedom (n_samples minus n_variables of X)
tolerance : float
eigenvalues smaller than this are considered 0
Returns
-------
A DataFrame
References
----------
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm
"""
v = df_resid
p = r_err_sscp
q = r_contrast
s = np.min([p, q])
ind = eigenvals > tolerance
# n_e = ind.sum()
eigv2 = eigenvals[ind]
eigv1 = np.array([i / (1 - i) for i in eigv2])
m = (np.abs(p - q) - 1) / 2
n = (v - p - 1) / 2
cols = ['Value', 'Num DF', 'Den DF', 'F Value', 'Pr > F']
index = ["Wilks' lambda", "Pillai's trace",
"Hotelling-Lawley trace", "Roy's greatest root"]
results = pd.DataFrame(columns=cols,
index=index)
def fn(x):
return np.real([x])[0]
results.loc["Wilks' lambda", 'Value'] = fn(np.prod(1 - eigv2))
results.loc["Pillai's trace", 'Value'] = fn(eigv2.sum())
results.loc["Hotelling-Lawley trace", 'Value'] = fn(eigv1.sum())
results.loc["Roy's greatest root", 'Value'] = fn(eigv1.max())
r = v - (p - q + 1)/2
u = (p*q - 2) / 4
df1 = p * q
if p*p + q*q - 5 > 0:
t = np.sqrt((p*p*q*q - 4) / (p*p + q*q - 5))
else:
t = 1
df2 = r*t - 2*u
lmd = results.loc["Wilks' lambda", 'Value']
lmd = np.power(lmd, 1 / t)
F = (1 - lmd) / lmd * df2 / df1
results.loc["Wilks' lambda", 'Num DF'] = df1
results.loc["Wilks' lambda", 'Den DF'] = df2
results.loc["Wilks' lambda", 'F Value'] = F
pval = stats.f.sf(F, df1, df2)
results.loc["Wilks' lambda", 'Pr > F'] = pval
V = results.loc["Pillai's trace", 'Value']
df1 = s * (2*m + s + 1)
df2 = s * (2*n + s + 1)
F = df2 / df1 * V / (s - V)
results.loc["Pillai's trace", 'Num DF'] = df1
results.loc["Pillai's trace", 'Den DF'] = df2
results.loc["Pillai's trace", 'F Value'] = F
pval = stats.f.sf(F, df1, df2)
results.loc["Pillai's trace", 'Pr > F'] = pval
U = results.loc["Hotelling-Lawley trace", 'Value']
if n > 0:
b = (p + 2*n) * (q + 2*n) / 2 / (2*n + 1) / (n - 1)
df1 = p * q
df2 = 4 + (p*q + 2) / (b - 1)
c = (df2 - 2) / 2 / n
F = df2 / df1 * U / c
else:
df1 = s * (2*m + s + 1)
df2 = s * (s*n + 1)
F = df2 / df1 / s * U
results.loc["Hotelling-Lawley trace", 'Num DF'] = df1
results.loc["Hotelling-Lawley trace", 'Den DF'] = df2
results.loc["Hotelling-Lawley trace", 'F Value'] = F
pval = stats.f.sf(F, df1, df2)
results.loc["Hotelling-Lawley trace", 'Pr > F'] = pval
sigma = results.loc["Roy's greatest root", 'Value']
r = np.max([p, q])
df1 = r
df2 = v - r + q
F = df2 / df1 * sigma
results.loc["Roy's greatest root", 'Num DF'] = df1
results.loc["Roy's greatest root", 'Den DF'] = df2
results.loc["Roy's greatest root", 'F Value'] = F
pval = stats.f.sf(F, df1, df2)
results.loc["Roy's greatest root", 'Pr > F'] = pval
return results | For multivariate linear model Y = X * B
Testing hypotheses
L*B*M = 0
where L is the contrast matrix, B is the parameter matrix of the
multivariate linear model and M is the dependent variable transform matrix.
T = L*inv(X'X)*L'
H = M'B'L'*inv(T)*LBM
E = M'(Y'Y - B'X'XB)M
Parameters
----------
eigenvals : ndarray
The eigenvalues of inv(E + H)*H
r_err_sscp : int
Rank of E + H
r_contrast : int
Rank of T matrix
df_resid : int
Residual degrees of freedom (n_samples minus n_variables of X)
tolerance : float
eigenvalues smaller than this are considered 0
Returns
-------
A DataFrame
References
----------
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm | multivariate_stats | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
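A sketch of feeding the function the eigenvalues it expects, with random positive semidefinite matrices standing in for the H and E SSCP matrices (import path per the record; the data are synthetic):
import numpy as np
from numpy.linalg import eigvals, matrix_rank, solve
from statsmodels.multivariate.multivariate_ols import multivariate_stats

rng = np.random.default_rng(0)
a = rng.standard_normal((4, 4))
E = a @ a.T                                         # error SSCP, full rank
b = rng.standard_normal((4, 2))
H = b @ b.T                                         # hypothesis SSCP, rank 2
eigv = np.sort(np.real(eigvals(solve(E + H, H))))   # eigenvalues of inv(E+H)*H
table = multivariate_stats(eigv, matrix_rank(E + H), 2, 20)
print(table)    # the four statistics with approximate F tests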
def _multivariate_test(hypotheses, exog_names, endog_names, fn):
"""
Multivariate linear model hypotheses testing
For y = x * params, where y are the dependent variables and x are the
independent variables, testing L * params * M = 0 where L is the contrast
matrix for hypotheses testing and M is the transformation matrix for
transforming the dependent variables in y.
Algorithm:
T = L*inv(X'X)*L'
H = M'B'L'*inv(T)*LBM
E = M'(Y'Y - B'X'XB)M
where H and E correspond to the numerator and denominator of a univariate
F-test. Then find the eigenvalues of inv(H + E)*H from which the
multivariate test statistics are calculated.
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm
Parameters
----------
%(hypotheses_doc)s
k_xvar : int
The number of independent variables
k_yvar : int
The number of dependent variables
fn : function
a function fn(contrast_L, transform_M) that returns E, H, q, df_resid
where q is the rank of T matrix
Returns
-------
results : MANOVAResults
"""
k_xvar = len(exog_names)
k_yvar = len(endog_names)
results = {}
for hypo in hypotheses:
if len(hypo) == 2:
name, L = hypo
M = None
C = None
elif len(hypo) == 3:
name, L, M = hypo
C = None
elif len(hypo) == 4:
name, L, M, C = hypo
else:
raise ValueError('hypotheses must be a tuple of length 2, 3 or 4.'
' len(hypotheses)=%d' % len(hypo))
mgr = FormulaManager()
if any(isinstance(j, str) for j in L):
L = mgr.get_linear_constraints(L, variable_names=exog_names).constraint_matrix
else:
if not isinstance(L, np.ndarray) or len(L.shape) != 2:
raise ValueError('Contrast matrix L must be a 2-d array!')
if L.shape[1] != k_xvar:
raise ValueError('Contrast matrix L should have the same '
'number of columns as exog! %d != %d' %
(L.shape[1], k_xvar))
if M is None:
M = np.eye(k_yvar)
elif any(isinstance(j, str) for j in M):
M = mgr.get_linear_constraints(M, variable_names=endog_names).constraint_matrix.T
else:
if M is not None:
if not isinstance(M, np.ndarray) or len(M.shape) != 2:
raise ValueError('Transform matrix M must be a 2-d array!')
if M.shape[0] != k_yvar:
raise ValueError('Transform matrix M should have the same '
'number of rows as the number of columns '
'of endog! %d != %d' %
(M.shape[0], k_yvar))
if C is None:
C = np.zeros([L.shape[0], M.shape[1]])
elif not isinstance(C, np.ndarray):
raise ValueError('Constant matrix C must be a 2-d array!')
if C.shape[0] != L.shape[0]:
raise ValueError('contrast L and constant C must have the same '
'number of rows! %d!=%d'
% (L.shape[0], C.shape[0]))
if C.shape[1] != M.shape[1]:
raise ValueError('transform M and constant C must have the same '
'number of columns! %d!=%d'
% (M.shape[1], C.shape[1]))
E, H, q, df_resid = fn(L, M, C)
EH = np.add(E, H)
p = matrix_rank(EH)
# eigenvalues of inv(E + H)H
eigv2 = np.sort(eigvals(solve(EH, H)))
stat_table = multivariate_stats(eigv2, p, q, df_resid)
results[name] = {'stat': stat_table, 'contrast_L': L,
'transform_M': M, 'constant_C': C,
'E': E, 'H': H}
return results | Multivariate linear model hypotheses testing
For y = x * params, where y are the dependent variables and x are the
independent variables, testing L * params * M = 0 where L is the contrast
matrix for hypotheses testing and M is the transformation matrix for
transforming the dependent variables in y.
Algorithm:
T = L*inv(X'X)*L'
H = M'B'L'*inv(T)*LBM
E = M'(Y'Y - B'X'XB)M
where H and E correspond to the numerator and denominator of a univariate
F-test. Then find the eigenvalues of inv(H + E)*H from which the
multivariate test statistics are calculated.
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm
Parameters
----------
%(hypotheses_doc)s
k_xvar : int
The number of independent variables
k_yvar : int
The number of dependent variables
fn : function
a function fn(contrast_L, transform_M) that returns E, H, q, df_resid
where q is the rank of T matrix
Returns
-------
results : MANOVAResults | _multivariate_test | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
def mv_test(self, hypotheses=None, skip_intercept_test=False):
"""
Linear hypotheses testing
Parameters
----------
%(hypotheses_doc)s
skip_intercept_test : bool
If true, then testing the intercept is skipped, the model is not
changed.
Note: If a term has a numerically insignificant effect, then
an exception may be raised because of empty arrays. This can
happen for the intercept if the data has been demeaned.
Returns
-------
results: _MultivariateOLSResults
Notes
-----
Tests hypotheses of the form
L * params * M = C
where `params` is the regression coefficient matrix for the
linear model y = x * params, `L` is the contrast matrix, `M` is the
dependent variable transform matrix and C is the constant matrix.
"""
mgr = FormulaManager()
k_xvar = len(self.exog_names)
if hypotheses is None:
if self.model_spec is not None:
terms = mgr.get_term_name_slices(self.model_spec)
hypotheses = []
for key in terms:
if skip_intercept_test and (key == 'Intercept' or key == mgr.intercept_term):
continue
L_contrast = np.eye(k_xvar)[terms[key], :]
test_name = str(key)
if key == mgr.intercept_term:
test_name = 'Intercept'
hypotheses.append([test_name, L_contrast, None])
else:
hypotheses = []
for i in range(k_xvar):
name = 'x%d' % (i)
L = np.zeros([1, k_xvar])
L[0, i] = 1
hypotheses.append([name, L, None])
results = _multivariate_ols_test(hypotheses, self._fittedmod,
self.exog_names, self.endog_names)
return MultivariateTestResults(results,
self.endog_names,
self.exog_names) | Linear hypotheses testing
Parameters
----------
%(hypotheses_doc)s
skip_intercept_test : bool
If true, then testing the intercept is skipped, the model is not
changed.
Note: If a term has a numerically insignificant effect, then
an exception may be raised because of empty arrays. This can
happen for the intercept if the data has been demeaned.
Returns
-------
results: _MultivariateOLSResults
Notes
-----
Tests hypotheses of the form
L * params * M = C
where `params` is the regression coefficient matrix for the
linear model y = x * params, `L` is the contrast matrix, `M` is the
dependent variable transform matrix and C is the constant matrix. | mv_test | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
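A sketch of the hypotheses format through the public MANOVA front end, which routes through this machinery; the data are random and the contrast is purely illustrative:
import numpy as np
import pandas as pd
from statsmodels.multivariate.manova import MANOVA

rng = np.random.default_rng(0)
df = pd.DataFrame({'y1': rng.standard_normal(60),
                   'y2': rng.standard_normal(60),
                   'group': np.repeat(['a', 'b', 'c'], 20)})
mod = MANOVA.from_formula('y1 + y2 ~ group', data=df)
print(mod.mv_test().summary())          # default: one test per model term
# custom (name, L, M) hypothesis: contrast the two group dummies
hyp = [('b_vs_c', np.array([[0, 1, -1]]), None)]
print(mod.mv_test(hypotheses=hyp).summary())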
def _hat_matrix_diag(self):
"""Diagonal of the hat_matrix for OLS
Notes
-----
temporarily calculated here; this should go to the model or influence class
"""
# computation based on OLSInfluence method
exog = self.model.exog
pinv_wexog = np.linalg.pinv(exog)
return (exog * pinv_wexog.T).sum(1) | Diagonal of the hat_matrix for OLS
Notes
-----
temporarily calculated here; this should go to the model or influence class | _hat_matrix_diag | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
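The one-liner avoids forming the full n x n hat matrix; a quick numpy check of the identity diag(X pinv(X)) = rowsum(X * pinv(X)'):
import numpy as np

rng = np.random.default_rng(0)
exog = np.column_stack([np.ones(30), rng.standard_normal((30, 2))])
pinv_wexog = np.linalg.pinv(exog)
h = (exog * pinv_wexog.T).sum(1)    # diagonal only, O(n*k) memory
H = exog @ pinv_wexog               # full hat matrix, for comparison only
print(np.allclose(h, np.diag(H)))   # True
print(np.isclose(h.sum(), 3.0))     # trace equals the number of parameters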
def summary(self, yname=None, xname=None, title=None, alpha=.05,
yname_list=None):
"""
Summarize the Regression Results.
Parameters
----------
yname : str, optional
The name of the endog variable in the tables. The default is `y`.
xname : list[str], optional
The names for the exogenous variables, default is "var_xx".
Must match the number of parameters in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
Returns
-------
Summary
Class that holds the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : Class that hold summary results.
"""
# used in generic part of io summary
self.nobs = self.model.nobs
self.df_model = self.model.k_endog * (self.model.k_exog - 1)
top_left = [('Dep. Variable:', None),
('Model:', [self.model.__class__.__name__]),
('Method:', [self.method]),
('Date:', None),
('Time:', None),
# ('converged:', ["%s" % self.mle_retvals['converged']]),
]
top_right = [('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None),
# ('Pseudo R-squ.:', ["%#6.4g" % self.prsquared]),
# ('Log-Likelihood:', None),
# ('LL-Null:', ["%#8.5g" % self.llnull]),
# ('LLR p-value:', ["%#6.4g" % self.llr_pvalue])
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
# boiler plate
from statsmodels.iolib.summary import Summary
smry = Summary()
yname, yname_list = self._get_endog_name(yname, yname_list)
# for top of table
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
# for parameters, etc
smry.add_table_params(self, yname=yname_list, xname=xname, alpha=alpha,
use_t=self.use_t)
if hasattr(self, 'constraints'):
smry.add_extra_txt(['Model has been estimated subject to linear '
'equality constraints.'])
return smry | Summarize the Regression Results.
Parameters
----------
yname : str, optional
The name of the endog variable in the tables. The default is `y`.
xname : list[str], optional
The names for the exogenous variables, default is "var_xx".
Must match the number of parameters in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
Returns
-------
Summary
Class that holds the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : Class that hold summary results. | summary | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
def summary_frame(self):
"""
Return results as a multiindex dataframe
"""
df = []
for key in self.results:
tmp = self.results[key]['stat'].copy()
tmp.loc[:, 'Effect'] = key
df.append(tmp.reset_index())
df = pd.concat(df, axis=0)
df = df.set_index(['Effect', 'index'])
df.index.set_names(['Effect', 'Statistic'], inplace=True)
return df | Return results as a multiindex dataframe | summary_frame | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
def summary(self, show_contrast_L=False, show_transform_M=False,
show_constant_C=False):
"""
Summary of test results
Parameters
----------
show_contrast_L : bool
Whether to show contrast_L matrix
show_transform_M : bool
Whether to show transform_M matrix
show_constant_C : bool
Whether to show the constant_C
"""
summ = summary2.Summary()
summ.add_title('Multivariate linear model')
for key in self.results:
summ.add_dict({'': ''})
df = self.results[key]['stat'].copy()
df = df.reset_index()
c = list(df.columns)
c[0] = key
df.columns = c
df.index = ['', '', '', '']
summ.add_df(df)
if show_contrast_L:
summ.add_dict({key: ' contrast L='})
df = pd.DataFrame(self.results[key]['contrast_L'],
columns=self.exog_names)
summ.add_df(df)
if show_transform_M:
summ.add_dict({key: ' transform M='})
df = pd.DataFrame(self.results[key]['transform_M'],
index=self.endog_names)
summ.add_df(df)
if show_constant_C:
summ.add_dict({key: ' constant C='})
df = pd.DataFrame(self.results[key]['constant_C'])
summ.add_df(df)
return summ | Summary of test results
Parameters
----------
show_contrast_L : bool
Whether to show contrast_L matrix
show_transform_M : bool
Whether to show transform_M matrix
show_constant_C : bool
Whether to show the constant_C | summary | python | statsmodels/statsmodels | statsmodels/multivariate/multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/multivariate_ols.py | BSD-3-Clause |
def test_1factor():
"""
# R code:
r = 0.4
p = 4
ii = seq(0, p-1)
ii = outer(ii, ii, "-")
ii = abs(ii)
cm = r^ii
fa = factanal(covmat=cm, factors=1)
print(fa, digits=10)
"""
r = 0.4
p = 4
ii = np.arange(p)
cm = r ** np.abs(np.subtract.outer(ii, ii))
fa = Factor(corr=cm, n_factor=1, method='ml')
rslt = fa.fit()
if rslt.loadings[0, 0] < 0:
rslt.loadings[:, 0] *= -1
# R solution, but our likelihood is higher
# uniq = np.r_[0.8392472054, 0.5820958187, 0.5820958187, 0.8392472054]
# load = np.asarray([[0.4009399224, 0.6464550935, 0.6464550935,
# 0.4009399224]]).T
# l1 = fa.loglike(fa._pack(load, uniq))
# l2 = fa.loglike(fa._pack(rslt.loadings, rslt.uniqueness))
# So use a smoke test
uniq = np.r_[0.85290232, 0.60916033, 0.55382266, 0.82610666]
load = np.asarray([[0.38353316], [0.62517171], [0.66796508],
[0.4170052]])
assert_allclose(load, rslt.loadings, rtol=1e-3, atol=1e-3)
assert_allclose(uniq, rslt.uniqueness, rtol=1e-3, atol=1e-3)
assert_equal(rslt.df, 2) | # R code:
r = 0.4
p = 4
ii = seq(0, p-1)
ii = outer(ii, ii, "-")
ii = abs(ii)
cm = r^ii
fa = factanal(covmat=cm, factors=1)
print(fa, digits=10) | test_1factor | python | statsmodels/statsmodels | statsmodels/multivariate/tests/test_ml_factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/tests/test_ml_factor.py | BSD-3-Clause |
def test_2factor():
"""
# R code:
r = 0.4
p = 6
ii = seq(0, p-1)
ii = outer(ii, ii, "-")
ii = abs(ii)
cm = r^ii
factanal(covmat=cm, factors=2)
"""
r = 0.4
p = 6
ii = np.arange(p)
cm = r ** np.abs(np.subtract.outer(ii, ii))
fa = Factor(corr=cm, n_factor=2, nobs=100, method='ml')
rslt = fa.fit()
for j in 0, 1:
if rslt.loadings[0, j] < 0:
rslt.loadings[:, j] *= -1
uniq = np.r_[0.782, 0.367, 0.696, 0.696, 0.367, 0.782]
assert_allclose(uniq, rslt.uniqueness, rtol=1e-3, atol=1e-3)
loads = [np.r_[0.323, 0.586, 0.519, 0.519, 0.586, 0.323],
np.r_[0.337, 0.538, 0.187, -0.187, -0.538, -0.337]]
for k in 0, 1:
if np.dot(loads[k], rslt.loadings[:, k]) < 0:
loads[k] *= -1
assert_allclose(loads[k], rslt.loadings[:, k], rtol=1e-3, atol=1e-3)
assert_equal(rslt.df, 4)
# Smoke test for standard errors
e = np.asarray([0.11056836, 0.05191071, 0.09836349,
0.09836349, 0.05191071, 0.11056836])
assert_allclose(rslt.uniq_stderr, e, atol=1e-4)
e = np.asarray([[0.08842151, 0.08842151], [0.06058582, 0.06058582],
[0.08339874, 0.08339874], [0.08339874, 0.08339874],
[0.06058582, 0.06058582], [0.08842151, 0.08842151]])
assert_allclose(rslt.load_stderr, e, atol=1e-4) | # R code:
r = 0.4
p = 6
ii = seq(0, p-1)
ii = outer(ii, ii, "-")
ii = abs(ii)
cm = r^ii
factanal(covmat=cm, factors=2) | test_2factor | python | statsmodels/statsmodels | statsmodels/multivariate/tests/test_ml_factor.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/tests/test_ml_factor.py | BSD-3-Clause |
def test_manova_test_input_validation():
mod = MANOVA.from_formula('Basal + Occ + Max ~ Loc', data=X)
hypothesis = [('test', np.array([[1, 1, 1]]), None)]
mod.mv_test(hypothesis)
hypothesis = [('test', np.array([[1, 1]]), None)]
assert_raises(ValueError, mod.mv_test, hypothesis)
"""
assert_raises_regex(ValueError,
('Contrast matrix L should have the same number of '
'columns as exog! 2 != 3'),
mod.mv_test, hypothesis)
"""
hypothesis = [('test', np.array([[1, 1, 1]]), np.array([[1], [1], [1]]))]
mod.mv_test(hypothesis)
hypothesis = [('test', np.array([[1, 1, 1]]), np.array([[1], [1]]))]
assert_raises(ValueError, mod.mv_test, hypothesis)
"""
assert_raises_regex(ValueError,
('Transform matrix M should have the same number of '
'rows as the number of columns of endog! 2 != 3'),
mod.mv_test, hypothesis)
""" | assert_raises_regex(ValueError,
('Contrast matrix L should have the same number of '
'columns as exog! 2 != 3'),
mod.mv_test, hypothesis) | test_manova_test_input_validation | python | statsmodels/statsmodels | statsmodels/multivariate/tests/test_manova.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/tests/test_manova.py | BSD-3-Clause |
def compare_r_output_dogs_data(method, model):
''' Testing within-subject effect interacting with 2 between-subject effects
Compares with R car library Anova(, type=3) output
Note: The test statistics Pillai's trace, Wilks' lambda, Hotelling-Lawley
trace and Roy's greatest root are the same as the R output, but the
approximate F values and degrees of freedom can differ. This is because
this implementation is based on the SAS formulas [*]
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm
'''
# Repeated measures with orthogonal polynomial contrasts coding
mod = model.from_formula(
'Histamine0 + Histamine1 + Histamine3 + Histamine5 ~ Drug * Depleted',
data)
r = mod.fit(method=method)
r = r.mv_test()
a = [[2.68607660e-02, 4, 6, 5.43435304e+01, 7.59585610e-05],
[9.73139234e-01, 4, 6, 5.43435304e+01, 7.59585610e-05],
[3.62290202e+01, 4, 6, 5.43435304e+01, 7.59585610e-05],
[3.62290202e+01, 4, 6, 5.43435304e+01, 7.59585610e-05]]
assert_array_almost_equal(r['Intercept']['stat'].values, a, decimal=6)
a = [[8.39646619e-02, 8, 1.20000000e+01, 3.67658068e+00, 2.12614444e-02],
[1.18605382e+00, 8, 1.40000000e+01, 2.55003861e+00, 6.01270701e-02],
[7.69391362e+00, 8, 6.63157895e+00, 5.50814270e+00, 2.07392260e-02],
[7.25036952e+00, 4, 7.00000000e+00, 1.26881467e+01, 2.52669877e-03]]
assert_array_almost_equal(r['Drug']['stat'].values, a, decimal=6)
a = [[0.32048892, 4., 6., 3.18034906, 0.10002373],
[0.67951108, 4., 6., 3.18034906, 0.10002373],
[2.12023271, 4., 6., 3.18034906, 0.10002373],
[2.12023271, 4., 6., 3.18034906, 0.10002373]]
assert_array_almost_equal(r['Depleted']['stat'].values, a, decimal=6)
a = [[0.15234366, 8., 12., 2.34307678, 0.08894239],
[1.13013353, 8., 14., 2.27360606, 0.08553213],
[3.70989596, 8., 6.63157895, 2.65594824, 0.11370285],
[3.1145597, 4., 7., 5.45047947, 0.02582767]]
assert_array_almost_equal(r['Drug:Depleted']['stat'].values, a, decimal=6) | Testing within-subject effect interacting with 2 between-subject effects
Compares with R car library Anova(, type=3) output
Note: The test statistics Pillai's trace, Wilks' lambda, Hotelling-Lawley
trace and Roy's greatest root are the same as the R output, but the
approximate F values and degrees of freedom can differ. This is because
this implementation is based on the SAS formulas [*]
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm | compare_r_output_dogs_data | python | statsmodels/statsmodels | statsmodels/multivariate/tests/test_multivariate_ols.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/tests/test_multivariate_ols.py | BSD-3-Clause |
def Gf(T, ff):
"""
Subroutine for the gradient of f using numerical derivatives.
"""
k = T.shape[0]
ep = 1e-4
G = np.zeros((k, k))
for r in range(k):
for s in range(k):
dT = np.zeros((k, k))
dT[r, s] = ep
G[r, s] = (ff(T+dT)-ff(T-dT))/(2*ep)
return G | Subroutine for the gradient of f using numerical derivatives. | Gf | python | statsmodels/statsmodels | statsmodels/multivariate/factor_rotation/_gpa_rotation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/_gpa_rotation.py | BSD-3-Clause |
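A sanity check for the central-difference gradient, assuming Gf is importable from this module. For f(T) = 0.5*||T||_F^2 the analytic gradient is T itself:
import numpy as np
from statsmodels.multivariate.factor_rotation._gpa_rotation import Gf

def ff(T):
    return 0.5 * (T ** 2).sum()

T = np.random.default_rng(0).standard_normal((3, 3))
print(np.allclose(Gf(T, ff), T, atol=1e-6))   # True up to O(ep**2) error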
def test_target_rotation(self):
"""
Rotation towards target matrix example
http://www.stat.ucla.edu/research/gpa
"""
A = self.str2matrix("""
.830 -.396
.818 -.469
.777 -.470
.798 -.401
.786 .500
.672 .458
.594 .444
.647 .333
""")
H = self.str2matrix("""
.8 -.3
.8 -.4
.7 -.4
.9 -.4
.8 .5
.6 .4
.5 .4
.6 .3
""")
T = target_rotation(A, H)
L = A.dot(T)
L_required = self.str2matrix("""
0.84168 -0.37053
0.83191 -0.44386
0.79096 -0.44611
0.80985 -0.37650
0.77040 0.52371
0.65774 0.47826
0.58020 0.46189
0.63656 0.35255
""")
self.assertTrue(np.allclose(L, L_required, atol=1e-05))
T = target_rotation(A, H, full_rank=True)
L = A.dot(T)
self.assertTrue(np.allclose(L, L_required, atol=1e-05)) | Rotation towards target matrix example
http://www.stat.ucla.edu/research/gpa | test_target_rotation | python | statsmodels/statsmodels | statsmodels/multivariate/factor_rotation/tests/test_rotation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py | BSD-3-Clause |
def get_quartimin_example(cls):
A = cls.get_A()
table_required = cls.str2matrix("""
0.00000 0.42806 -0.46393 1.00000
1.00000 0.41311 -0.57313 0.25000
2.00000 0.38238 -0.36652 0.50000
3.00000 0.31850 -0.21011 0.50000
4.00000 0.20937 -0.13838 0.50000
5.00000 0.12379 -0.35583 0.25000
6.00000 0.04289 -0.53244 0.50000
7.00000 0.01098 -0.86649 0.50000
8.00000 0.00566 -1.65798 0.50000
9.00000 0.00558 -2.13212 0.25000
10.00000 0.00557 -2.49020 0.25000
11.00000 0.00557 -2.84585 0.25000
12.00000 0.00557 -3.20320 0.25000
13.00000 0.00557 -3.56143 0.25000
14.00000 0.00557 -3.92005 0.25000
15.00000 0.00557 -4.27885 0.25000
16.00000 0.00557 -4.63772 0.25000
17.00000 0.00557 -4.99663 0.25000
18.00000 0.00557 -5.35555 0.25000
""")
L_required = cls.str2matrix("""
0.891822 0.056015
0.953680 -0.023246
0.929150 -0.046503
0.876683 0.033658
0.013701 0.925000
-0.017265 0.821253
-0.052445 0.764953
0.085890 0.683115
""")
return A, table_required, L_required | )
L_required = cls.str2matrix( | get_quartimin_example | python | statsmodels/statsmodels | statsmodels/multivariate/factor_rotation/tests/test_rotation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py | BSD-3-Clause |
def test_orthomax(self):
"""
Quartimax example
http://www.stat.ucla.edu/research/gpa
"""
A = self.get_A()
def vgQ(L=None, A=None, T=None):
return orthomax_objective(L=L, A=A, T=T, gamma=0, return_gradient=True)
L, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='orthogonal')
table_required = self.str2matrix("""
0.00000 -0.72073 -0.65498 1.00000
1.00000 -0.88561 -0.34614 2.00000
2.00000 -1.01992 -1.07152 1.00000
3.00000 -1.02237 -1.51373 0.50000
4.00000 -1.02269 -1.96205 0.50000
5.00000 -1.02273 -2.41116 0.50000
6.00000 -1.02273 -2.86037 0.50000
7.00000 -1.02273 -3.30959 0.50000
8.00000 -1.02273 -3.75881 0.50000
9.00000 -1.02273 -4.20804 0.50000
10.00000 -1.02273 -4.65726 0.50000
11.00000 -1.02273 -5.10648 0.50000
""")
L_required = self.str2matrix("""
0.89876 0.19482
0.93394 0.12974
0.90213 0.10386
0.87651 0.17128
0.31558 0.87647
0.25113 0.77349
0.19801 0.71468
0.30786 0.65933
""")
self.assertTrue(np.allclose(table, table_required, atol=1e-05))
self.assertTrue(np.allclose(L, L_required, atol=1e-05))
# oblimin criterion gives same result
def vgQ(L=None, A=None, T=None):
return oblimin_objective(L=L, A=A, T=T, gamma=0, rotation_method='orthogonal', return_gradient=True)
L_oblimin, phi2, T2, table2 = GPA(A, vgQ=vgQ,
rotation_method='orthogonal')
self.assertTrue(np.allclose(L, L_oblimin, atol=1e-05))
# derivative free quartimax
out = self.get_quartimax_example_derivative_free()
A, table_required, L_required = out
def ff(L=None, A=None, T=None):
return orthomax_objective(L=L, A=A, T=T, gamma=0, return_gradient=False)
L, phi, T, table = GPA(A, ff=ff, rotation_method='orthogonal')
self.assertTrue(np.allclose(table, table_required, atol=1e-05))
self.assertTrue(np.allclose(L, L_required, atol=1e-05)) | Quartimax example
http://www.stat.ucla.edu/research/gpa | test_orthomax | python | statsmodels/statsmodels | statsmodels/multivariate/factor_rotation/tests/test_rotation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py | BSD-3-Clause |
def test_equivalence_orthomax_oblimin(self):
"""
These criteria should be equivalent when restricted to orthogonal
rotation.
See Hartman 1976 page 299.
"""
A = self.get_A()
gamma = 0 # quartimax
def vgQ(L=None, A=None, T=None):
return orthomax_objective(L=L, A=A, T=T, gamma=gamma, return_gradient=True)
L_orthomax, phi, T, table = GPA(
A, vgQ=vgQ, rotation_method='orthogonal')
def vgQ(L=None, A=None, T=None):
return oblimin_objective(L=L, A=A, T=T, gamma=gamma, rotation_method='orthogonal', return_gradient=True)
L_oblimin, phi2, T2, table2 = GPA(A, vgQ=vgQ,
rotation_method='orthogonal')
self.assertTrue(np.allclose(L_orthomax, L_oblimin, atol=1e-05))
gamma = 1 # varimax
def vgQ(L=None, A=None, T=None):
return orthomax_objective(L=L, A=A, T=T, gamma=gamma, return_gradient=True)
L_orthomax, phi, T, table = GPA(
A, vgQ=vgQ, rotation_method='orthogonal')
def vgQ(L=None, A=None, T=None):
return oblimin_objective(L=L, A=A, T=T, gamma=gamma, rotation_method='orthogonal', return_gradient=True)
L_oblimin, phi2, T2, table2 = GPA(
A, vgQ=vgQ, rotation_method='orthogonal')
self.assertTrue(np.allclose(L_orthomax, L_oblimin, atol=1e-05)) | These criteria should be equivalent when restricted to orthogonal
rotation.
See Hartman 1976 page 299. | test_equivalence_orthomax_oblimin | python | statsmodels/statsmodels | statsmodels/multivariate/factor_rotation/tests/test_rotation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py | BSD-3-Clause |
def test_methods(self):
"""
Quartimax derivative free example
http://www.stat.ucla.edu/research/gpa
"""
# orthomax, oblimin and CF are tested indirectly
methods = ['quartimin', 'biquartimin',
'quartimax', 'biquartimax', 'varimax', 'equamax',
'parsimax', 'parsimony',
'target', 'partial_target']
for method in methods:
method_args = []
if method == 'target':
method_args = [self.get_H(), 'orthogonal']
self._test_template(method, *method_args)
method_args = [self.get_H(), 'oblique']
self._test_template(method, *method_args)
method_args = [self.get_H(), 'orthogonal']
self._test_template(method, *method_args,
algorithm2='analytic')
elif method == 'partial_target':
method_args = [self.get_H(), self.get_W()]
self._test_template(method, *method_args) | Quartimax derivative free example
http://www.stat.ucla.edu/research/gpa | test_methods | python | statsmodels/statsmodels | statsmodels/multivariate/factor_rotation/tests/test_rotation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py | BSD-3-Clause |
def _faa_di_bruno_partitions(n):
"""
Return all non-negative integer solutions of the diophantine equation
n*k_n + ... + 2*k_2 + 1*k_1 = n (1)
Parameters
----------
n : int
the r.h.s. of Eq. (1)
Returns
-------
partitions : list
Each solution is itself a list of the form `[(m, k_m), ...]`
for non-zero `k_m`. Notice that the index `m` is 1-based.
Examples
--------
>>> _faa_di_bruno_partitions(2)
[[(1, 2)], [(2, 1)]]
>>> for p in _faa_di_bruno_partitions(4):
... assert 4 == sum(m * k for (m, k) in p)
"""
if n < 1:
raise ValueError("Expected a positive integer; got %s instead" % n)
try:
return _faa_di_bruno_cache[n]
except KeyError:
# TODO: higher order terms
# solve Eq. (31) from Blinninkov & Moessner here
raise NotImplementedError('Higher order terms not yet implemented.') | Return all non-negative integer solutions of the diophantine equation
n*k_n + ... + 2*k_2 + 1*k_1 = n (1)
Parameters
----------
n : int
the r.h.s. of Eq. (1)
Returns
-------
partitions : list
Each solution is itself a list of the form `[(m, k_m), ...]`
for non-zero `k_m`. Notice that the index `m` is 1-based.
Examples
--------
>>> _faa_di_bruno_partitions(2)
[[(1, 2)], [(2, 1)]]
>>> for p in _faa_di_bruno_partitions(4):
... assert 4 == sum(m * k for (m, k) in p) | _faa_di_bruno_partitions | python | statsmodels/statsmodels | statsmodels/distributions/edgeworth.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/edgeworth.py | BSD-3-Clause |
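Since only cached solutions are available, here is a brute-force cross-check of the same diophantine equation. partitions_brute is a local helper, not part of the module, and its solution order may differ from the docstring:
from itertools import product

def partitions_brute(n):
    sols = []
    ranges = (range(n // m + 1) for m in range(1, n + 1))
    for ks in product(*ranges):
        if sum(m * k for m, k in enumerate(ks, start=1)) == n:
            sols.append([(m, k) for m, k in enumerate(ks, start=1) if k > 0])
    return sols

print(partitions_brute(2))   # [[(2, 1)], [(1, 2)]], same solutions as above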
def cumulant_from_moments(momt, n):
"""Compute n-th cumulant given moments.
Parameters
----------
momt : array_like
`momt[j]` contains `(j+1)`-th moment.
These can be raw moments around zero, or central moments
(in which case, `momt[0]` == 0).
n : int
which cumulant to calculate (must be >1)
Returns
-------
kappa : float
n-th cumulant.
"""
if n < 1:
raise ValueError("Expected a positive integer. Got %s instead." % n)
if len(momt) < n:
raise ValueError("%s-th cumulant requires %s moments, "
"only got %s." % (n, n, len(momt)))
kappa = 0.
for p in _faa_di_bruno_partitions(n):
r = sum(k for (m, k) in p)
term = (-1)**(r - 1) * factorial(r - 1)
for (m, k) in p:
term *= np.power(momt[m - 1] / factorial(m), k) / factorial(k)
kappa += term
kappa *= factorial(n)
return kappa | Compute n-th cumulant given moments.
Parameters
----------
momt : array_like
`momt[j]` contains `(j+1)`-th moment.
These can be raw moments around zero, or central moments
(in which case, `momt[0]` == 0).
n : int
which cumulant to calculate (must be >1)
Returns
-------
kappa : float
n-th cumulant. | cumulant_from_moments | python | statsmodels/statsmodels | statsmodels/distributions/edgeworth.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/edgeworth.py | BSD-3-Clause |
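A numeric check using the raw moments of a normal distribution, whose cumulants above the second vanish. This assumes the cached partitions cover n <= 4; the import path follows the record:
from statsmodels.distributions.edgeworth import cumulant_from_moments

mu, sig = 1.5, 2.0
momt = [mu,                                  # E[X]
        mu**2 + sig**2,                      # E[X^2]
        mu**3 + 3*mu*sig**2,                 # E[X^3]
        mu**4 + 6*mu**2*sig**2 + 3*sig**4]   # E[X^4]
print(cumulant_from_moments(momt, 1))   # 1.5, the mean
print(cumulant_from_moments(momt, 2))   # 4.0, the variance
print(cumulant_from_moments(momt, 3))   # ~0
print(cumulant_from_moments(momt, 4))   # ~0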
def prob2cdf_grid(probs):
"""Cumulative probabilities from cell provabilites on a grid
Parameters
----------
probs : array_like
Rectangular grid of cell probabilities.
Returns
-------
cdf : ndarray
Grid of cumulative probabilities with same shape as probs.
"""
cdf = np.asarray(probs).copy()
k = cdf.ndim
for i in range(k):
cdf = cdf.cumsum(axis=i)
return cdf | Cumulative probabilities from cell probabilities on a grid
Parameters
----------
probs : array_like
Rectangular grid of cell probabilities.
Returns
-------
cdf : ndarray
Grid of cumulative probabilities with same shape as probs. | prob2cdf_grid | python | statsmodels/statsmodels | statsmodels/distributions/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py | BSD-3-Clause |
def cdf2prob_grid(cdf, prepend=0):
"""Cell probabilities from cumulative probabilities on a grid.
Parameters
----------
cdf : array_like
Grid of cumulative probabilities with same shape as probs.
Returns
-------
probs : ndarray
Rectangular grid of cell probabilities.
"""
if prepend is None:
prepend = np._NoValue
prob = np.asarray(cdf).copy()
k = prob.ndim
for i in range(k):
prob = np.diff(prob, prepend=prepend, axis=i)
return prob | Cell probabilities from cumulative probabilities on a grid.
Parameters
----------
cdf : array_like
Grid of cumulative probabilities with same shape as probs.
Returns
-------
probs : ndarray
Rectangular grid of cell probabilities. | cdf2prob_grid | python | statsmodels/statsmodels | statsmodels/distributions/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py | BSD-3-Clause |
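prob2cdf_grid and cdf2prob_grid are inverse operations; a round trip on a 2x2 grid (import path per the records above):
import numpy as np
from statsmodels.distributions.tools import cdf2prob_grid, prob2cdf_grid

probs = np.array([[0.1, 0.2],
                  [0.3, 0.4]])
cdf = prob2cdf_grid(probs)     # cumulative sums along both axes
print(cdf)                     # [[0.1 0.3] [0.4 1. ]]
print(np.allclose(cdf2prob_grid(cdf), probs))   # True: cells recovered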
def average_grid(values, coords=None, _method="slicing"):
"""Compute average for each cell in grid using endpoints
Parameters
----------
values : array_like
Values on a grid that will be averaged over the corner points of each cell.
coords : None or list of array_like
Grid coordinates for each axis used to compute the volume of each cell.
If None, then averaged values are not rescaled.
_method : {"slicing", "convolve"}
Grid averaging is implemented using numpy "slicing" or using
scipy.signal "convolve".
Returns
-------
Grid with averaged cell values.
"""
k_dim = values.ndim
if _method == "slicing":
p = values.copy()
for d in range(k_dim):
# average (p[:-1] + p[1:]) / 2 over each axis
sl1 = [slice(None, None, None)] * k_dim
sl2 = [slice(None, None, None)] * k_dim
sl1[d] = slice(None, -1, None)
sl2[d] = slice(1, None, None)
sl1 = tuple(sl1)
sl2 = tuple(sl2)
p = (p[sl1] + p[sl2]) / 2
elif _method == "convolve":
from scipy import signal
p = signal.convolve(values, 0.5**k_dim * np.ones([2] * k_dim),
mode="valid")
if coords is not None:
dx = np.array(1)
for d in range(k_dim):
dx = dx[..., None] * np.diff(coords[d])
p = p * dx
return p | Compute average for each cell in grid using endpoints
Parameters
----------
values : array_like
Values on a grid that will be averaged over the corner points of each cell.
coords : None or list of array_like
Grid coordinates for each axis used to compute the volume of each cell.
If None, then averaged values are not rescaled.
_method : {"slicing", "convolve"}
Grid averaging is implemented using numpy "slicing" or using
scipy.signal "convolve".
Returns
-------
Grid with averaged cell values. | average_grid | python | statsmodels/statsmodels | statsmodels/distributions/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py | BSD-3-Clause |
def nearest_matrix_margins(mat, maxiter=100, tol=1e-8):
"""nearest matrix with uniform margins
Parameters
----------
mat : array_like, 2-D
Matrix that will be converted to have uniform margins.
Currently, `mat` has to be two dimensional.
maxiter : int
Maximum number of iterations.
tol : float
Tolerance for convergence, defined for difference between largest and
smallest margin in each dimension.
Returns
-------
ndarray, nearest matrix with uniform margins.
Notes
-----
This function is intended for internal use and will be generalized in
future. API will change.
changed in 0.14 to support k_dim > 2.
"""
pc = np.asarray(mat)
converged = False
for _ in range(maxiter):
pc0 = pc.copy()
for ax in range(pc.ndim):
axs = tuple([i for i in range(pc.ndim) if not i == ax])
pc0 /= pc.sum(axis=axs, keepdims=True)
pc = pc0
pc /= pc.sum()
# check convergence
mptps = []
for ax in range(pc.ndim):
axs = tuple([i for i in range(pc.ndim) if not i == ax])
marg = pc.sum(axis=axs, keepdims=False)
mptps.append(np.ptp(marg))
if max(mptps) < tol:
converged = True
break
if not converged:
from statsmodels.tools.sm_exceptions import ConvergenceWarning
warnings.warn("Iterations did not converge, maxiter reached",
ConvergenceWarning)
return pc | nearest matrix with uniform margins
Parameters
----------
mat : array_like, 2-D
Matrix that will be converted to have uniform margins.
Currently, `mat` has to be two dimensional.
maxiter : int
Maximum number of iterations.
tol : float
Tolerance for convergence, defined for difference between largest and
smallest margin in each dimension.
Returns
-------
ndarray, nearest matrix with uniform margins.
Notes
-----
This function is intended for internal use and will be generalized in
future. API will change.
changed in 0.14 to support k_dim > 2. | nearest_matrix_margins | python | statsmodels/statsmodels | statsmodels/distributions/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py | BSD-3-Clause |
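The loop is iterative proportional fitting; a small sketch showing the margins being uniformized (import path per the record):
import numpy as np
from statsmodels.distributions.tools import nearest_matrix_margins

m = np.array([[0.5, 0.1],
              [0.1, 0.3]])    # margins are (0.6, 0.4) on both axes
pc = nearest_matrix_margins(m)
print(pc.sum(axis=0))         # ~[0.5, 0.5]
print(pc.sum(axis=1))         # ~[0.5, 0.5]
print(pc.sum())               # 1.0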
def _rankdata_no_ties(x):
"""rankdata without ties for 2-d array
This is a simplified version for ranking data if there are no ties.
Works vectorized across columns.
See Also
--------
scipy.stats.rankdata
"""
nobs, k_vars = x.shape
ranks = np.ones((nobs, k_vars))
sidx = np.argsort(x, axis=0)
ranks[sidx, np.arange(k_vars)] = np.arange(1, nobs + 1)[:, None]
return ranks | rankdata without ties for 2-d array
This is a simplified version for ranking data if there are no ties.
Works vectorized across columns.
See Also
--------
scipy.stats.rankdata | _rankdata_no_ties | python | statsmodels/statsmodels | statsmodels/distributions/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py | BSD-3-Clause |
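A quick equivalence check against scipy.stats.rankdata on tie-free continuous data, re-implementing the ranking lines locally:
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = rng.standard_normal((8, 3))   # continuous draws, so no ties
ranks = np.ones(x.shape)
sidx = np.argsort(x, axis=0)
ranks[sidx, np.arange(x.shape[1])] = np.arange(1, x.shape[0] + 1)[:, None]
ref = np.apply_along_axis(stats.rankdata, 0, x)   # column-wise reference
print(np.allclose(ranks, ref))                    # True when there are no ties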
def frequencies_fromdata(data, k_bins, use_ranks=True):
"""count of observations in bins (histogram)
currently only for bivariate data
Parameters
----------
data : array_like
Bivariate data with observations in rows and two columns. Binning is
in the unit rectangle [0, 1]^2. If use_ranks is False, then data should be
in the unit interval.
k_bins : int
Number of bins along each dimension in the histogram
use_ranks : bool
If use_ranks is True, then data will be converted to ranks without
tie handling.
Returns
-------
bin counts : ndarray
Frequencies are the number of observations in a given bin.
Bin counts are a 2-dim array with k_bins rows and k_bins columns.
Notes
-----
This function is intended for internal use and will be generalized in
future. API will change.
"""
data = np.asarray(data)
k_dim = data.shape[-1]
k = k_bins + 1
g2 = _Grid([k] * k_dim, eps=0)
if use_ranks:
data = _rankdata_no_ties(data) / (data.shape[0] + 1)
# alternatives: scipy handles ties, but uses np.apply_along_axis
# rvs = stats.rankdata(rvs, axis=0) / (rvs.shape[0] + 1)
# rvs = (np.argsort(np.argsort(rvs, axis=0), axis=0) + 1
# ) / (rvs.shape[0] + 1)
freqr, _ = np.histogramdd(data, bins=g2.x_marginal)
return freqr | count of observations in bins (histogram)
currently only for bivariate data
Parameters
----------
data : array_like
Bivariate data with observations in rows and two columns. Binning is
in the unit rectangle [0, 1]^2. If use_ranks is False, then data should be
in the unit interval.
k_bins : int
Number of bins along each dimension in the histogram
use_ranks : bool
If use_ranks is True, then data will be converted to ranks without
tie handling.
Returns
-------
bin counts : ndarray
Frequencies are the number of observations in a given bin.
Bin counts are a 2-dim array with k_bins rows and k_bins columns.
Notes
-----
This function is intended for internal use and will be generalized in
future. API will change. | frequencies_fromdata | python | statsmodels/statsmodels | statsmodels/distributions/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py | BSD-3-Clause |
def approx_copula_pdf(copula, k_bins=10, force_uniform=True, use_pdf=False):
"""Histogram probabilities as approximation to a copula density.
Parameters
----------
copula : instance
Instance of a copula class. Only the ``pdf`` method is used.
k_bins : int
Number of bins along each dimension in the approximating histogram.
force_uniform : bool
If true, then the pdf grid will be adjusted to have uniform margins
using `nearest_matrix_margin`.
If false, then no adjustment is done and the margins may not be exactly
uniform.
use_pdf : bool
If false, then the grid cell probabilities will be computed from the
copula cdf.
If true, then the density, ``pdf``, is used and cell probabilities
are approximated by averaging the pdf of the cell corners. This is
only useful if the cdf is not available.
Returns
-------
bin probabilities : ndarray
Probability that random variable falls in given bin. This corresponds
to a discrete distribution, and is not scaled to bin size to form a
piecewise uniform, histogram density.
Bin probabilities are a k-dim array with k_bins segments in each
dimension.
Notes
-----
This function is intended for internal use and will be generalized in
future. API will change.
"""
k_dim = copula.k_dim
k = k_bins + 1
ks = tuple([k] * k_dim)
if use_pdf:
g = _Grid([k] * k_dim, eps=0.1 / k_bins)
pdfg = copula.pdf(g.x_flat).reshape(*ks)
# correct for bin size
pdfg *= 1 / k**k_dim
ag = average_grid(pdfg)
if force_uniform:
pdf_grid = nearest_matrix_margins(ag, maxiter=100, tol=1e-8)
else:
pdf_grid = ag / ag.sum()
else:
g = _Grid([k] * k_dim, eps=1e-6)
cdfg = copula.cdf(g.x_flat).reshape(*ks)
# correct for bin size
pdf_grid = cdf2prob_grid(cdfg, prepend=None)
# TODO: check boundary approximation, eg. undefined at zero
# for now just normalize
pdf_grid /= pdf_grid.sum()
return pdf_grid | Histogram probabilities as approximation to a copula density.
Parameters
----------
copula : instance
Instance of a copula class. Only the ``pdf`` method is used.
k_bins : int
Number of bins along each dimension in the approximating histogram.
force_uniform : bool
If true, then the pdf grid will be adjusted to have uniform margins
using `nearest_matrix_margin`.
If false, then no adjustment is done and the margins may not be exactly
uniform.
use_pdf : bool
If false, then the grid cell probabilities will be computed from the
copula cdf.
If true, then the density, ``pdf``, is used and cell probabilities
are approximated by averaging the pdf of the cell corners. This is
only useful if the cdf is not available.
Returns
-------
bin probabilities : ndarray
Probability that random variable falls in given bin. This corresponds
to a discrete distribution, and is not scaled to bin size to form a
piecewise uniform, histogram density.
Bin probabilities are a k-dim array with k_bins segments in each
dimension.
Notes
-----
This function is intended for internal use and will be generalized in
future. API will change. | approx_copula_pdf | python | statsmodels/statsmodels | statsmodels/distributions/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py | BSD-3-Clause |
def _eval_bernstein_1d(x, fvals, method="binom"):
"""Evaluate 1-dimensional bernstein polynomial given grid of values.
experimental, comparing methods
Parameters
----------
x : array_like
Values at which to evaluate the Bernstein polynomial.
fvals : ndarray
Grid values of coefficients for Bernstein polynomial basis in the
weighted sum.
method: "binom", "beta" or "bpoly"
Method to construct Bernstein polynomial basis, used for comparison
of parameterizations.
- "binom" uses pmf of Binomial distribution
- "beta" uses pdf of Beta distribution
- "bpoly" uses one interval in scipy.interpolate.BPoly
Returns
-------
Bernstein polynomial at evaluation points, weighted sum of Bernstein
polynomial basis.
"""
k_terms = fvals.shape[-1]
xx = np.asarray(x)
k = np.arange(k_terms).astype(float)
n = k_terms - 1.
if method.lower() == "binom":
# Divide by 0 RuntimeWarning here
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
poly_base = stats.binom.pmf(k, n, xx[..., None])
bp_values = (fvals * poly_base).sum(-1)
elif method.lower() == "bpoly":
bpb = interpolate.BPoly(fvals[:, None], [0., 1])
bp_values = bpb(x)
elif method.lower() == "beta":
# Divide by 0 RuntimeWarning here
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
poly_base = stats.beta.pdf(xx[..., None], k + 1, n - k + 1) / (n + 1)
bp_values = (fvals * poly_base).sum(-1)
else:
raise ValueError("method not recogized")
return bp_values | Evaluate 1-dimensional bernstein polynomial given grid of values.
experimental, comparing methods
Parameters
----------
x : array_like
Values at which to evaluate the Bernstein polynomial.
fvals : ndarray
Grid values of coefficients for Bernstein polynomial basis in the
weighted sum.
method: "binom", "beta" or "bpoly"
Method to construct Bernstein polynomial basis, used for comparison
of parameterizations.
- "binom" uses pmf of Binomial distribution
- "beta" uses pdf of Beta distribution
- "bpoly" uses one interval in scipy.interpolate.BPoly
Returns
-------
Bernstein polynomial at evaluation points, weighted sum of Bernstein
polynomial basis. | _eval_bernstein_1d | python | statsmodels/statsmodels | statsmodels/distributions/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py | BSD-3-Clause |
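The 'binom' and 'bpoly' parameterizations agree; a standalone check of the same identity with invented coefficients:
import numpy as np
from scipy import interpolate, stats

fvals = np.array([0.0, 0.4, 0.9, 1.0])   # hypothetical basis coefficients
x = np.linspace(0, 1, 5)
k = np.arange(4.0)
n = 3.0
via_binom = (fvals * stats.binom.pmf(k, n, x[:, None])).sum(-1)
via_bpoly = interpolate.BPoly(fvals[:, None], [0.0, 1.0])(x)
print(np.allclose(via_binom, via_bpoly))   # True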
def _eval_bernstein_2d(x, fvals):
"""Evaluate 2-dimensional bernstein polynomial given grid of values
experimental
Parameters
----------
x : array_like
Values at which to evaluate the Bernstein polynomial.
fvals : ndarray
Grid values of coefficients for Bernstein polynomial basis in the
weighted sum.
Returns
-------
Bernstein polynomial at evaluation points, weighted sum of Bernstein
polynomial basis.
"""
k_terms = fvals.shape
k_dim = fvals.ndim
if k_dim != 2:
raise ValueError("`fval` needs to be 2-dimensional")
xx = np.atleast_2d(x)
if xx.shape[1] != 2:
raise ValueError("x needs to be bivariate and have 2 columns")
x1, x2 = xx.T
n1, n2 = k_terms[0] - 1, k_terms[1] - 1
k1 = np.arange(k_terms[0]).astype(float)
k2 = np.arange(k_terms[1]).astype(float)
# we are building a nobs x n1 x n2 array
poly_base = (stats.binom.pmf(k1[None, :, None], n1, x1[:, None, None]) *
stats.binom.pmf(k2[None, None, :], n2, x2[:, None, None]))
bp_values = (fvals * poly_base).sum(-1).sum(-1)
return bp_values | Evaluate 2-dimensional bernstein polynomial given grid of values
experimental
Parameters
----------
x : array_like
Values at which to evaluate the Bernstein polynomial.
fvals : ndarray
Grid values of coefficients for Bernstein polynomial basis in the
weighted sum.
Returns
-------
Bernstein polynomial at evaluation points, weighted sum of Bernstein
polynomial basis. | _eval_bernstein_2d | python | statsmodels/statsmodels | statsmodels/distributions/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py | BSD-3-Clause |
def _eval_bernstein_dd(x, fvals):
"""Evaluate d-dimensional bernstein polynomial given grid of valuesv
experimental
Parameters
----------
x : array_like
Values at which to evaluate the Bernstein polynomial.
fvals : ndarray
Grid values of coefficients for Bernstein polynomial basis in the
weighted sum.
Returns
-------
Bernstein polynomial at evaluation points, weighted sum of Bernstein
polynomial basis.
"""
k_terms = fvals.shape
k_dim = fvals.ndim
xx = np.atleast_2d(x)
# The following loop is a bit tricky:
# we add terms for each x and expand dimension of poly base in each
# iteration using broadcasting
poly_base = np.zeros(x.shape[0])
for i in range(k_dim):
ki = np.arange(k_terms[i]).astype(float)
for _ in range(i+1):
ki = ki[..., None]
ni = k_terms[i] - 1
xi = xx[:, i]
poly_base = poly_base[None, ...] + stats.binom._logpmf(ki, ni, xi)
poly_base = np.exp(poly_base)
bp_values = fvals.T[..., None] * poly_base
for i in range(k_dim):
bp_values = bp_values.sum(0)
return bp_values | Evaluate d-dimensional bernstein polynomial given grid of values
experimental
Parameters
----------
x : array_like
Values at which to evaluate the Bernstein polynomial.
fvals : ndarray
Grid values of coefficients for Bernstein polynomial basis in the
weighted sum.
Returns
-------
Bernstein polynomial at evaluation points, weighted sum of Bernstein
polynomial basis. | _eval_bernstein_dd | python | statsmodels/statsmodels | statsmodels/distributions/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py | BSD-3-Clause |
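As a cross-check (again assuming the functions above are in scope), the general d-dimensional evaluator should reduce to the bivariate one on 2-d input; the coefficient grid below is arbitrary illustration data:

import numpy as np

rng = np.random.default_rng(0)
fvals = rng.random((4, 3))  # 4 x 3 grid of Bernstein coefficients
x = rng.random((10, 2))     # 10 bivariate evaluation points in [0, 1]^2

# broadcasting in _eval_bernstein_dd reproduces the explicit 2-d product
assert np.allclose(_eval_bernstein_2d(x, fvals), _eval_bernstein_dd(x, fvals))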
def _ecdf_mv(data, method="seq", use_ranks=True):
"""
Multivariate empirical distribution function, empirical copula
Notes
-----
Method "seq" is faster than method "brute", but supports mainly bivariate
case. Speed advantage of "seq" is increasing in number of observations
and decreasing in number of variables.
(see Segers ...)
Warning: This does not handle ties. The ecdf is based on univariate ranks
without ties. The assignment of ranks to ties depends on the sorting
algorithm and the initial ordering of the data.
When the original data is used instead of ranks, then method "brute"
computes the correct ecdf counts even in the case of ties.
"""
x = np.asarray(data)
n = x.shape[0]
if use_ranks:
x = _rankdata_no_ties(x) / n
if method == "brute":
count = [((x <= x[i]).all(1)).sum() for i in range(n)]
count = np.asarray(count)
elif method.startswith("seq"):
sort_idx0 = np.argsort(x[:, 0])
x_s0 = x[sort_idx0]
x1 = x_s0[:, 1:]
count_smaller = [(x1[:i] <= x1[i]).all(1).sum() + 1 for i in range(n)]
count = np.empty(x.shape[0])
count[sort_idx0] = count_smaller
else:
raise ValueError("method not available")
return count, x | Multivariate empirical distribution function, empirical copula
Notes
-----
Method "seq" is faster than method "brute", but supports mainly bivariate
case. Speed advantage of "seq" is increasing in number of observations
and decreasing in number of variables.
(see Segers ...)
Warning: This does not handle ties. The ecdf is based on univariate ranks
without ties. The assignment of ranks to ties depends on the sorting
algorithm and the initial ordering of the data.
When the original data is used instead of ranks, then method "brute"
computes the correct ecdf counts even in the case of ties. | _ecdf_mv | python | statsmodels/statsmodels | statsmodels/distributions/tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py | BSD-3-Clause |
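A sanity-check sketch, assuming `_ecdf_mv` from above is in scope: on continuous (hence tie-free) data both methods return the same counts, and count / nobs evaluates the empirical copula at the rank-transformed points:

import numpy as np

rng = np.random.default_rng(123)
data = rng.normal(size=(50, 2))  # continuous draws, so no ties

count_brute, u = _ecdf_mv(data, method="brute")
count_seq, _ = _ecdf_mv(data, method="seq")
assert np.allclose(count_brute, count_seq)

ecdf_values = count_brute / len(data)  # empirical copula at the points u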
def get_distr(self, params):
"""frozen distribution instance of the discrete distribution.
"""
args = params
distr = self.distr(*args)
return distr | frozen distribution instance of the discrete distribution. | get_distr | python | statsmodels/statsmodels | statsmodels/distributions/discrete.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/discrete.py | BSD-3-Clause |
def from_data(cls, data, k_bins):
"""Create distribution instance from data using histogram binning.
Classmethod to construct a distribution instance.
Parameters
----------
data : array_like
Data with observation in rows and random variables in columns.
Data can be 1-dimensional in the univariate case.
k_bins : int or list
Number or edges of bins to be used in numpy histogramdd.
If k_bins is a scalar int, then the number of bins of each
component will be equal to it.
Returns
-------
Instance of a Bernstein distribution
"""
data = np.asarray(data)
if np.any(data < 0) or np.any(data > 1):
raise ValueError("data needs to be in [0, 1]")
if data.ndim == 1:
data = data[:, None]
k_dim = data.shape[1]
if np.size(k_bins) == 1:
k_bins = [k_bins] * k_dim
bins = [np.linspace(-1 / ni, 1, ni + 2) for ni in k_bins]
c, e = np.histogramdd(data, bins=bins, density=False)
# TODO: check when we have zero observations, which bin?
# check bins start at 0 except leading bin
assert all([ei[1] == 0 for ei in e])
c /= len(data)
cdf_grid = prob2cdf_grid(c)
return cls(cdf_grid) | Create distribution instance from data using histogram binning.
Classmethod to construct a distribution instance.
Parameters
----------
data : array_like
Data with observation in rows and random variables in columns.
Data can be 1-dimensional in the univariate case.
k_bins : int or list
Number or edges of bins to be used in numpy histogramdd.
If k_bins is a scalar int, then the number of bins of each
component will be equal to it.
Returns
-------
Instance of a Bernstein distribution | from_data | python | statsmodels/statsmodels | statsmodels/distributions/bernstein.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/bernstein.py | BSD-3-Clause |
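A usage sketch for the classmethod; the import path follows the module given in the row metadata, and the uniform sample is illustration data only:

import numpy as np
from statsmodels.distributions.bernstein import BernsteinDistribution

rng = np.random.default_rng(0)
u = rng.uniform(size=(500, 2))  # data must already live in [0, 1]

bd = BernsteinDistribution.from_data(u, k_bins=10)
print(bd.cdf([0.5, 0.5]))  # joint cdf at a single bivariate point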
def cdf(self, x):
"""cdf values evaluated at x.
Parameters
----------
x : array_like
Points of multivariate random variable at which cdf is evaluated.
This can be a single point with length equal to the dimension of
the random variable, or two dimensional with points (observations)
in rows and random variables in columns.
In the univariate case, a 1-dimensional x will be interpreted as
different points for evaluation.
Returns
-------
cdf values
Notes
-----
Warning: 2-dim x with many points can be memory intensive because
currently the bernstein polynomials will be evaluated in a fully
vectorized computation.
"""
x = np.asarray(x)
if x.ndim == 1 and self.k_dim == 1:
x = x[:, None]
cdf_ = _eval_bernstein_dd(x, self.cdf_grid)
return cdf_ | cdf values evaluated at x.
Parameters
----------
x : array_like
Points of multivariate random variable at which cdf is evaluated.
This can be a single point with length equal to the dimension of
the random variable, or two dimensional with points (observations)
in rows and random variables in columns.
In the univariate case, a 1-dimensional x will be interpreted as
different points for evaluation.
Returns
-------
cdf values
Notes
-----
Warning: 2-dim x with many points can be memory intensive because
currently the bernstein polynomials will be evaluated in a fully
vectorized computation. | cdf | python | statsmodels/statsmodels | statsmodels/distributions/bernstein.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/bernstein.py | BSD-3-Clause |
def pdf(self, x):
"""pdf values evaluated at x.
Parameters
----------
x : array_like
Points of multivariate random variable at which pdf is evaluated.
This can be a single point with length equal to the dimension of
the random variable, or two dimensional with points (observations)
in rows and random variables in columns.
In the univariate case, a 1-dimensional x will be interpreted as
different points for evaluation.
Returns
-------
pdf values
Notes
-----
Warning: 2-dim x with many points can be memory intensive because
currently the bernstein polynomials will be evaluated in a fully
vectorized computation.
"""
x = np.asarray(x)
if x.ndim == 1 and self.k_dim == 1:
x = x[:, None]
# TODO: check usage of k_grid_product. Should this go into eval?
pdf_ = self.k_grid_product * _eval_bernstein_dd(x, self.prob_grid)
return pdf_ | pdf values evaluated at x.
Parameters
----------
x : array_like
Points of multivariate random variable at which pdf is evaluated.
This can be a single point with length equal to the dimension of
the random variable, or two dimensional with points (observations)
in rows and random variables in columns.
In the univariate case, a 1-dimensional x will be interpreted as
different points for evaluation.
Returns
-------
pdf values
Notes
-----
Warning: 2-dim x with many points can be memory intensive because
currently the bernstein polynomials will be evaluated in a fully
vectorized computation. | pdf | python | statsmodels/statsmodels | statsmodels/distributions/bernstein.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/bernstein.py | BSD-3-Clause |
def get_marginal(self, idx):
"""Get marginal BernsteinDistribution.
Parameters
----------
idx : int or list of int
Index or indices of the component for which the marginal
distribution is returned.
Returns
-------
BernsteinDistribution instance for the marginal distribution.
"""
# univariate
if self.k_dim == 1:
return self
sl = [-1] * self.k_dim
if np.shape(idx) == ():
idx = [idx]
for ii in idx:
sl[ii] = slice(None, None, None)
cdf_m = self.cdf_grid[tuple(sl)]
bpd_marginal = BernsteinDistribution(cdf_m)
return bpd_marginal | Get marginal BernsteinDistribution.
Parameters
----------
idx : int or list of int
Index or indices of the component for which the marginal
distribution is returned.
Returns
-------
BernsteinDistribution instance for the marginal distribution. | get_marginal | python | statsmodels/statsmodels | statsmodels/distributions/bernstein.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/bernstein.py | BSD-3-Clause |
def rvs(self, nobs):
"""Generate random numbers from distribution.
Parameters
----------
nobs : int
Number of random observations to generate.
"""
rvs_mnl = np.random.multinomial(nobs, self.prob_grid.flatten())
k_comp = self.k_dim
rvs_m = []
for i in range(len(rvs_mnl)):
if rvs_mnl[i] != 0:
idx = np.unravel_index(i, self.prob_grid.shape)
rvsi = []
for j in range(k_comp):
n = self.k_grid[j]
xgi = self._grid.x_marginal[j][idx[j]]
# Note: x_marginal starts at 0
# x_marginal ends with 1 but that is not used by idx
rvsi.append(stats.beta.rvs(n * xgi + 1, n * (1-xgi) + 0,
size=rvs_mnl[i]))
rvs_m.append(np.column_stack(rvsi))
rvsm = np.concatenate(rvs_m)
return rvsm | Generate random numbers from distribution.
Parameters
----------
nobs : int
Number of random observations to generate. | rvs | python | statsmodels/statsmodels | statsmodels/distributions/bernstein.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/bernstein.py | BSD-3-Clause |
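Putting the methods together, under the same assumptions as the earlier sketch: fit a distribution from data, draw new samples, and extract a marginal:

import numpy as np
from statsmodels.distributions.bernstein import BernsteinDistribution

rng = np.random.default_rng(1)
u = rng.uniform(size=(500, 2))

bd = BernsteinDistribution.from_data(u, k_bins=10)
sample = bd.rvs(1000)       # new draws in [0, 1]^2, shape (1000, 2)
marg0 = bd.get_marginal(0)  # univariate BernsteinDistribution
print(marg0.cdf(np.array([0.25, 0.5, 0.75])))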
def monotone_fn_inverter(fn, x, vectorized=True, **keywords):
"""
Given a monotone function fn (no checking is done to verify monotonicity)
and a set of x values, return a linearly interpolated approximation
to its inverse from its values on x.
"""
x = np.asarray(x)
if vectorized:
y = fn(x, **keywords)
else:
y = []
for _x in x:
y.append(fn(_x, **keywords))
y = np.array(y)
a = np.argsort(y)
return interp1d(y[a], x[a]) | Given a monotone function fn (no checking is done to verify monotonicity)
and a set of x values, return a linearly interpolated approximation
to its inverse from its values on x. | monotone_fn_inverter | python | statsmodels/statsmodels | statsmodels/distributions/empirical_distribution.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/empirical_distribution.py | BSD-3-Clause |
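For example (assuming `monotone_fn_inverter` above is in scope), inverting a cdf on a grid gives an approximate ppf:

import numpy as np
from scipy import stats

grid = np.linspace(-5, 5, 1001)
approx_ppf = monotone_fn_inverter(stats.norm.cdf, grid)  # interp1d instance
print(approx_ppf(0.975))  # close to stats.norm.ppf(0.975) ~= 1.96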
def _make_index(prob,size):
"""
Returns a boolean index for given probabilities.
Notes
-----
prob = [.75,.25] means that there is a 75% chance of the first column
being True and a 25% chance of the second column being True. The
columns are mutually exclusive.
"""
rv = np.random.uniform(size=(size,1))
cumprob = np.cumsum(prob)
return np.logical_and(np.r_[0,cumprob[:-1]] <= rv, rv < cumprob) | Returns a boolean index for given probabilities.
Notes
-----
prob = [.75,.25] means that there is a 75% chance of the first column
being True and a 25% chance of the second column being True. The
columns are mutually exclusive. | _make_index | python | statsmodels/statsmodels | statsmodels/distributions/mixture_rvs.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/mixture_rvs.py | BSD-3-Clause |
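A small demonstration, assuming `_make_index` above is in scope; each row of the boolean array selects exactly one mixture component:

import numpy as np

np.random.seed(0)
idx = _make_index([0.75, 0.25], size=10000)  # shape (10000, 2)
assert (idx.sum(1) == 1).all()  # columns are mutually exclusive
print(idx[:, 0].mean())         # close to 0.75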
def mixture_rvs(prob, size, dist, kwargs=None):
"""
Sample from a mixture of distributions.
Parameters
----------
prob : array_like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array_like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> from scipy import stats
>>> prob = [.75,.25]
>>> Y = mixture_rvs(prob, 5000, dist=[stats.norm, stats.norm],
... kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty(size)
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
sample[sample_idx] = dist[i].rvs(*args, **dict(loc=loc,scale=scale,
size=sample_size))
return sample | Sample from a mixture of distributions.
Parameters
----------
prob : array_like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array_like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> from scipy import stats
>>> prob = [.75,.25]
>>> Y = mixture_rvs(prob, 5000, dist=[stats.norm, stats.norm],
... kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5))) | mixture_rvs | python | statsmodels/statsmodels | statsmodels/distributions/mixture_rvs.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/mixture_rvs.py | BSD-3-Clause |
def pdf(self, x, prob, dist, kwargs=None):
"""
pdf a mixture of distributions.
Parameters
----------
x : array_like
Array containing locations where the PDF should be evaluated
prob : array_like
Probability of sampling from each distribution in dist
dist : array_like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> import numpy as np
>>> from scipy import stats
>>> from statsmodels.distributions.mixture_rvs import MixtureDistribution
>>> x = np.arange(-4.0, 4.0, 0.01)
>>> prob = [.75,.25]
>>> mixture = MixtureDistribution()
>>> Y = mixture.pdf(x, prob, dist=[stats.norm, stats.norm],
... kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
pdf_ = prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
else:
pdf_ += prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
return pdf_ | pdf a mixture of distributions.
Parameters
----------
x : array_like
Array containing locations where the PDF should be evaluated
prob : array_like
Probability of sampling from each distribution in dist
dist : array_like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> import numpy as np
>>> from scipy import stats
>>> from statsmodels.distributions.mixture_rvs import MixtureDistribution
>>> x = np.arange(-4.0, 4.0, 0.01)
>>> prob = [.75,.25]
>>> mixture = MixtureDistribution()
>>> Y = mixture.pdf(x, prob, dist=[stats.norm, stats.norm],
... kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5))) | pdf | python | statsmodels/statsmodels | statsmodels/distributions/mixture_rvs.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/mixture_rvs.py | BSD-3-Clause |
def cdf(self, x, prob, dist, kwargs=None):
"""
cdf of a mixture of distributions.
Parameters
----------
x : array_like
Array containing locations where the CDF should be evaluated
prob : array_like
Probability of sampling from each distribution in dist
dist : array_like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> import numpy as np
>>> from scipy import stats
>>> from statsmodels.distributions.mixture_rvs import MixtureDistribution
>>> x = np.arange(-4.0, 4.0, 0.01)
>>> prob = [.75,.25]
>>> mixture = MixtureDistribution()
>>> Y = mixture.cdf(x, prob, dist=[stats.norm, stats.norm],
... kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
cdf_ = prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
else:
cdf_ += prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
return cdf_ | cdf of a mixture of distributions.
Parameters
----------
x : array_like
Array containing locations where the CDF should be evaluated
prob : array_like
Probability of sampling from each distribution in dist
dist : array_like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> import numpy as np
>>> from scipy import stats
>>> from statsmodels.distributions.mixture_rvs import MixtureDistribution
>>> x = np.arange(-4.0, 4.0, 0.01)
>>> prob = [.75,.25]
>>> mixture = MixtureDistribution()
>>> Y = mixture.cdf(x, prob, dist=[stats.norm, stats.norm],
... kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5))) | cdf | python | statsmodels/statsmodels | statsmodels/distributions/mixture_rvs.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/mixture_rvs.py | BSD-3-Clause |
def mv_mixture_rvs(prob, size, dist, nvars, **kwargs):
"""
Sample from a mixture of multivariate distributions.
Parameters
----------
prob : array_like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array_like
An iterable of distributions instances with callable method rvs.
nvars : int
dimension of the multivariate distribution, could be inferred instead
kwargs : tuple of dicts, optional
ignored
Examples
--------
Say we want 2000 random variables from mixture of normals with two
multivariate normal distributions, and we want to sample from the
first with probability .4 and the second with probability .6.
import statsmodels.sandbox.distributions.mv_normal as mvd
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
mu2 = np.array([4, 2.0, 2.0])
mvn3 = mvd.MVNormal(mu, cov3)
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty((size, nvars))
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
#loc = kwargs[i].get('loc',0)
#scale = kwargs[i].get('scale',1)
#args = kwargs[i].get('args',())
# use int to avoid numpy bug with np.random.multivariate_normal
sample[sample_idx] = dist[i].rvs(size=int(sample_size))
return sample | Sample from a mixture of multivariate distributions.
Parameters
----------
prob : array_like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array_like
An iterable of distributions instances with callable method rvs.
nvars : int
dimension of the multivariate distribution, could be inferred instead
kwargs : tuple of dicts, optional
ignored
Examples
--------
Say we want 2000 random variables from mixture of normals with two
multivariate normal distributions, and we want to sample from the
first with probability .4 and the second with probability .6.
import statsmodels.sandbox.distributions.mv_normal as mvd
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
mu2 = np.array([4, 2.0, 2.0])
mvn3 = mvd.MVNormal(mu, cov3)
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3) | mv_mixture_rvs | python | statsmodels/statsmodels | statsmodels/distributions/mixture_rvs.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/mixture_rvs.py | BSD-3-Clause |
def deriv(self, t, *args):
"""First derivative of the dependence function
implemented through numerical differentiation
"""
t = np.atleast_1d(t)
return _approx_fprime_cs_scalar(t, self.evaluate) | First derivative of the dependence function
implemented through numerical differentiation | deriv | python | statsmodels/statsmodels | statsmodels/distributions/copula/depfunc_ev.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/depfunc_ev.py | BSD-3-Clause |
def deriv2(self, t, *args):
"""Second derivative of the dependence function
implemented through numerical differentiation
"""
if np.size(t) == 1:
d2 = approx_hess([t], self.evaluate, args=args)[0]
else:
d2 = np.array([approx_hess([ti], self.evaluate, args=args)[0, 0]
for ti in t])
return d2 | Second derivative of the dependence function
implemented through numerical differentiation | deriv2 | python | statsmodels/statsmodels | statsmodels/distributions/copula/depfunc_ev.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/depfunc_ev.py | BSD-3-Clause |
def rvs(self, nobs=1, cop_args=None, marg_args=None, random_state=None):
"""Draw `n` in the half-open interval ``[0, 1)``.
Sample the joint distribution.
Parameters
----------
nobs : int, optional
Number of samples to generate in the parameter space.
Default is 1.
cop_args : tuple
Copula parameters. If None, then the copula parameters will be
taken from the ``cop_args`` attribute created when initializing
the instance.
marg_args : list of tuples
Parameters for the marginal distributions. It can be None if none
of the marginal distributions have parameters, otherwise it needs
to be a list of tuples with the same length as the number of
marginal distributions. The list can contain empty tuples for
marginal distributions that do not take parameter arguments.
random_state : {None, int, numpy.random.Generator}, optional
If `seed` is None then the legacy singleton NumPy generator is used.
This will change after 0.13 to use a fresh NumPy ``Generator``,
so you should explicitly pass a seeded ``Generator`` if you
need reproducible results.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` instance then that instance is
used.
Returns
-------
sample : array_like (n, d)
Sample from the joint distribution.
Notes
-----
The random samples are generated by creating a sample with uniform
margins from the copula, and using ``ppf`` to convert uniform margins
to the one specified by the marginal distribution.
See Also
--------
statsmodels.tools.rng_qrng.check_random_state
"""
if cop_args is None:
cop_args = self.cop_args
if marg_args is None:
marg_args = [()] * self.k_vars
sample = self.copula.rvs(nobs=nobs, args=cop_args,
random_state=random_state)
for i, dist in enumerate(self.marginals):
sample[:, i] = dist.ppf(0.5 + (1 - 1e-10) * (sample[:, i] - 0.5),
*marg_args[i])
return sample | Sample the joint distribution.
Parameters
----------
nobs : int, optional
Number of samples to generate in the parameter space.
Default is 1.
cop_args : tuple
Copula parameters. If None, then the copula parameters will be
taken from the ``cop_args`` attribute created when initializing
the instance.
marg_args : list of tuples
Parameters for the marginal distributions. It can be None if none
of the marginal distributions have parameters, otherwise it needs
to be a list of tuples with the same length as the number of
marginal distributions. The list can contain empty tuples for
marginal distributions that do not take parameter arguments.
random_state : {None, int, numpy.random.Generator}, optional
If `seed` is None then the legacy singleton NumPy generator is used.
This will change after 0.13 to use a fresh NumPy ``Generator``,
so you should explicitly pass a seeded ``Generator`` if you
need reproducible results.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` instance then that instance is
used.
Returns
-------
sample : array_like (n, d)
Sample from the joint distribution.
Notes
-----
The random samples are generated by creating a sample with uniform
margins from the copula, and using ``ppf`` to convert uniform margins
to the one specified by the marginal distribution.
See Also
--------
statsmodels.tools.rng_qrng.check_random_state | rvs | python | statsmodels/statsmodels | statsmodels/distributions/copula/copulas.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py | BSD-3-Clause |
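A usage sketch for the class: combine a copula with scipy marginals and sample the joint distribution. The imports follow the statsmodels copula API, though exact paths and defaults may differ across versions:

import numpy as np
from scipy import stats
from statsmodels.distributions.copula.api import (
    CopulaDistribution, GaussianCopula)

joint = CopulaDistribution(GaussianCopula(corr=0.5),
                           [stats.norm, stats.expon])
sample = joint.rvs(nobs=1000, random_state=np.random.default_rng(5))
print(sample.shape)  # (1000, 2): normal and exponential margins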
def cdf(self, y, cop_args=None, marg_args=None):
"""CDF of copula distribution.
Parameters
----------
y : array_like
Values of random variable at which to evaluate cdf.
If 2-dimensional, then components of multivariate random variable
need to be in columns
cop_args : tuple
Copula parameters. If None, then the copula parameters will be
taken from the ``cop_args`` attribute created when initializing
the instance.
marg_args : list of tuples
Parameters for the marginal distributions. It can be None if none
of the marginal distributions have parameters, otherwise it needs
to be a list of tuples with the same length as the number of
marginal distributions. The list can contain empty tuples for
marginal distributions that do not take parameter arguments.
Returns
-------
cdf values
"""
y = np.asarray(y)
if cop_args is None:
cop_args = self.cop_args
if marg_args is None:
marg_args = [()] * y.shape[-1]
cdf_marg = []
for i in range(self.k_vars):
cdf_marg.append(self.marginals[i].cdf(y[..., i], *marg_args[i]))
u = np.column_stack(cdf_marg)
if y.ndim == 1:
u = u.squeeze()
return self.copula.cdf(u, cop_args) | CDF of copula distribution.
Parameters
----------
y : array_like
Values of random variable at which to evaluate cdf.
If 2-dimensional, then components of multivariate random variable
need to be in columns
cop_args : tuple
Copula parameters. If None, then the copula parameters will be
taken from the ``cop_args`` attribute created when initializing
the instance.
marg_args : list of tuples
Parameters for the marginal distributions. It can be None if none
of the marginal distributions have parameters, otherwise it needs
to be a list of tuples with the same length as the number of
marginal distributions. The list can contain empty tuples for
marginal distributions that do not take parameter arguments.
Returns
-------
cdf values | cdf | python | statsmodels/statsmodels | statsmodels/distributions/copula/copulas.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py | BSD-3-Clause |
def pdf(self, y, cop_args=None, marg_args=None):
"""PDF of copula distribution.
Parameters
----------
y : array_like
Values of random variable at which to evaluate cdf.
If 2-dimensional, then components of multivariate random variable
need to be in columns
cop_args : tuple
Copula parameters. If None, then the copula parameters will be
taken from the ``cop_args`` attribute created when initializing
the instance.
marg_args : list of tuples
Parameters for the marginal distributions. It can be None if none
of the marginal distributions have parameters, otherwise it needs
to be a list of tuples with the same length as the number of
marginal distributions. The list can contain empty tuples for
marginal distributions that do not take parameter arguments.
Returns
-------
pdf values
"""
return np.exp(self.logpdf(y, cop_args=cop_args, marg_args=marg_args)) | PDF of copula distribution.
Parameters
----------
y : array_like
Values of random variable at which to evaluate cdf.
If 2-dimensional, then components of multivariate random variable
need to be in columns
cop_args : tuple
Copula parameters. If None, then the copula parameters will be
taken from the ``cop_args`` attribute created when initializing
the instance.
marg_args : list of tuples
Parameters for the marginal distributions. It can be None if none
of the marginal distributions have parameters, otherwise it needs
to be a list of tuples with the same length as the number of
marginal distributions. The list can contain empty tuples for
marginal distributions that do not take parameter arguments.
Returns
-------
pdf values | pdf | python | statsmodels/statsmodels | statsmodels/distributions/copula/copulas.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py | BSD-3-Clause |
def logpdf(self, y, cop_args=None, marg_args=None):
"""Log-pdf of copula distribution.
Parameters
----------
y : array_like
Values of random variable at which to evaluate cdf.
If 2-dimensional, then components of multivariate random variable
need to be in columns
cop_args : tuple
Copula parameters. If None, then the copula parameters will be
taken from the ``cop_args`` attribute created when initializing
the instance.
marg_args : list of tuples
Parameters for the marginal distributions. It can be None if none
of the marginal distributions have parameters, otherwise it needs
to be a list of tuples with the same length as the number of
marginal distributions. The list can contain empty tuples for
marginal distributions that do not take parameter arguments.
Returns
-------
log-pdf values
"""
y = np.asarray(y)
if cop_args is None:
cop_args = self.cop_args
if marg_args is None:
marg_args = tuple([()] * y.shape[-1])
lpdf = 0.0
cdf_marg = []
for i in range(self.k_vars):
lpdf += self.marginals[i].logpdf(y[..., i], *marg_args[i])
cdf_marg.append(self.marginals[i].cdf(y[..., i], *marg_args[i]))
u = np.column_stack(cdf_marg)
if y.ndim == 1:
u = u.squeeze()
lpdf += self.copula.logpdf(u, cop_args)
return lpdf | Log-pdf of copula distribution.
Parameters
----------
y : array_like
Values of random variable at which to evaluate cdf.
If 2-dimensional, then components of multivariate random variable
need to be in columns
cop_args : tuple
Copula parameters. If None, then the copula parameters will be
taken from the ``cop_args`` attribute created when initializing
the instance.
marg_args : list of tuples
Parameters for the marginal distributions. It can be None if none
of the marginal distributions have parameters, otherwise it needs
to be a list of tuples with the same length as the number of
marginal distributions. The list can contain empty tuples for
marginal distributions that do not take parameter arguments.
Returns
-------
log-pdf values | logpdf | python | statsmodels/statsmodels | statsmodels/distributions/copula/copulas.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py | BSD-3-Clause |
def rvs(self, nobs=1, args=(), random_state=None):
"""Draw `n` in the half-open interval ``[0, 1)``.
Marginals are uniformly distributed.
Parameters
----------
nobs : int, optional
Number of samples to generate from the copula. Default is 1.
args : tuple
Arguments for copula parameters. The number of arguments depends
on the copula.
random_state : {None, int, numpy.random.Generator}, optional
If `seed` is None then the legacy singleton NumPy generator is used.
This will change after 0.13 to use a fresh NumPy ``Generator``,
so you should explicitly pass a seeded ``Generator`` if you
need reproducible results.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` instance then that instance is
used.
Returns
-------
sample : array_like (nobs, d)
Sample from the copula.
See Also
--------
statsmodels.tools.rng_qrng.check_random_state
"""
raise NotImplementedError | Draw `n` in the half-open interval ``[0, 1)``.
Marginals are uniformly distributed.
Parameters
----------
nobs : int, optional
Number of samples to generate from the copula. Default is 1.
args : tuple
Arguments for copula parameters. The number of arguments depends
on the copula.
random_state : {None, int, numpy.random.Generator}, optional
If `seed` is None then the legacy singleton NumPy generator is used.
This will change after 0.13 to use a fresh NumPy ``Generator``,
so you should explicitly pass a seeded ``Generator`` if you
need reproducible results.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` instance then that instance is
used.
Returns
-------
sample : array_like (nobs, d)
Sample from the copula.
See Also
--------
statsmodels.tools.rng_qrng.check_random_state | rvs | python | statsmodels/statsmodels | statsmodels/distributions/copula/copulas.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py | BSD-3-Clause |
def pdf(self, u, args=()):
"""Probability density function of copula.
Parameters
----------
u : array_like, 2-D
Points of random variables in unit hypercube at which method is
evaluated.
The second (or last) dimension should be the same as the dimension
of the random variable, e.g. 2 for bivariate copula.
args : tuple
Arguments for copula parameters. The number of arguments depends
on the copula.
Returns
-------
pdf : ndarray, (nobs, k_dim)
Copula pdf evaluated at points ``u``.
""" | Probability density function of copula.
Parameters
----------
u : array_like, 2-D
Points of random variables in unit hypercube at which method is
evaluated.
The second (or last) dimension should be the same as the dimension
of the random variable, e.g. 2 for bivariate copula.
args : tuple
Arguments for copula parameters. The number of arguments depends
on the copula.
Returns
-------
pdf : ndarray, (nobs, k_dim)
Copula pdf evaluated at points ``u``. | pdf | python | statsmodels/statsmodels | statsmodels/distributions/copula/copulas.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py | BSD-3-Clause |
def logpdf(self, u, args=()):
"""Log of copula pdf, loglikelihood.
Parameters
----------
u : array_like, 2-D
Points of random variables in unit hypercube at which method is
evaluated.
The second (or last) dimension should be the same as the dimension
of the random variable, e.g. 2 for bivariate copula.
args : tuple
Arguments for copula parameters. The number of arguments depends
on the copula.
Returns
-------
lpdf : ndarray, (nobs, k_dim)
Copula log-pdf evaluated at points ``u``.
"""
return np.log(self.pdf(u, *args)) | Log of copula pdf, loglikelihood.
Parameters
----------
u : array_like, 2-D
Points of random variables in unit hypercube at which method is
evaluated.
The second (or last) dimension should be the same as the dimension
of the random variable, e.g. 2 for bivariate copula.
args : tuple
Arguments for copula parameters. The number of arguments depends
on the copula.
Returns
-------
lpdf : ndarray, (nobs, k_dim)
Copula log-pdf evaluated at points ``u``. | logpdf | python | statsmodels/statsmodels | statsmodels/distributions/copula/copulas.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py | BSD-3-Clause |
def cdf(self, u, args=()):
"""Cumulative distribution function evaluated at points u.
Parameters
----------
u : array_like, 2-D
Points of random variables in unit hypercube at which method is
evaluated.
The second (or last) dimension should be the same as the dimension
of the random variable, e.g. 2 for bivariate copula.
args : tuple
Arguments for copula parameters. The number of arguments depends
on the copula.
Returns
-------
cdf : ndarray, (nobs, k_dim)
Copula cdf evaluated at points ``u``.
""" | Cumulative distribution function evaluated at points u.
Parameters
----------
u : array_like, 2-D
Points of random variables in unit hypercube at which method is
evaluated.
The second (or last) dimension should be the same as the dimension
of the random variable, e.g. 2 for bivariate copula.
args : tuple
Arguments for copula parameters. The number of arguments depends
on the copula.
Returns
-------
cdf : ndarray, (nobs, k_dim)
Copula cdf evaluated at points ``u``. | cdf | python | statsmodels/statsmodels | statsmodels/distributions/copula/copulas.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py | BSD-3-Clause |
def plot_scatter(self, sample=None, nobs=500, random_state=None, ax=None):
"""Sample the copula and plot.
Parameters
----------
sample : array-like, optional
The sample to plot. If not provided (the default), a sample
is generated.
nobs : int, optional
Number of samples to generate from the copula.
random_state : {None, int, numpy.random.Generator}, optional
If `seed` is None then the legacy singleton NumPy generator is used.
This will change after 0.13 to use a fresh NumPy ``Generator``,
so you should explicitly pass a seeded ``Generator`` if you
need reproducible results.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` instance then that instance is
used.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure
being created.
Returns
-------
fig : Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
sample : array_like (n, d)
Sample from the copula.
See Also
--------
statsmodels.tools.rng_qrng.check_random_state
"""
if self.k_dim != 2:
raise ValueError("Can only plot 2-dimensional Copula.")
if sample is None:
sample = self.rvs(nobs=nobs, random_state=random_state)
fig, ax = utils.create_mpl_ax(ax)
ax.scatter(sample[:, 0], sample[:, 1])
ax.set_xlabel('u')
ax.set_ylabel('v')
return fig, sample | Sample the copula and plot.
Parameters
----------
sample : array-like, optional
The sample to plot. If not provided (the default), a sample
is generated.
nobs : int, optional
Number of samples to generate from the copula.
random_state : {None, int, numpy.random.Generator}, optional
If `seed` is None then the legacy singleton NumPy generator is used.
This will change after 0.13 to use a fresh NumPy ``Generator``,
so you should explicitly pass a seeded ``Generator`` if you
need reproducible results.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` instance then that instance is
used.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure
being created.
Returns
-------
fig : Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
sample : array_like (n, d)
Sample from the copula.
See Also
--------
statsmodels.tools.rng_qrng.check_random_state | plot_scatter | python | statsmodels/statsmodels | statsmodels/distributions/copula/copulas.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py | BSD-3-Clause |
def plot_pdf(self, ticks_nbr=10, ax=None):
"""Plot the PDF.
Parameters
----------
ticks_nbr : int, optional
Number of color isolines for the PDF. Default is 10.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure
being created.
Returns
-------
fig : Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
"""
from matplotlib import pyplot as plt
if self.k_dim != 2:
import warnings
warnings.warn("Plotting 2-dimensional Copula.")
n_samples = 100
eps = 1e-4
uu, vv = np.meshgrid(np.linspace(eps, 1 - eps, n_samples),
np.linspace(eps, 1 - eps, n_samples))
points = np.vstack([uu.ravel(), vv.ravel()]).T
data = self.pdf(points).T.reshape(uu.shape)
min_ = np.nanpercentile(data, 5)
max_ = np.nanpercentile(data, 95)
fig, ax = utils.create_mpl_ax(ax)
vticks = np.linspace(min_, max_, num=ticks_nbr)
range_cbar = [min_, max_]
cs = ax.contourf(uu, vv, data, vticks,
antialiased=True, vmin=range_cbar[0],
vmax=range_cbar[1])
ax.set_xlabel("u")
ax.set_ylabel("v")
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_aspect('equal')
cbar = plt.colorbar(cs, ticks=vticks)
cbar.set_label('p')
fig.tight_layout()
return fig | Plot the PDF.
Parameters
----------
ticks_nbr : int, optional
Number of color isolines for the PDF. Default is 10.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure
being created.
Returns
-------
fig : Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected. | plot_pdf | python | statsmodels/statsmodels | statsmodels/distributions/copula/copulas.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py | BSD-3-Clause |
def tau_simulated(self, nobs=1024, random_state=None):
"""Kendall's tau based on simulated samples.
Returns
-------
tau : float
Kendall's tau.
"""
x = self.rvs(nobs, random_state=random_state)
return stats.kendalltau(x[:, 0], x[:, 1])[0] | Kendall's tau based on simulated samples.
Returns
-------
tau : float
Kendall's tau. | tau_simulated | python | statsmodels/statsmodels | statsmodels/distributions/copula/copulas.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py | BSD-3-Clause |
def fit_corr_param(self, data):
"""Copula correlation parameter using Kendall's tau of sample data.
Parameters
----------
data : array_like
Sample data used to fit `theta` using Kendall's tau.
Returns
-------
corr_param : float
Correlation parameter of the copula, ``theta`` in Archimedean and
pearson correlation in elliptical.
If k_dim > 2, then average tau is used.
"""
x = np.asarray(data)
if x.shape[1] == 2:
tau = stats.kendalltau(x[:, 0], x[:, 1])[0]
else:
k = self.k_dim
taus = [stats.kendalltau(x[..., i], x[..., j])[0]
for i in range(k) for j in range(i+1, k)]
tau = np.mean(taus)
return self._arg_from_tau(tau) | Copula correlation parameter using Kendall's tau of sample data.
Parameters
----------
data : array_like
Sample data used to fit `theta` using Kendall's tau.
Returns
-------
corr_param : float
Correlation parameter of the copula, ``theta`` in Archimedean and
pearson correlation in elliptical.
If k_dim > 2, then average tau is used. | fit_corr_param | python | statsmodels/statsmodels | statsmodels/distributions/copula/copulas.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py | BSD-3-Clause |
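A round-trip sketch using the Gaussian copula from the same API (hedged as above): sampling with a known parameter and re-estimating it should roughly recover the truth, since Kendall's tau is inverted exactly for elliptical copulas:

import numpy as np
from statsmodels.distributions.copula.api import GaussianCopula

cop = GaussianCopula(corr=0.6)
data = cop.rvs(2000, random_state=np.random.default_rng(1))
print(cop.fit_corr_param(data))  # close to the true 0.6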
def _arg_from_tau(self, tau):
"""Compute correlation parameter from tau.
Parameters
----------
tau : float
Kendall's tau.
Returns
-------
corr_param : float
Correlation parameter of the copula, ``theta`` in Archimedean and
pearson correlation in elliptical.
"""
raise NotImplementedError | Compute correlation parameter from tau.
Parameters
----------
tau : float
Kendall's tau.
Returns
-------
corr_param : float
Correlation parameter of the copula, ``theta`` in Archimedean and
pearson correlation in elliptical. | _arg_from_tau | python | statsmodels/statsmodels | statsmodels/distributions/copula/copulas.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py | BSD-3-Clause |
def tau(self, corr=None):
"""Bivariate kendall's tau based on correlation coefficient.
Parameters
----------
corr : None or float
Pearson correlation. If corr is None, then the correlation will be
taken from the copula attribute.
Returns
-------
Kendall's tau that corresponds to pearson correlation in the
elliptical copula.
"""
if corr is None:
corr = self.corr
if corr.shape == (2, 2):
corr = corr[0, 1]
tau = 2 * np.arcsin(corr) / np.pi
return tau
Parameters
----------
corr : None or float
Pearson correlation. If corr is None, then the correlation will be
taken from the copula attribute.
Returns
-------
Kendall's tau that corresponds to pearson correlation in the
elliptical copula. | tau | python | statsmodels/statsmodels | statsmodels/distributions/copula/elliptical.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/elliptical.py | BSD-3-Clause |
def corr_from_tau(self, tau):
"""Pearson correlation from kendall's tau.
Parameters
----------
tau : array_like
Kendall's tau correlation coefficient.
Returns
-------
Pearson correlation coefficient for given tau in elliptical
copula. This can be used as parameter for an elliptical copula.
"""
corr = np.sin(tau * np.pi / 2)
return corr | Pearson correlation from kendall's tau.
Parameters
----------
tau : array_like
Kendall's tau correlation coefficient.
Returns
-------
Pearson correlation coefficient for given tau in elliptical
copula. This can be used as parameter for an elliptical copula. | corr_from_tau | python | statsmodels/statsmodels | statsmodels/distributions/copula/elliptical.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/elliptical.py | BSD-3-Clause |
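The two methods above implement the standard inversion pair for elliptical copulas:

\tau = \frac{2}{\pi} \arcsin(\rho)
\qquad \Longleftrightarrow \qquad
\rho = \sin\left(\frac{\pi \tau}{2}\right)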
def dependence_tail(self, corr=None):
"""
Bivariate tail dependence parameter.
Joe (2014) p. 182
Parameters
----------
corr : any
Tail dependence for Gaussian copulas is always zero.
Argument will be ignored
Returns
-------
Lower and upper tail dependence coefficients of the copula with given
Pearson correlation coefficient.
"""
return 0, 0 | Bivariate tail dependence parameter.
Joe (2014) p. 182
Parameters
----------
corr : any
Tail dependence for Gaussian copulas is always zero.
Argument will be ignored
Returns
-------
Lower and upper tail dependence coefficients of the copula with given
Pearson correlation coefficient. | dependence_tail | python | statsmodels/statsmodels | statsmodels/distributions/copula/elliptical.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/elliptical.py | BSD-3-Clause |
def spearmans_rho(self, corr=None):
"""
Bivariate Spearman's rho based on correlation coefficient.
Joe (2014) p. 182
Parameters
----------
corr : None or float
Pearson correlation. If corr is None, then the correlation will be
taken from the copula attribute.
Returns
-------
Spearman's rho that corresponds to pearson correlation in the
elliptical copula.
"""
if corr is None:
corr = self.corr
if corr.shape == (2, 2):
corr = corr[0, 1]
rho = 6 * np.arcsin(corr / 2) / np.pi
return rho
Joe (2014) p. 182
Parameters
----------
corr : None or float
Pearson correlation. If corr is None, then the correlation will be
taken from the copula attribute.
Returns
-------
Spearman's rho that corresponds to pearson correlation in the
elliptical copula. | spearmans_rho | python | statsmodels/statsmodels | statsmodels/distributions/copula/elliptical.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/elliptical.py | BSD-3-Clause |
def dependence_tail(self, corr=None):
"""
Bivariate tail dependence parameter.
Joe (2014) p. 182
Parameters
----------
corr : None or float
Pearson correlation. If corr is None, then the correlation will be
taken from the copula attribute.
Returns
-------
Lower and upper tail dependence coefficients of the copula with given
Pearson correlation coefficient.
"""
if corr is None:
corr = self.corr
if corr.shape == (2, 2):
corr = corr[0, 1]
df = self.df
t = -np.sqrt((df + 1) * (1 - corr) / (1 + corr))
# Note self.distr_uv is frozen, df cannot change, use stats.t instead
lam = 2 * stats.t.cdf(t, df + 1)
return lam, lam | Bivariate tail dependence parameter.
Joe (2014) p. 182
Parameters
----------
corr : None or float
Pearson correlation. If corr is None, then the correlation will be
taken from the copula attribute.
Returns
-------
Lower and upper tail dependence coefficients of the copula with given
Pearson correlation coefficient. | dependence_tail | python | statsmodels/statsmodels | statsmodels/distributions/copula/elliptical.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/elliptical.py | BSD-3-Clause |
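For reference, the quantity computed here is the symmetric tail-dependence coefficient of the t copula (Joe 2014, p. 182), where t_{nu+1} denotes the cdf of a Student t distribution with nu + 1 degrees of freedom:

\lambda_L = \lambda_U
    = 2 \, t_{\nu+1}\left(-\sqrt{\frac{(\nu + 1)(1 - \rho)}{1 + \rho}}\right)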