# Function extracts from the statsmodels repository
# (https://github.com/statsmodels/statsmodels); all files are BSD-3-Clause
# licensed, and the originating file is noted in a comment after each
# function.
def _debyem1_expansion(x):
"""Debye function minus 1, Taylor series approximation around zero
function is not used
"""
x = np.asarray(x)
# Expansion derived using Wolfram alpha
dm1 = (-x/4 + x**2/36 - x**4/3600 + x**6/211680 - x**8/10886400 +
x**10/526901760 - x**12 * 691/16999766784000)
    return dm1

# source: statsmodels/distributions/copula/archimedean.py (BSD-3-Clause)
def tau_frank(theta):
"""Kendall's tau for Frank Copula
This uses Taylor series expansion for theta <= 1.
Parameters
----------
theta : float
Parameter of the Frank copula. (not vectorized)
Returns
-------
tau : float
    tau for given theta
"""
if theta <= 1:
tau = _tau_frank_expansion(theta)
else:
debye_value = _debye(theta)
tau = 1 + 4 * (debye_value - 1) / theta
    return tau

# source: statsmodels/distributions/copula/archimedean.py (BSD-3-Clause)
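# Illustrative check (not from the statsmodels source): for theta > 1 the
# branch above needs the module's `_debye` helper, which this excerpt does
# not show.  A quadrature stand-in for the order-1 Debye function gives the
# same tau = 1 + 4 * (D1(theta) - 1) / theta.
import numpy as np
from scipy import integrate

def _debye_quad(theta):
    # D1(theta) = (1 / theta) * integral_0^theta t / (exp(t) - 1) dt
    val, _ = integrate.quad(lambda t: t / np.expm1(t), 0, theta)
    return val / theta

theta = 5.0
tau_check = 1 + 4 * (_debye_quad(theta) - 1) / theta   # approx 0.457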
def cdf(self, u, args=()):
"""Evaluate cdf of Archimedean copula."""
args = self._handle_args(args)
u = self._handle_u(u)
axis = -1
phi = self.transform.evaluate
phi_inv = self.transform.inverse
cdfv = phi_inv(phi(u, *args).sum(axis), *args)
# clip numerical noise
out = cdfv if isinstance(cdfv, np.ndarray) else None
cdfv = np.clip(cdfv, 0., 1., out=out) # inplace if possible
    return cdfv

# source: statsmodels/distributions/copula/archimedean.py (BSD-3-Clause)
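# Illustration (not from the source): the generic evaluation above is
# C(u) = phi_inv(phi(u_1) + ... + phi(u_d)).  With a hand-rolled Clayton
# generator (an assumption of this sketch, not the module's transform
# classes) it reproduces the closed-form Clayton cdf.
import numpy as np

theta = 2.0

def phi(t):
    return (t**(-theta) - 1) / theta           # Clayton generator

def phi_inv(s):
    return (1 + theta * s)**(-1 / theta)       # its inverse

u = np.array([0.3, 0.7])
cdf_generic = phi_inv(phi(u).sum())
cdf_closed = (u[0]**-theta + u[1]**-theta - 1)**(-1 / theta)
assert np.allclose(cdf_generic, cdf_closed)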
def pdf(self, u, args=()):
"""Evaluate pdf of Archimedean copula."""
u = self._handle_u(u)
args = self._handle_args(args)
axis = -1
phi_d1 = self.transform.deriv
if u.shape[-1] == 2:
psi_d = self.transform.deriv2_inverse
elif u.shape[-1] == 3:
psi_d = self.transform.deriv3_inverse
elif u.shape[-1] == 4:
psi_d = self.transform.deriv4_inverse
else:
# will raise NotImplementedError if not available
k = u.shape[-1]
def psi_d(*args):
return self.transform.derivk_inverse(k, *args)
psi = self.transform.evaluate(u, *args).sum(axis)
pdfv = np.prod(phi_d1(u, *args), axis)
pdfv *= (psi_d(psi, *args))
# use abs, I'm not sure yet about where to add signs
    return np.abs(pdfv)

# source: statsmodels/distributions/copula/archimedean.py (BSD-3-Clause)
def logpdf(self, u, args=()):
"""Evaluate log pdf of multivariate Archimedean copula."""
u = self._handle_u(u)
args = self._handle_args(args)
axis = -1
phi_d1 = self.transform.deriv
if u.shape[-1] == 2:
psi_d = self.transform.deriv2_inverse
elif u.shape[-1] == 3:
psi_d = self.transform.deriv3_inverse
elif u.shape[-1] == 4:
psi_d = self.transform.deriv4_inverse
else:
# will raise NotImplementedError if not available
k = u.shape[-1]
def psi_d(*args):
return self.transform.derivk_inverse(k, *args)
psi = self.transform.evaluate(u, *args).sum(axis)
# I need np.abs because derivatives are negative,
# is this correct for mv?
logpdfv = np.sum(np.log(np.abs(phi_d1(u, *args))), axis)
logpdfv += np.log(np.abs(psi_d(psi, *args)))
    return logpdfv

# source: statsmodels/distributions/copula/archimedean.py (BSD-3-Clause)
def cdfcond_2g1(self, u, args=()):
"""Conditional cdf of second component given the value of first.
"""
u = self._handle_u(u)
th, = self._handle_args(args)
if u.shape[-1] == 2:
# bivariate case
u1, u2 = u[..., 0], u[..., 1]
cdfc = np.exp(- th * u1)
cdfc /= np.expm1(-th) / np.expm1(- th * u2) + np.expm1(- th * u1)
return cdfc
else:
raise NotImplementedError("u needs to be bivariate (2 columns)") | Conditional cdf of second component given the value of first. | cdfcond_2g1 | python | statsmodels/statsmodels | statsmodels/distributions/copula/archimedean.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/archimedean.py | BSD-3-Clause |
def ppfcond_2g1(self, q, u1, args=()):
"""Conditional pdf of second component given the value of first.
"""
u1 = np.asarray(u1)
th, = self._handle_args(args)
if u1.shape[-1] == 1:
# bivariate case, conditional on value of first variable
ppfc = - np.log(1 + np.expm1(- th) /
((1 / q - 1) * np.exp(-th * u1) + 1)) / th
return ppfc
else:
raise NotImplementedError("u needs to be bivariate (2 columns)") | Conditional pdf of second component given the value of first. | ppfcond_2g1 | python | statsmodels/statsmodels | statsmodels/distributions/copula/archimedean.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/archimedean.py | BSD-3-Clause |
def copula_bv_ev(u, transform, args=()):
'''generic bivariate extreme value copula
'''
u, v = u
    return np.exp(np.log(u * v) * (transform(np.log(u)/np.log(u*v), *args)))

# source: statsmodels/distributions/copula/extreme_value.py (BSD-3-Clause)
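# Example (assumed Pickands function, not from this excerpt): with the
# Gumbel dependence function A(t) = (t**d + (1 - t)**d)**(1/d), the generic
# formula above reproduces the closed-form Gumbel extreme value copula.
import numpy as np

def pickands_gumbel(t, d):
    return (t**d + (1 - t)**d)**(1 / d)

u, v, d = 0.3, 0.8, 1.7
c1 = copula_bv_ev((u, v), pickands_gumbel, args=(d,))
c2 = np.exp(-((-np.log(u))**d + (-np.log(v))**d)**(1 / d))
assert np.isclose(c1, c2)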
def cdf(self, u, args=()):
"""Evaluate cdf of bivariate extreme value copula.
Parameters
----------
u : array_like
        Values of the bivariate random variable, each defined on [0, 1],
for which cdf is computed.
Can be two dimensional with multivariate components in columns and
observation in rows.
args : tuple
Required parameters for the copula. The meaning and number of
parameters in the tuple depends on the specific copula.
Returns
-------
CDF values at evaluation points.
"""
# currently only Bivariate
u, v = np.asarray(u).T
args = self._handle_args(args)
cdfv = np.exp(np.log(u * v) *
self.transform(np.log(u)/np.log(u*v), *args))
    return cdfv

# source: statsmodels/distributions/copula/extreme_value.py (BSD-3-Clause)
def pdf(self, u, args=()):
"""Evaluate pdf of bivariate extreme value copula.
Parameters
----------
u : array_like
        Values of the bivariate random variable, each defined on [0, 1],
        for which the pdf is computed.
Can be two dimensional with multivariate components in columns and
observation in rows.
args : tuple
Required parameters for the copula. The meaning and number of
parameters in the tuple depends on the specific copula.
Returns
-------
PDF values at evaluation points.
"""
tr = self.transform
u1, u2 = np.asarray(u).T
args = self._handle_args(args)
log_u12 = np.log(u1 * u2)
t = np.log(u1) / log_u12
cdf = self.cdf(u, args)
dep = tr(t, *args)
d1 = tr.deriv(t, *args)
d2 = tr.deriv2(t, *args)
pdf_ = cdf / (u1 * u2) * ((dep + (1 - t) * d1) * (dep - t * d1) -
d2 * (1 - t) * t / log_u12)
    return pdf_

# source: statsmodels/distributions/copula/extreme_value.py (BSD-3-Clause)
def logpdf(self, u, args=()):
"""Evaluate log-pdf of bivariate extreme value copula.
Parameters
----------
u : array_like
        Values of the bivariate random variable, each defined on [0, 1],
        for which the log-pdf is computed.
Can be two dimensional with multivariate components in columns and
observation in rows.
args : tuple
Required parameters for the copula. The meaning and number of
parameters in the tuple depends on the specific copula.
Returns
-------
Log-pdf values at evaluation points.
"""
    return np.log(self.pdf(u, args=args))

# source: statsmodels/distributions/copula/extreme_value.py (BSD-3-Clause)
def conditional_2g1(self, u, args=()):
"""conditional distribution
not yet implemented
C2|1(u2|u1) := ∂C(u1, u2) / ∂u1 = C(u1, u2) / u1 * (A(t) − t A'(t))
where t = np.log(v)/np.log(u*v)
"""
    raise NotImplementedError

# source: statsmodels/distributions/copula/extreme_value.py (BSD-3-Clause)
def rvs_kernel(sample, size, bw=1, k_func=None, return_extras=False):
"""Random sampling from empirical copula using Beta distribution
Parameters
----------
sample : ndarray
        Sample of multivariate observations in (0, 1) interval.
size : int
Number of observations to simulate.
bw : float
Bandwidth for Beta sampling. The beta copula corresponds to a kernel
estimate of the distribution. bw=1 corresponds to the empirical beta
copula. A small bandwidth like bw=0.001 corresponds to small noise
added to the empirical distribution. Larger bw, e.g. bw=10 corresponds
to kernel estimate with more smoothing.
k_func : None or callable
The default kernel function is currently a beta function with 1 added
to the first beta parameter.
return_extras : bool
If this is False, then only the random sample will be returned.
If true, then extra information is returned that is mainly of interest
for verification.
Returns
-------
rvs : ndarray
Multivariate sample with ``size`` observations drawn from the Beta
Copula.
Notes
-----
Status: experimental, API will change.
"""
# vectorized for observations
n = sample.shape[0]
    # use the beta kernel by default, otherwise the user-supplied k_func
    kfunc = _kernel_rvs_beta1 if k_func is None else k_func
idx = np.random.randint(0, n, size=size)
xi = sample[idx]
krvs = np.column_stack([kfunc(xii, bw) for xii in xi.T])
if return_extras:
return krvs, idx, xi
else:
        return krvs

# source: statsmodels/distributions/copula/other_copulas.py (BSD-3-Clause)
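# Usage sketch: the default `_kernel_rvs_beta1` is not shown in this
# excerpt, so an explicit beta kernel is passed instead.  The kernel below,
# Beta(x/bw + 1, (1 - x)/bw + 1), is an assumption of this sketch chosen so
# that small bw concentrates draws near x; it need not match the default.
import numpy as np
from scipy import stats

def beta_kernel(x, bw):
    return stats.beta.rvs(x / bw + 1, (1 - x) / bw + 1)

pseudo_obs = np.random.uniform(size=(100, 2))   # stand-in (0, 1) sample
draws = rvs_kernel(pseudo_obs, size=500, bw=0.1, k_func=beta_kernel)
# draws.shape == (500, 2)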
def clear_cache(self):
"""clear cache of Sterling numbers
"""
    self._cache = {}

# source: statsmodels/distributions/copula/_special.py (BSD-3-Clause)
def li3(z):
"""Polylogarithm for negative integer order -3
Li(-3, z)
"""
    return z * (1 + 4 * z + z**2) / (1 - z)**4

# source: statsmodels/distributions/copula/_special.py (BSD-3-Clause)
def li4(z):
"""Polylogarithm for negative integer order -4
Li(-4, z)
"""
    return z * (1 + z) * (1 + 10 * z + z**2) / (1 - z)**5

# source: statsmodels/distributions/copula/_special.py (BSD-3-Clause)
def lin(n, z):
"""Polylogarithm for negative integer order -n
Li(-n, z)
https://en.wikipedia.org/wiki/Polylogarithm#Particular_values
"""
if np.size(z) > 1:
z = np.array(z)[..., None]
k = np.arange(n+1)
st2 = np.array([sterling2(n + 1, ki + 1) for ki in k])
res = (-1)**(n+1) * np.sum(factorial(k) * st2 * (-1 / (1 - z))**(k+1),
axis=-1)
    return res

# source: statsmodels/distributions/copula/_special.py (BSD-3-Clause)
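# Consistency sketch (illustrative, assuming the module's `sterling2` and
# `factorial` helpers used by `lin` are importable): the Stirling-number
# representation should agree with the closed forms `li3` and `li4` above.
import numpy as np

z = np.array([-0.5, 0.25, 0.75])
assert np.allclose(lin(3, z), li3(z))
assert np.allclose(lin(4, z), li4(z))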
def _next_regular(target):
"""
Find the next regular number greater than or equal to target.
Regular numbers are composites of the prime factors 2, 3, and 5.
Also known as 5-smooth numbers or Hamming numbers, these are the optimal
size for inputs to FFTPACK.
Target must be a positive integer.
"""
if target <= 6:
return target
# Quickly check if it's already a power of 2
if not (target & (target - 1)):
return target
match = float("inf") # Anything found will be smaller
p5 = 1
while p5 < target:
p35 = p5
while p35 < target:
# Ceiling integer division, avoiding conversion to float
# (quotient = ceil(target / p35))
quotient = -(-target // p35)
# Quickly find next power of 2 >= quotient
p2 = 2 ** ((quotient - 1).bit_length())
N = p2 * p35
if N == target:
return N
elif N < match:
match = N
p35 *= 3
if p35 == target:
return p35
if p35 < match:
match = p35
p5 *= 5
if p5 == target:
return p5
if p5 < match:
match = p5
    return match

# source: statsmodels/compat/scipy.py (BSD-3-Clause)
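# Usage sketch: smallest 5-smooth numbers at or above the target.
assert _next_regular(7) == 8      # 2**3
assert _next_regular(13) == 15    # 3 * 5
assert _next_regular(97) == 100   # 2**2 * 5**2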
def _valarray(shape, value=np.nan, typecode=None):
"""Return an array of all value."""
out = np.ones(shape, dtype=bool) * value
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, np.ndarray):
out = np.asarray(out)
    return out

# source: statsmodels/compat/scipy.py (BSD-3-Clause)
def pytest_warns(
warning: type[Warning] | tuple[type[Warning], ...] | None
) -> WarningsChecker | NoWarningsChecker:
"""
Parameters
----------
warning : {None, Warning, Tuple[Warning]}
None if no warning is produced, or a single or multiple Warnings
Returns
-------
    cm : context manager
        A checker that validates the expected warnings on exit.
"""
if warning is None:
return NoWarningsChecker()
else:
assert warning is not None
        return warns(warning)

# source: statsmodels/compat/pytest.py (BSD-3-Clause)
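# Usage sketch (assumes pytest is installed): with a warning class this
# behaves like pytest.warns; with None the checker fails if anything warns.
import warnings

with pytest_warns(UserWarning):
    warnings.warn("deprecated path", UserWarning)

with pytest_warns(None):
    pass  # any warning emitted in this block would fail the check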
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
    return out

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""
Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
    return eps

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def _pinv_1d(v, eps=1e-5):
"""
A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float) | A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers. | _pinv_1d | python | statsmodels/statsmodels | statsmodels/compat/_scipy_multivariate_t.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py | BSD-3-Clause |
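# Combined sketch (not the module's `_PSD` class): together the two helpers
# above give a pinvh-style pseudoinverse of a symmetric matrix.
import numpy as np
from scipy import linalg

a = np.array([[2.0, 0.0], [0.0, 0.0]])   # symmetric and singular
s, u = linalg.eigh(a)
eps = _eigvalsh_to_eps(s)
a_pinv = (u * _pinv_1d(s, eps)) @ u.T    # scale eigenvector columns
assert np.allclose(a_pinv, np.linalg.pinv(a))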
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None, int, a RandomState instance, or a
np.random.Generator instance.
If None (or np.random), use the RandomState singleton used by
np.random.
If already a RandomState or Generator instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
    return self._random_state

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
                                      seed=seed)

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def _process_parameters(self, dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be "
"a scalar.")
# Check input sizes and return full arrays for mean and cov if
# necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." %
dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
    return dim, mean, cov

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
    return x

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
    return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def logpdf(self, x, mean=None, cov=1, allow_singular=False):
"""
Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Log of the probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
    return _squeeze_output(out)

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def pdf(self, x, mean=None, cov=1, allow_singular=False):
"""
Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
    return _squeeze_output(out)

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
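# Standalone sanity sketch (not from the source): in one dimension the
# log-density used above, -0.5 * (log(2*pi) + log|cov| + maha), reduces to
# the familiar univariate normal log-pdf.
import numpy as np
from scipy import stats

x, mean, var = 1.3, 0.2, 2.5
maha = (x - mean)**2 / var
logpdf = -0.5 * (np.log(2 * np.pi) + np.log(var) + maha)
assert np.isclose(logpdf, stats.norm.logpdf(x, loc=mean, scale=np.sqrt(var)))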
def _cdf(self, x, mean, cov, maxpts, abseps, releps):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the cumulative distribution function.
mean : ndarray
Mean of the distribution
cov : array_like
Covariance matrix of the distribution
maxpts: integer
The maximum number of points to use for integration
abseps: float
Absolute error tolerance
releps: float
Relative error tolerance
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'cdf' instead.
.. versionadded:: 1.0.0
"""
lower = np.full(mean.shape, -np.inf)
# mvnun expects 1-d arguments, so process points sequentially
def func1d(x_slice):
return mvn.mvnun(lower, x_slice, mean, cov, maxpts, abseps, releps)[0]
out = np.apply_along_axis(func1d, -1, x)
    return _squeeze_output(out)

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
abseps=1e-5, releps=1e-5):
"""
Log of the multivariate normal cumulative distribution function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
maxpts: integer, optional
The maximum number of points to use for integration
(default `1000000*dim`)
abseps: float, optional
Absolute error tolerance (default 1e-5)
releps: float, optional
Relative error tolerance (default 1e-5)
Returns
-------
cdf : ndarray or scalar
Log of the cumulative distribution function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
.. versionadded:: 1.0.0
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
# Use _PSD to check covariance matrix
_PSD(cov, allow_singular=allow_singular)
if not maxpts:
maxpts = 1000000 * dim
out = np.log(self._cdf(x, mean, cov, maxpts, abseps, releps))
    return out

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
abseps=1e-5, releps=1e-5):
"""
Multivariate normal cumulative distribution function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
maxpts: integer, optional
The maximum number of points to use for integration
(default `1000000*dim`)
abseps: float, optional
Absolute error tolerance (default 1e-5)
releps: float, optional
Relative error tolerance (default 1e-5)
Returns
-------
cdf : ndarray or scalar
Cumulative distribution function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
.. versionadded:: 1.0.0
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
# Use _PSD to check covariance matrix
_PSD(cov, allow_singular=allow_singular)
if not maxpts:
maxpts = 1000000 * dim
out = self._cdf(x, mean, cov, maxpts, abseps, releps)
    return out

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_mvn_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
    return _squeeze_output(out)

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def entropy(self, mean=None, cov=1):
"""
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
    return 0.5 * logdet

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None,
maxpts=None, abseps=1e-5, releps=1e-5):
"""
Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional
This parameter defines the object to use for drawing random
variates.
If `seed` is `None` the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is None.
maxpts: integer, optional
The maximum number of points to use for integration of the
cumulative distribution function (default `1000000*dim`)
abseps: float, optional
Absolute error tolerance for the cumulative distribution function
(default 1e-5)
releps: float, optional
Relative error tolerance for the cumulative distribution function
(default 1e-5)
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self._dist = multivariate_normal_gen(seed)
self.dim, self.mean, self.cov = self._dist._process_parameters(
None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
if not maxpts:
maxpts = 1000000 * self.dim
self.maxpts = maxpts
self.abseps = abseps
    self.releps = releps

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def entropy(self):
"""
Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
    return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def __init__(self, seed=None):
"""
Initialize a multivariate t-distributed random variable.
Parameters
----------
seed : Random state.
"""
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvt_docdict_params)
    self._random_state = check_random_state(seed)

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def __call__(self, loc=None, shape=1, df=1, allow_singular=False,
seed=None):
"""
Create a frozen multivariate t-distribution. See
`multivariate_t_frozen` for parameters.
"""
if df == np.inf:
return multivariate_normal_frozen(mean=loc, cov=shape,
allow_singular=allow_singular,
seed=seed)
return multivariate_t_frozen(loc=loc, shape=shape, df=df,
                                 allow_singular=allow_singular, seed=seed)

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def pdf(self, x, loc=None, shape=1, df=1, allow_singular=False):
"""
Multivariate t-distribution probability density function.
Parameters
----------
x : array_like
Points at which to evaluate the probability density function.
%(_mvt_doc_default_callparams)s
Returns
-------
pdf : Probability density function evaluated at `x`.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.pdf(x, loc, shape, df)
array([0.00075713])
"""
dim, loc, shape, df = self._process_parameters(loc, shape, df)
x = self._process_quantiles(x, dim)
shape_info = _PSD(shape, allow_singular=allow_singular)
logpdf = self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df,
dim, shape_info.rank)
    return np.exp(logpdf)

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def logpdf(self, x, loc=None, shape=1, df=1):
"""
Log of the multivariate t-distribution probability density function.
Parameters
----------
x : array_like
Points at which to evaluate the log of the probability density
function.
%(_mvt_doc_default_callparams)s
Returns
-------
logpdf : Log of the probability density function evaluated at `x`.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.logpdf(x, loc, shape, df)
array([-7.1859802])
See Also
--------
pdf : Probability density function.
"""
dim, loc, shape, df = self._process_parameters(loc, shape, df)
x = self._process_quantiles(x, dim)
shape_info = _PSD(shape)
return self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim,
                        shape_info.rank)

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def _logpdf(self, x, loc, prec_U, log_pdet, df, dim, rank):
"""Utility method `pdf`, `logpdf` for parameters.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability density
function.
loc : ndarray
Location of the distribution.
prec_U : ndarray
A decomposition such that `np.dot(prec_U, prec_U.T)` is the inverse
of the shape matrix.
log_pdet : float
Logarithm of the determinant of the shape matrix.
df : float
Degrees of freedom of the distribution.
dim : int
Dimension of the quantiles x.
rank : int
Rank of the shape matrix.
Notes
-----
As this function does no argument checking, it should not be called
directly; use 'logpdf' instead.
"""
if df == np.inf:
return multivariate_normal._logpdf(x, loc, prec_U, log_pdet, rank)
dev = x - loc
maha = np.square(np.dot(dev, prec_U)).sum(axis=-1)
t = 0.5 * (df + dim)
A = gammaln(t)
B = gammaln(0.5 * df)
C = dim/2. * np.log(df * np.pi)
D = 0.5 * log_pdet
E = -t * np.log(1 + (1./df) * maha)
    return _squeeze_output(A - B - C - D + E)

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None):
"""
Draw random samples from a multivariate t-distribution.
Parameters
----------
%(_mvt_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `P`), where `P` is the
dimension of the random variable.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.rvs(loc, shape, df)
array([[0.93477495, 3.00408716]])
"""
# For implementation details, see equation (3):
#
# Hofert, "On Sampling from the Multivariatet Distribution", 2013
# http://rjournal.github.io/archive/2013-2/hofert.pdf
#
dim, loc, shape, df = self._process_parameters(loc, shape, df)
if random_state is not None:
rng = check_random_state(random_state)
else:
rng = self._random_state
if np.isinf(df):
x = np.ones(size)
else:
x = rng.chisquare(df, size=size) / df
z = rng.multivariate_normal(np.zeros(dim), shape, size=size)
samples = loc + z / np.sqrt(x)[:, None]
    return _squeeze_output(samples)

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
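# Standalone sketch of the same recipe (Hofert 2013, equation (3)): a
# multivariate t variate is a normal variate scaled by sqrt(df / chi2_df).
import numpy as np

rng = np.random.default_rng(0)
df, dim, size = 7, 2, 1000
w = rng.chisquare(df, size=size) / df
z = rng.multivariate_normal(np.zeros(dim), np.eye(dim), size=size)
samples = z / np.sqrt(w)[:, None]   # loc = 0, shape = identity
# for df > 2 the sample covariance approaches df / (df - 2) * identity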
def _process_parameters(self, loc, shape, df):
"""
Infer dimensionality from location array and shape matrix, handle
defaults, and ensure compatible dimensions.
"""
if loc is None and shape is None:
loc = np.asarray(0, dtype=float)
shape = np.asarray(1, dtype=float)
dim = 1
elif loc is None:
shape = np.asarray(shape, dtype=float)
if shape.ndim < 2:
dim = 1
else:
dim = shape.shape[0]
loc = np.zeros(dim)
elif shape is None:
loc = np.asarray(loc, dtype=float)
dim = loc.size
shape = np.eye(dim)
else:
shape = np.asarray(shape, dtype=float)
loc = np.asarray(loc, dtype=float)
dim = loc.size
if dim == 1:
loc.shape = (1,)
shape.shape = (1, 1)
if loc.ndim != 1 or loc.shape[0] != dim:
raise ValueError("Array 'loc' must be a vector of length %d." %
dim)
if shape.ndim == 0:
shape = shape * np.eye(dim)
elif shape.ndim == 1:
shape = np.diag(shape)
elif shape.ndim == 2 and shape.shape != (dim, dim):
rows, cols = shape.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(shape.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'loc' is a vector of length %d.")
msg = msg % (str(shape.shape), len(loc))
raise ValueError(msg)
elif shape.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % shape.ndim)
# Process degrees of freedom.
if df is None:
df = 1
elif df <= 0:
raise ValueError("'df' must be greater than zero.")
elif np.isnan(df):
raise ValueError("'df' is 'nan' but must be greater than zero or 'np.inf'.")
    return dim, loc, shape, df

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def __init__(self, loc=None, shape=1, df=1, allow_singular=False,
seed=None):
"""
Create a frozen multivariate t distribution.
Parameters
----------
%(_mvt_doc_default_callparams)s
Examples
--------
>>> loc = np.zeros(3)
>>> shape = np.eye(3)
>>> df = 10
>>> dist = multivariate_t(loc, shape, df)
>>> dist.rvs()
array([[ 0.81412036, -1.53612361, 0.42199647]])
>>> dist.pdf([1, 1, 1])
array([0.01237803])
"""
self._dist = multivariate_t_gen(seed)
dim, loc, shape, df = self._dist._process_parameters(loc, shape, df)
self.dim, self.loc, self.shape, self.df = dim, loc, shape, df
    self.shape_info = _PSD(shape, allow_singular=allow_singular)

# source: statsmodels/compat/_scipy_multivariate_t.py (BSD-3-Clause)
def lstsq(a, b, rcond=None):
"""
    Shim that allows modern rcond setting with backward compat for NumPy
    earlier than 1.14
"""
if NP_LT_114 and rcond is None:
rcond = -1
    return np.linalg.lstsq(a, b, rcond=rcond)

# source: statsmodels/compat/numpy.py (BSD-3-Clause)
def is_int_index(index: pd.Index) -> bool:
"""
Check if an index is integral
Parameters
----------
index : pd.Index
Any numeric index
Returns
-------
bool
True if is an index with a standard integral type
"""
return (
isinstance(index, pd.Index)
and isinstance(index.dtype, np.dtype)
and np.issubdtype(index.dtype, np.integer)
    )

# source: statsmodels/compat/pandas.py (BSD-3-Clause)
def is_float_index(index: pd.Index) -> bool:
"""
Check if an index is floating
Parameters
----------
index : pd.Index
Any numeric index
Returns
-------
bool
        True if the index has a standard numpy floating dtype
"""
return (
isinstance(index, pd.Index)
and isinstance(index.dtype, np.dtype)
and np.issubdtype(index.dtype, np.floating)
    )

# source: statsmodels/compat/pandas.py (BSD-3-Clause)
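# Usage sketch for the two index predicates above:
import pandas as pd

assert is_int_index(pd.Index([1, 2, 3]))
assert not is_int_index(pd.Index([1.0, 2.0]))
assert is_float_index(pd.Index([1.0, 2.0]))
assert not is_float_index(pd.date_range("2000-01-01", periods=3))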
def rands_array(nchars, size, dtype="O"):
"""
    Generate an array of random strings.
"""
rands_chars = np.array(
list(string.ascii_letters + string.digits), dtype=(np.str_, 1)
)
retval = (
np.random.choice(rands_chars, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
if dtype is None:
return retval
else:
        return retval.astype(dtype)

# source: statsmodels/compat/pandas.py (BSD-3-Clause)
def make_dataframe():
"""
    Simple version of pandas._testing.makeDataFrame
"""
n = 30
k = 4
index = pd.Index(rands_array(nchars=10, size=n), name=None)
data = {
c: pd.Series(np.random.randn(n), index=index)
for c in string.ascii_uppercase[:k]
}
    return pd.DataFrame(data)

# source: statsmodels/compat/pandas.py (BSD-3-Clause)
def to_numpy(po: pd.DataFrame) -> np.ndarray:
"""
Workaround legacy pandas lacking to_numpy
Parameters
----------
    po : Pandas object
Returns
-------
ndarray
A numpy array
"""
try:
return po.to_numpy()
except AttributeError:
        return po.values

# source: statsmodels/compat/pandas.py (BSD-3-Clause)
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
    return type.__new__(metaclass, "temporary_class", (), {})

# source: statsmodels/compat/python.py (BSD-3-Clause)
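# Usage sketch: the temporary metaclass replaces itself, so the final class
# is created directly by `Meta`.
class Meta(type):
    def __new__(cls, name, bases, d):
        d.setdefault("tagged", True)
        return super().__new__(cls, name, bases, d)

class Base(with_metaclass(Meta)):
    pass

assert type(Base) is Meta and Base.tagged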
def get_all_sorted_knots(
x, n_inner_knots=None, inner_knots=None, lower_bound=None, upper_bound=None
):
"""Gets all knots locations with lower and upper exterior knots included.
If needed, inner knots are computed as equally spaced quantiles of the
input data falling between given lower and upper bounds.
:param x: The 1-d array data values.
:param n_inner_knots: Number of inner knots to compute.
:param inner_knots: Provided inner knots if any.
:param lower_bound: The lower exterior knot location. If unspecified, the
minimum of ``x`` values is used.
:param upper_bound: The upper exterior knot location. If unspecified, the
maximum of ``x`` values is used.
:return: The array of ``n_inner_knots + 2`` distinct knots.
    :raise ValueError: for various invalid parameter sets or if unable to
        compute ``n_inner_knots + 2`` distinct knots.
"""
if lower_bound is None and x.size == 0:
raise ValueError(
"Cannot set lower exterior knot location: empty "
"input data and lower_bound not specified."
)
elif lower_bound is None and x.size != 0:
lower_bound = np.min(x)
if upper_bound is None and x.size == 0:
raise ValueError(
"Cannot set upper exterior knot location: empty "
"input data and upper_bound not specified."
)
elif upper_bound is None and x.size != 0:
upper_bound = np.max(x)
if upper_bound < lower_bound:
raise ValueError(
"lower_bound > upper_bound (%r > %r)" % (lower_bound, upper_bound)
)
if inner_knots is None and n_inner_knots is not None:
if n_inner_knots < 0:
raise ValueError(
"Invalid requested number of inner knots: %r" % (n_inner_knots,)
)
x = x[(lower_bound <= x) & (x <= upper_bound)]
x = np.unique(x)
if x.size != 0:
inner_knots_q = np.linspace(0, 100, n_inner_knots + 2)[1:-1]
# .tolist() is necessary to work around a bug in numpy 1.8
inner_knots = np.asarray(np.percentile(x, inner_knots_q.tolist()))
elif n_inner_knots == 0:
inner_knots = np.array([])
else:
raise ValueError(
"No data values between lower_bound(=%r) and "
"upper_bound(=%r): cannot compute requested "
"%r inner knot(s)." % (lower_bound, upper_bound, n_inner_knots)
)
elif inner_knots is not None:
inner_knots = np.unique(inner_knots)
if n_inner_knots is not None and n_inner_knots != inner_knots.size:
raise ValueError(
"Needed number of inner knots=%r does not match "
"provided number of inner knots=%r." % (n_inner_knots, inner_knots.size)
)
n_inner_knots = inner_knots.size
if np.any(inner_knots < lower_bound):
raise ValueError(
"Some knot values (%s) fall below lower bound "
"(%r)." % (inner_knots[inner_knots < lower_bound], lower_bound)
)
if np.any(inner_knots > upper_bound):
raise ValueError(
"Some knot values (%s) fall above upper bound "
"(%r)." % (inner_knots[inner_knots > upper_bound], upper_bound)
)
else:
raise ValueError("Must specify either 'n_inner_knots' or 'inner_knots'.")
all_knots = np.concatenate(([lower_bound, upper_bound], inner_knots))
all_knots = np.unique(all_knots)
if all_knots.size != n_inner_knots + 2:
raise ValueError(
"Unable to compute n_inner_knots(=%r) + 2 distinct "
"knots: %r data value(s) found between "
"lower_bound(=%r) and upper_bound(=%r)."
% (n_inner_knots, x.size, lower_bound, upper_bound)
)
    return all_knots | Gets all knot locations with lower and upper exterior knots included.
If needed, inner knots are computed as equally spaced quantiles of the
input data falling between given lower and upper bounds.
:param x: The 1-d array data values.
:param n_inner_knots: Number of inner knots to compute.
:param inner_knots: Provided inner knots if any.
:param lower_bound: The lower exterior knot location. If unspecified, the
minimum of ``x`` values is used.
:param upper_bound: The upper exterior knot location. If unspecified, the
maximum of ``x`` values is used.
:return: The array of ``n_inner_knots + 2`` distinct knots.
:raise ValueError: for various invalid parameters sets or if unable to
compute ``n_inner_knots + 2`` distinct knots. | get_all_sorted_knots | python | statsmodels/statsmodels | statsmodels/compat/patsy.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/patsy.py | BSD-3-Clause |
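For example, with equally spaced data the inner knots land on the requested quantiles (illustrative values; the exterior knots default to the data min and max):

import numpy as np

x = np.arange(11.0)            # 0, 1, ..., 10
knots = get_all_sorted_knots(x, n_inner_knots=3)
# inner knots are the 25/50/75% quantiles of x
assert np.allclose(knots, [0.0, 2.5, 5.0, 7.5, 10.0])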
def _get_init_kwds(self):
"""return dictionary with extra keys used in model.__init__
"""
kwds = {key: getattr(self, key, None)
for key in self._init_keys}
return kwds | return dictionary with extra keys used in model.__init__ | _get_init_kwds | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def from_formula(cls, formula, data, subset=None, drop_cols=None,
*args, **kwargs):
"""
Create a Model from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model.
data : array_like
The data for the model. See Notes.
subset : array_like
An array-like object of booleans, integers, or index values that
indicate the subset of df to use in the model. Assumes df is a
`pandas.DataFrame`.
drop_cols : array_like
Columns to drop from the design matrix. Cannot be used to
drop terms involving categoricals.
*args
        Additional positional arguments that are passed to the model.
**kwargs
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model
The model instance.
Notes
-----
        data must define __getitem__ with the keys in the formula terms,
        e.g., a numpy structured or rec array, a dictionary, or a pandas
        DataFrame. args and kwargs are passed on to the model instantiation.
"""
# TODO: provide a docs template for args/kwargs from child models
# TODO: subset could use syntax. issue #469.
mgr = FormulaManager()
if subset is not None:
data = data.loc[subset]
eval_env = kwargs.pop('eval_env', None)
if eval_env is None:
eval_env = 2
elif eval_env == -1:
eval_env = mgr.get_empty_eval_env()
elif isinstance(eval_env, int):
eval_env += 1 # we're going down the stack again
missing = kwargs.get('missing', 'drop')
if missing == 'none': # with patsy it's drop or raise. let's raise.
missing = 'raise'
tmp = handle_formula_data(data, None, formula, depth=eval_env,
missing=missing)
((endog, exog), missing_idx, model_spec) = tmp
max_endog = cls._formula_max_endog
if (max_endog is not None and
endog.ndim > 1 and endog.shape[1] > max_endog):
raise ValueError('endog has evaluated to an array with multiple '
'columns that has shape {}. This occurs when '
'the variable converted to endog is non-numeric'
' (e.g., bool or str).'.format(endog.shape))
if drop_cols is not None and len(drop_cols) > 0:
cols = [x for x in exog.columns if x not in drop_cols]
if len(cols) < len(exog.columns):
exog = exog[cols]
spec_cols = list(mgr.get_term_names(model_spec))
for col in drop_cols:
try:
if mgr.engine == "formulaic" and col == "Intercept":
col = "1"
spec_cols.remove(col)
except ValueError:
pass # OK if not present
# TODO: Patsy migration, need to add method to handle
model_spec = model_spec.subset(spec_cols)
kwargs.update({'missing_idx': missing_idx,
'missing': missing,
                       'formula': formula,  # attach formula for unpickling
'model_spec': model_spec})
mod = cls(endog, exog, *args, **kwargs)
mod.formula = formula
# since we got a dataframe, attach the original
mod.data.frame = data
return mod | Create a Model from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model.
data : array_like
The data for the model. See Notes.
subset : array_like
An array-like object of booleans, integers, or index values that
indicate the subset of df to use in the model. Assumes df is a
`pandas.DataFrame`.
drop_cols : array_like
Columns to drop from the design matrix. Cannot be used to
drop terms involving categoricals.
*args
        Additional positional arguments that are passed to the model.
**kwargs
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model
The model instance.
Notes
-----
        data must define __getitem__ with the keys in the formula terms,
        e.g., a numpy structured or rec array, a dictionary, or a pandas
        DataFrame. args and kwargs are passed on to the model instantiation. | from_formula | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
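A typical call, as a sketch; any concrete model class exposing this classmethod works the same way, and the data here are hypothetical:

import pandas as pd
import statsmodels.api as sm

# frame containing the variables named in the formula
df = pd.DataFrame({"y": [1.0, 2.0, 3.0, 4.0],
                   "x": [0.0, 1.0, 2.0, 3.0]})
model = sm.OLS.from_formula("y ~ x", data=df)
res = model.fit()
print(res.params)  # Intercept and slope for x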
def endog_names(self):
"""
Names of endogenous variables.
"""
return self.data.ynames | Names of endogenous variables. | endog_names | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def exog_names(self) -> list[str] | None:
"""
Names of exogenous variables.
"""
return self.data.xnames | Names of exogenous variables. | exog_names | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def fit(self):
"""
Fit a model to data.
"""
raise NotImplementedError | Fit a model to data. | fit | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def predict(self, params, exog=None, *args, **kwargs):
"""
After a model has been fit predict returns the fitted values.
This is a placeholder intended to be overwritten by individual models.
"""
raise NotImplementedError | After a model has been fit predict returns the fitted values.
This is a placeholder intended to be overwritten by individual models. | predict | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def initialize(self):
"""
Initialize (possibly re-initialize) a Model instance.
For example, if the the design matrix of a linear model changes then
initialized can be used to recompute values using the modified design
matrix.
"""
pass | Initialize (possibly re-initialize) a Model instance.
For example, if the the design matrix of a linear model changes then
initialized can be used to recompute values using the modified design
matrix. | initialize | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def loglike(self, params):
"""
Log-likelihood of model.
Parameters
----------
params : ndarray
The model parameters used to compute the log-likelihood.
Notes
-----
Must be overridden by subclasses.
"""
raise NotImplementedError | Log-likelihood of model.
Parameters
----------
params : ndarray
The model parameters used to compute the log-likelihood.
Notes
-----
Must be overridden by subclasses. | loglike | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def score(self, params):
"""
Score vector of model.
The gradient of logL with respect to each parameter.
Parameters
----------
params : ndarray
The parameters to use when evaluating the Hessian.
Returns
-------
ndarray
The score vector evaluated at the parameters.
"""
raise NotImplementedError | Score vector of model.
The gradient of logL with respect to each parameter.
Parameters
----------
params : ndarray
The parameters to use when evaluating the Hessian.
Returns
-------
ndarray
The score vector evaluated at the parameters. | score | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def information(self, params):
"""
Fisher information matrix of model.
Returns -1 * Hessian of the log-likelihood evaluated at params.
Parameters
----------
params : ndarray
The model parameters.
"""
raise NotImplementedError | Fisher information matrix of model.
Returns -1 * Hessian of the log-likelihood evaluated at params.
Parameters
----------
params : ndarray
The model parameters. | information | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def hessian(self, params):
"""
The Hessian matrix of the model.
Parameters
----------
params : ndarray
The parameters to use when evaluating the Hessian.
Returns
-------
ndarray
The hessian evaluated at the parameters.
"""
raise NotImplementedError | The Hessian matrix of the model.
Parameters
----------
params : ndarray
The parameters to use when evaluating the Hessian.
Returns
-------
ndarray
The hessian evaluated at the parameters. | hessian | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def _fit_zeros(self, keep_index=None, start_params=None,
return_auxiliary=False, k_params=None, **fit_kwds):
"""experimental, fit the model subject to zero constraints
Intended for internal use cases until we know what we need.
API will need to change to handle models with two exog.
This is not yet supported by all model subclasses.
This is essentially a simplified version of `fit_constrained`, and
does not need to use `offset`.
The estimation creates a new model with transformed design matrix,
exog, and converts the results back to the original parameterization.
Some subclasses could use a more efficient calculation than using a
new model.
Parameters
----------
keep_index : array_like (int or bool) or slice
            variables that should be kept, i.e. not constrained to zero.
start_params : None or array_like
starting values for the optimization. `start_params` needs to be
given in the original parameter space and are internally
transformed.
k_params : int or None
If None, then we try to infer from start_params or model.
**fit_kwds : keyword arguments
fit_kwds are used in the optimization of the transformed model.
Returns
-------
results : Results instance
"""
# we need to append index of extra params to keep_index as in
# NegativeBinomial
if hasattr(self, 'k_extra') and self.k_extra > 0:
# we cannot change the original, TODO: should we add keep_index_params?
keep_index = np.array(keep_index, copy=True)
k = self.exog.shape[1]
extra_index = np.arange(k, k + self.k_extra)
keep_index_p = np.concatenate((keep_index, extra_index))
else:
keep_index_p = keep_index
# not all models support start_params, drop if None, hide them in fit_kwds
if start_params is not None:
fit_kwds['start_params'] = start_params[keep_index_p]
k_params = len(start_params)
            # ignore k_params in this case, or verify consistency?
# build auxiliary model and fit
init_kwds = self._get_init_kwds()
mod_constr = self.__class__(self.endog, self.exog[:, keep_index],
**init_kwds)
res_constr = mod_constr.fit(**fit_kwds)
# switch name, only need keep_index for params below
keep_index = keep_index_p
if k_params is None:
k_params = self.exog.shape[1]
k_params += getattr(self, 'k_extra', 0)
params_full = np.zeros(k_params)
params_full[keep_index] = res_constr.params
# create dummy results Instance, TODO: wire up properly
# TODO: this could be moved into separate private method if needed
        # discrete L1 fit_regularized doesn't re-estimate AFAICS
# RLM does not have method, disp nor warn_convergence keywords
# OLS, WLS swallows extra kwds with **kwargs, but does not have method='nm'
try:
            # Note: adding full_output=False causes exceptions
res = self.fit(maxiter=0, disp=0, method='nm', skip_hessian=True,
warn_convergence=False, start_params=params_full)
# we get a wrapper back
except (TypeError, ValueError):
res = self.fit()
# Warning: make sure we are not just changing the wrapper instead of
# results #2400
# TODO: do we need to change res._results.scale in some models?
if hasattr(res_constr.model, 'scale'):
# Note: res.model is self
# GLM problem, see #2399,
# TODO: remove from model if not needed anymore
res.model.scale = res._results.scale = res_constr.model.scale
if hasattr(res_constr, 'mle_retvals'):
res._results.mle_retvals = res_constr.mle_retvals
# not available for not scipy optimization, e.g. glm irls
# TODO: what retvals should be required?
# res.mle_retvals['fcall'] = res_constr.mle_retvals.get('fcall', np.nan)
# res.mle_retvals['iterations'] = res_constr.mle_retvals.get(
# 'iterations', np.nan)
# res.mle_retvals['converged'] = res_constr.mle_retvals['converged']
# overwrite all mle_settings
if hasattr(res_constr, 'mle_settings'):
res._results.mle_settings = res_constr.mle_settings
res._results.params = params_full
if (not hasattr(res._results, 'normalized_cov_params') or
res._results.normalized_cov_params is None):
res._results.normalized_cov_params = np.zeros((k_params, k_params))
else:
res._results.normalized_cov_params[...] = 0
# fancy indexing requires integer array
keep_index = np.array(keep_index)
res._results.normalized_cov_params[keep_index[:, None], keep_index] = \
res_constr.normalized_cov_params
k_constr = res_constr.df_resid - res._results.df_resid
if hasattr(res_constr, 'cov_params_default'):
res._results.cov_params_default = np.zeros((k_params, k_params))
res._results.cov_params_default[keep_index[:, None], keep_index] = \
res_constr.cov_params_default
if hasattr(res_constr, 'cov_type'):
res._results.cov_type = res_constr.cov_type
res._results.cov_kwds = res_constr.cov_kwds
res._results.keep_index = keep_index
res._results.df_resid = res_constr.df_resid
res._results.df_model = res_constr.df_model
res._results.k_constr = k_constr
res._results.results_constrained = res_constr
# special temporary workaround for RLM
# need to be able to override robust covariances
if hasattr(res.model, 'M'):
del res._results._cache['resid']
del res._results._cache['fittedvalues']
del res._results._cache['sresid']
cov = res._results._cache['bcov_scaled']
# inplace adjustment
cov[...] = 0
cov[keep_index[:, None], keep_index] = res_constr.bcov_scaled
res._results.cov_params_default = cov
return res | experimental, fit the model subject to zero constraints
Intended for internal use cases until we know what we need.
API will need to change to handle models with two exog.
This is not yet supported by all model subclasses.
This is essentially a simplified version of `fit_constrained`, and
does not need to use `offset`.
The estimation creates a new model with transformed design matrix,
exog, and converts the results back to the original parameterization.
Some subclasses could use a more efficient calculation than using a
new model.
Parameters
----------
keep_index : array_like (int or bool) or slice
            variables that should be kept, i.e. not constrained to zero.
start_params : None or array_like
starting values for the optimization. `start_params` needs to be
given in the original parameter space and are internally
transformed.
k_params : int or None
If None, then we try to infer from start_params or model.
**fit_kwds : keyword arguments
fit_kwds are used in the optimization of the transformed model.
Returns
-------
results : Results instance | _fit_zeros | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
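As a sketch, the method can be exercised on a small OLS problem; this is a private API, so the call below is illustrative only. Coefficients outside `keep_index` come back as exact zeros in the full-length parameter vector:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = sm.add_constant(rng.standard_normal((100, 3)))
y = x @ [1.0, 2.0, 0.0, -1.0] + rng.standard_normal(100)

res = sm.OLS(y, x)._fit_zeros(keep_index=[0, 1, 3])
assert res.params[2] == 0.0  # constrained-out coefficient is exactly zero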
def _fit_collinear(self, atol=1e-14, rtol=1e-13, **kwds):
"""experimental, fit of the model without collinear variables
This currently uses QR to drop variables based on the given
sequence.
Options will be added in future, when the supporting functions
to identify collinear variables become available.
"""
# ------ copied from PR #2380 remove when merged
x = self.exog
tol = atol + rtol * x.var(0)
r = np.linalg.qr(x, mode='r')
mask = np.abs(r.diagonal()) < np.sqrt(tol)
# TODO add to results instance
# idx_collinear = np.where(mask)[0]
idx_keep = np.where(~mask)[0]
return self._fit_zeros(keep_index=idx_keep, **kwds) | experimental, fit of the model without collinear variables
This currently uses QR to drop variables based on the given
sequence.
Options will be added in future, when the supporting functions
to identify collinear variables become available. | _fit_collinear | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def initialize(self):
"""
Initialize (possibly re-initialize) a Model instance. For
instance, the design matrix of a linear model may change
and some things must be recomputed.
"""
if not self.score: # right now score is not optional
self.score = lambda x: approx_fprime(x, self.loglike)
if not self.hessian:
pass
else: # can use approx_hess_p if we have a gradient
if not self.hessian:
pass
# Initialize is called by
# statsmodels.model.LikelihoodModel.__init__
# and should contain any preprocessing that needs to be done for a model
if self.exog is not None:
# assume constant
er = np.linalg.matrix_rank(self.exog)
self.df_model = float(er - 1)
self.df_resid = float(self.exog.shape[0] - er)
else:
self.df_model = np.nan
self.df_resid = np.nan
super().initialize() | Initialize (possibly re-initialize) a Model instance. For
instance, the design matrix of a linear model may change
and some things must be recomputed. | initialize | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
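The degrees-of-freedom bookkeeping above reduces to a matrix-rank computation; a standalone sketch with illustrative arrays:

import numpy as np

exog = np.column_stack([np.ones(10), np.arange(10.0), 2 * np.arange(10.0)])
rank = np.linalg.matrix_rank(exog)   # 2: the last column is collinear
df_model = float(rank - 1)           # slope terms, constant excluded
df_resid = float(exog.shape[0] - rank)
assert (df_model, df_resid) == (1.0, 8.0)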
def expandparams(self, params):
"""
expand to full parameter array when some parameters are fixed
Parameters
----------
params : ndarray
reduced parameter array
Returns
-------
paramsfull : ndarray
expanded parameter array where fixed parameters are included
Notes
-----
Calling this requires that self.fixed_params and self.fixed_paramsmask
are defined.
*developer notes:*
This can be used in the log-likelihood to ...
this could also be replaced by a more general parameter
transformation.
"""
paramsfull = self.fixed_params.copy()
paramsfull[self.fixed_paramsmask] = params
return paramsfull | expand to full parameter array when some parameters are fixed
Parameters
----------
params : ndarray
reduced parameter array
Returns
-------
paramsfull : ndarray
expanded parameter array where fixed parameters are included
Notes
-----
Calling this requires that self.fixed_params and self.fixed_paramsmask
are defined.
*developer notes:*
This can be used in the log-likelihood to ...
this could also be replaced by a more general parameter
transformation. | expandparams | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
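The expansion is a masked scatter; a self-contained sketch with hypothetical fixed values (in the real model, `fixed_params` and `fixed_paramsmask` are attributes set on the instance, with True marking the free parameters):

import numpy as np

fixed_params = np.array([0.0, 1.5, 0.0])           # template incl. fixed values
fixed_paramsmask = np.array([True, False, True])   # True = free parameter

def expand(params):
    full = fixed_params.copy()
    full[fixed_paramsmask] = params
    return full

assert np.allclose(expand(np.array([2.0, -3.0])), [2.0, 1.5, -3.0])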
def reduceparams(self, params):
"""Reduce parameters"""
return params[self.fixed_paramsmask] | Reduce parameters | reduceparams | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def loglike(self, params):
"""Log-likelihood of model at params"""
return self.loglikeobs(params).sum(0) | Log-likelihood of model at params | loglike | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def nloglike(self, params):
"""Negative log-likelihood of model at params"""
return -self.loglikeobs(params).sum(0) | Negative log-likelihood of model at params | nloglike | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def loglikeobs(self, params):
"""
Log-likelihood of the model for all observations at params.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : array_like
The log likelihood of the model evaluated at `params`.
"""
return -self.nloglikeobs(params) | Log-likelihood of the model for all observations at params.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : array_like
The log likelihood of the model evaluated at `params`. | loglikeobs | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def score(self, params):
"""
Gradient of log-likelihood evaluated at params
"""
kwds = {}
kwds.setdefault('centered', True)
return approx_fprime(params, self.loglike, **kwds).ravel() | Gradient of log-likelihood evaluated at params | score | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
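The default score is a centered finite-difference gradient; a sketch checking it against a known analytic gradient for a toy quadratic log-likelihood:

import numpy as np
from statsmodels.tools.numdiff import approx_fprime

def loglike(params):
    # toy log-likelihood whose gradient is -params
    return -0.5 * np.sum(params ** 2)

p = np.array([0.3, -1.2])
grad = approx_fprime(p, loglike, centered=True).ravel()
assert np.allclose(grad, -p, atol=1e-6)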
def score_obs(self, params, **kwds):
"""
Jacobian/Gradient of log-likelihood evaluated at params for each
observation.
"""
# kwds.setdefault('epsilon', 1e-4)
kwds.setdefault('centered', True)
return approx_fprime(params, self.loglikeobs, **kwds) | Jacobian/Gradient of log-likelihood evaluated at params for each
observation. | score_obs | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def hessian(self, params):
"""
Hessian of log-likelihood evaluated at params
"""
from statsmodels.tools.numdiff import approx_hess
# need options for hess (epsilon)
return approx_hess(params, self.loglike) | Hessian of log-likelihood evaluated at params | hessian | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def hessian_factor(self, params, scale=None, observed=True):
"""Weights for calculating Hessian
Parameters
----------
params : ndarray
parameter at which Hessian is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
hessian_factor : ndarray, 1d
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`
"""
raise NotImplementedError | Weights for calculating Hessian
Parameters
----------
params : ndarray
parameter at which Hessian is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
hessian_factor : ndarray, 1d
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)` | hessian_factor | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
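The contract in the Returns note can be checked directly: given a weight vector, the Hessian is a weighted cross-product of the design matrix. A sketch with illustrative arrays:

import numpy as np

rng = np.random.default_rng(1)
exog = rng.standard_normal((50, 3))
hessian_factor = rng.uniform(0.5, 2.0, size=50)   # hypothetical weights

hess = (exog.T * hessian_factor) @ exog           # (exog.T * w).dot(exog)
hess_loop = sum(w * np.outer(row, row) for w, row in zip(hessian_factor, exog))
assert np.allclose(hess, hess_loop)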
def initialize(self, model, params, **kwargs):
"""
Initialize (possibly re-initialize) a Results instance.
Parameters
----------
model : Model
The model instance.
params : ndarray
The model parameters.
**kwargs
Any additional keyword arguments required to initialize the model.
"""
self.params = params
self.model = model
if hasattr(model, 'k_constant'):
self.k_constant = model.k_constant | Initialize (possibly re-initialize) a Results instance.
Parameters
----------
model : Model
The model instance.
params : ndarray
The model parameters.
**kwargs
Any additional keyword arguments required to initialize the model. | initialize | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def predict(self, exog=None, transform=True, *args, **kwargs):
"""
Call self.model.predict with self.params as the first argument.
Parameters
----------
exog : array_like, optional
The values for which you want to predict. see Notes below.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
*args
Additional arguments to pass to the model, see the
predict method of the model for the details.
**kwargs
Additional keywords arguments to pass to the model, see the
predict method of the model for the details.
Returns
-------
array_like
See self.model.predict.
Notes
-----
        The types of exog that are supported depend on whether a formula
was used in the specification of the model.
If a formula was used, then exog is processed in the same way as
the original data. This transformation needs to have key access to the
        same variable names, and can be a pandas DataFrame or a dict-like
object that contains numpy arrays.
If no formula was used, then the provided exog needs to have the
same number of columns as the original exog in the model. No
transformation of the data is performed except converting it to
a numpy array.
Row indices as in pandas data frames are supported, and added to the
returned prediction.
"""
exog, exog_index = self._transform_predict_exog(exog,
transform=transform)
predict_results = self.model.predict(self.params, exog, *args,
**kwargs)
if exog_index is not None and not hasattr(predict_results,
'predicted_values'):
if predict_results.ndim == 1:
return pd.Series(predict_results, index=exog_index)
else:
return pd.DataFrame(predict_results, index=exog_index)
else:
return predict_results | Call self.model.predict with self.params as the first argument.
Parameters
----------
exog : array_like, optional
The values for which you want to predict. see Notes below.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
*args
Additional arguments to pass to the model, see the
predict method of the model for the details.
**kwargs
Additional keywords arguments to pass to the model, see the
predict method of the model for the details.
Returns
-------
array_like
See self.model.predict.
Notes
-----
        The types of exog that are supported depend on whether a formula
was used in the specification of the model.
If a formula was used, then exog is processed in the same way as
the original data. This transformation needs to have key access to the
        same variable names, and can be a pandas DataFrame or a dict-like
object that contains numpy arrays.
If no formula was used, then the provided exog needs to have the
same number of columns as the original exog in the model. No
transformation of the data is performed except converting it to
a numpy array.
Row indices as in pandas data frames are supported, and added to the
returned prediction. | predict | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def summary(self):
"""
Summary
Not implemented
"""
raise NotImplementedError | Summary
Not implemented | summary | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def normalized_cov_params(self):
"""See specific model class docstring"""
raise NotImplementedError | See specific model class docstring | normalized_cov_params | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def use_t(self):
"""Flag indicating to use the Student's distribution in inference."""
return self._use_t | Flag indicating to use the Student's distribution in inference. | use_t | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def llf(self):
"""Log-likelihood of model"""
return self.model.loglike(self.params) | Log-likelihood of model | llf | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def tvalues(self):
"""
Return the t-statistic for a given parameter estimate.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
return self.params / self.bse | Return the t-statistic for a given parameter estimate. | tvalues | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def pvalues(self):
"""The two-tailed p values for the t-stats of the params."""
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
if self.use_t:
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
return stats.t.sf(np.abs(self.tvalues), df_resid) * 2
else:
return stats.norm.sf(np.abs(self.tvalues)) * 2 | The two-tailed p values for the t-stats of the params. | pvalues | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
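The two branches correspond to the usual survival-function identities; a sketch reproducing both, with hypothetical t-values and degrees of freedom. The t branch is always the more conservative of the two:

import numpy as np
from scipy import stats

tvalues = np.array([2.1, -0.5])
df_resid = 30

p_t = stats.t.sf(np.abs(tvalues), df_resid) * 2       # use_t=True branch
p_norm = stats.norm.sf(np.abs(tvalues)) * 2           # use_t=False branch
assert np.all((0 < p_t) & (p_t < 1)) and p_t[0] > p_norm[0]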
def cov_params(self, r_matrix=None, column=None, scale=None, cov_p=None,
other=None):
"""
Compute the variance/covariance matrix.
The variance/covariance matrix can be of a linear contrast of the
estimated parameters or all params multiplied by scale which will
usually be an estimate of sigma^2. Scale is assumed to be a scalar.
Parameters
----------
r_matrix : array_like
Can be 1d, or 2d. Can be used alone or with other.
column : array_like, optional
Must be used on its own. Can be 0d or 1d see below.
scale : float, optional
Can be specified or not. Default is None, which means that
the scale argument is taken from the model.
cov_p : ndarray, optional
The covariance of the parameters. If not provided, this value is
read from `self.normalized_cov_params` or
`self.cov_params_default`.
other : array_like, optional
Can be used when r_matrix is specified.
Returns
-------
ndarray
The covariance matrix of the parameter estimates or of linear
combination of parameter estimates. See Notes.
Notes
-----
(The below are assumed to be in matrix notation.)
If no argument is specified returns the covariance matrix of a model
``(scale)*(X.T X)^(-1)``
If contrast is specified it pre and post-multiplies as follows
``(scale) * r_matrix (X.T X)^(-1) r_matrix.T``
If contrast and other are specified returns
``(scale) * r_matrix (X.T X)^(-1) other.T``
If column is specified returns
``(scale) * (X.T X)^(-1)[column,column]`` if column is 0d
OR
``(scale) * (X.T X)^(-1)[column][:,column]`` if column is 1d
"""
if (hasattr(self, 'mle_settings') and
self.mle_settings['optimizer'] in ['l1', 'l1_cvxopt_cp']):
dot_fun = nan_dot
else:
dot_fun = np.dot
if (cov_p is None and self.normalized_cov_params is None and
not hasattr(self, 'cov_params_default')):
raise ValueError('need covariance of parameters for computing '
'(unnormalized) covariances')
if column is not None and (r_matrix is not None or other is not None):
raise ValueError('Column should be specified without other '
'arguments.')
if other is not None and r_matrix is None:
raise ValueError('other can only be specified with r_matrix')
if cov_p is None:
if hasattr(self, 'cov_params_default'):
cov_p = self.cov_params_default
else:
if scale is None:
scale = self.scale
cov_p = self.normalized_cov_params * scale
if column is not None:
column = np.asarray(column)
if column.shape == ():
return cov_p[column, column]
else:
return cov_p[column[:, None], column]
elif r_matrix is not None:
r_matrix = np.asarray(r_matrix)
if r_matrix.shape == ():
raise ValueError("r_matrix should be 1d or 2d")
if other is None:
other = r_matrix
else:
other = np.asarray(other)
tmp = dot_fun(r_matrix, dot_fun(cov_p, np.transpose(other)))
return tmp
else: # if r_matrix is None and column is None:
return cov_p | Compute the variance/covariance matrix.
The variance/covariance matrix can be of a linear contrast of the
estimated parameters or all params multiplied by scale which will
usually be an estimate of sigma^2. Scale is assumed to be a scalar.
Parameters
----------
r_matrix : array_like
Can be 1d, or 2d. Can be used alone or with other.
column : array_like, optional
Must be used on its own. Can be 0d or 1d see below.
scale : float, optional
Can be specified or not. Default is None, which means that
the scale argument is taken from the model.
cov_p : ndarray, optional
The covariance of the parameters. If not provided, this value is
read from `self.normalized_cov_params` or
`self.cov_params_default`.
other : array_like, optional
Can be used when r_matrix is specified.
Returns
-------
ndarray
The covariance matrix of the parameter estimates or of linear
combination of parameter estimates. See Notes.
Notes
-----
(The below are assumed to be in matrix notation.)
If no argument is specified returns the covariance matrix of a model
``(scale)*(X.T X)^(-1)``
If contrast is specified it pre and post-multiplies as follows
``(scale) * r_matrix (X.T X)^(-1) r_matrix.T``
If contrast and other are specified returns
``(scale) * r_matrix (X.T X)^(-1) other.T``
If column is specified returns
``(scale) * (X.T X)^(-1)[column,column]`` if column is 0d
OR
``(scale) * (X.T X)^(-1)[column][:,column]`` if column is 1d | cov_params | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
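A sketch of the `r_matrix` path, i.e. the variance of a linear combination of coefficients; the fitted model below is hypothetical:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(2)
x = sm.add_constant(rng.standard_normal((200, 2)))
y = x @ [1.0, 0.5, -0.5] + rng.standard_normal(200)
res = sm.OLS(y, x).fit()

r = np.array([0.0, 1.0, -1.0])         # contrast beta_1 - beta_2
var_diff = res.cov_params(r_matrix=r)  # scalar: r (X.T X)^(-1) r.T * scale
cov = res.cov_params()
assert np.allclose(var_diff, r @ cov @ r)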
def t_test(self, r_matrix, cov_p=None, use_t=None):
"""
        Compute a t-test for each linear hypothesis of the form Rb = q.
Parameters
----------
r_matrix : {array_like, str, tuple}
One of:
- array : If an array is given, a p x k 2d array or length k 1d
array specifying the linear restrictions. It is assumed
that the linear combination is equal to zero.
- str : The full hypotheses to test can be given as a string.
See the examples.
- tuple : A tuple of arrays in the form (R, q). If q is given,
can be either a scalar or a length p row vector.
cov_p : array_like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
use_t : bool, optional
If use_t is None, then the default of the model is used. If use_t
is True, then the p-values are based on the t distribution. If
use_t is False, then the p-values are based on the normal
distribution.
Returns
-------
ContrastResults
The results for the test are attributes of this results instance.
The available results have the same elements as the parameter table
in `summary()`.
See Also
--------
tvalues : Individual t statistics for the estimated parameters.
f_test : Perform an F tests on model parameters.
patsy.DesignInfo.linear_constraint : Specify a linear constraint.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> r = np.zeros_like(results.params)
>>> r[5:] = [1,-1]
>>> print(r)
[ 0. 0. 0. 0. 0. 1. -1.]
        r tests that the coefficients on the 5th and 6th independent
        variables are the same.
>>> T_test = results.t_test(r)
>>> print(T_test)
Test for Constraints
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
c0 -1829.2026 455.391 -4.017 0.003 -2859.368 -799.037
==============================================================================
>>> T_test.effect
-1829.2025687192481
>>> T_test.sd
455.39079425193762
>>> T_test.tvalue
-4.0167754636411717
>>> T_test.pvalue
0.0015163772380899498
Alternatively, you can specify the hypothesis tests using a string
>>> from statsmodels.formula.api import ols
>>> dta = sm.datasets.longley.load_pandas().data
>>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
>>> results = ols(formula, dta).fit()
>>> hypotheses = 'GNPDEFL = GNP, UNEMP = 2, YEAR/1829 = 1'
>>> t_test = results.t_test(hypotheses)
>>> print(t_test)
Test for Constraints
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
c0 15.0977 84.937 0.178 0.863 -177.042 207.238
c1 -2.0202 0.488 -8.231 0.000 -3.125 -0.915
c2 1.0001 0.249 0.000 1.000 0.437 1.563
==============================================================================
"""
use_t = bool_like(use_t, "use_t", strict=True, optional=True)
if self.params.ndim == 2:
names = [f'y{i[0]}_{i[1]}'
for i in self.model.data.cov_names]
else:
names = self.model.data.cov_names
mgr = FormulaManager()
lc = mgr.get_linear_constraints(r_matrix, names)
r_matrix, q_matrix = lc.constraint_matrix, lc.constraint_values
num_ttests = r_matrix.shape[0]
num_params = r_matrix.shape[1]
if (cov_p is None and self.normalized_cov_params is None and
not hasattr(self, 'cov_params_default')):
raise ValueError('Need covariance of parameters for computing '
'T statistics')
params = self.params.ravel(order="F")
if num_params != params.shape[0]:
raise ValueError('r_matrix and params are not aligned')
if q_matrix is None:
q_matrix = np.zeros(num_ttests)
else:
q_matrix = np.asarray(q_matrix)
q_matrix = q_matrix.squeeze()
if q_matrix.size > 1:
if q_matrix.shape[0] != num_ttests:
raise ValueError("r_matrix and q_matrix must have the same "
"number of rows")
if use_t is None:
# switch to use_t false if undefined
use_t = (hasattr(self, 'use_t') and self.use_t)
_effect = np.dot(r_matrix, params)
# Perform the test
if num_ttests > 1:
_sd = np.sqrt(np.diag(self.cov_params(
r_matrix=r_matrix, cov_p=cov_p)))
else:
_sd = np.sqrt(self.cov_params(r_matrix=r_matrix, cov_p=cov_p))
_t = (_effect - q_matrix) * recipr(_sd)
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
if use_t:
return ContrastResults(effect=_effect, t=_t, sd=_sd,
df_denom=df_resid)
else:
return ContrastResults(effect=_effect, statistic=_t, sd=_sd,
df_denom=df_resid,
                                   distribution='norm') | Compute a t-test for each linear hypothesis of the form Rb = q.
Parameters
----------
r_matrix : {array_like, str, tuple}
One of:
- array : If an array is given, a p x k 2d array or length k 1d
array specifying the linear restrictions. It is assumed
that the linear combination is equal to zero.
- str : The full hypotheses to test can be given as a string.
See the examples.
- tuple : A tuple of arrays in the form (R, q). If q is given,
can be either a scalar or a length p row vector.
cov_p : array_like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
use_t : bool, optional
If use_t is None, then the default of the model is used. If use_t
is True, then the p-values are based on the t distribution. If
use_t is False, then the p-values are based on the normal
distribution.
Returns
-------
ContrastResults
The results for the test are attributes of this results instance.
The available results have the same elements as the parameter table
in `summary()`.
See Also
--------
tvalues : Individual t statistics for the estimated parameters.
f_test : Perform an F tests on model parameters.
patsy.DesignInfo.linear_constraint : Specify a linear constraint.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> r = np.zeros_like(results.params)
>>> r[5:] = [1,-1]
>>> print(r)
[ 0. 0. 0. 0. 0. 1. -1.]
        r tests that the coefficients on the 5th and 6th independent
        variables are the same.
>>> T_test = results.t_test(r)
>>> print(T_test)
Test for Constraints
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
c0 -1829.2026 455.391 -4.017 0.003 -2859.368 -799.037
==============================================================================
>>> T_test.effect
-1829.2025687192481
>>> T_test.sd
455.39079425193762
>>> T_test.tvalue
-4.0167754636411717
>>> T_test.pvalue
0.0015163772380899498
Alternatively, you can specify the hypothesis tests using a string
>>> from statsmodels.formula.api import ols
>>> dta = sm.datasets.longley.load_pandas().data
>>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
>>> results = ols(formula, dta).fit()
>>> hypotheses = 'GNPDEFL = GNP, UNEMP = 2, YEAR/1829 = 1'
>>> t_test = results.t_test(hypotheses)
>>> print(t_test)
Test for Constraints
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
c0 15.0977 84.937 0.178 0.863 -177.042 207.238
c1 -2.0202 0.488 -8.231 0.000 -3.125 -0.915
c2 1.0001 0.249 0.000 1.000 0.437 1.563
============================================================================== | t_test | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def f_test(self, r_matrix, cov_p=None, invcov=None):
"""
Compute the F-test for a joint linear hypothesis.
This is a special case of `wald_test` that always uses the F
distribution.
Parameters
----------
r_matrix : {array_like, str, tuple}
One of:
- array : An r x k array where r is the number of restrictions to
test and k is the number of regressors. It is assumed
that the linear combination is equal to zero.
- str : The full hypotheses to test can be given as a string.
See the examples.
- tuple : A tuple of arrays in the form (R, q), ``q`` can be
either a scalar or a length k row vector.
cov_p : array_like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
invcov : array_like, optional
A q x q array to specify an inverse covariance matrix based on a
restrictions matrix.
Returns
-------
ContrastResults
The results for the test are attributes of this results instance.
See Also
--------
t_test : Perform a single hypothesis test.
wald_test : Perform a Wald-test using a quadratic form.
statsmodels.stats.contrast.ContrastResults : Test results.
patsy.DesignInfo.linear_constraint : Specify a linear constraint.
Notes
-----
The matrix `r_matrix` is assumed to be non-singular. More precisely,
r_matrix (pX pX.T) r_matrix.T
is assumed invertible. Here, pX is the generalized inverse of the
design matrix of the model. There can be problems in non-OLS models
where the rank of the covariance of the noise is not full.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> A = np.identity(len(results.params))
>>> A = A[1:,:]
This tests that each coefficient is jointly statistically
significantly different from zero.
>>> print(results.f_test(A))
<F test: F=array([[ 330.28533923]]), p=4.984030528700946e-10, df_denom=9, df_num=6>
Compare this to
>>> results.fvalue
330.2853392346658
>>> results.f_pvalue
4.98403096572e-10
>>> B = np.array(([0,0,1,-1,0,0,0],[0,0,0,0,0,1,-1]))
This tests that the coefficient on the 2nd and 3rd regressors are
equal and jointly that the coefficient on the 5th and 6th regressors
are equal.
>>> print(results.f_test(B))
<F test: F=array([[ 9.74046187]]), p=0.005605288531708235, df_denom=9, df_num=2>
Alternatively, you can specify the hypothesis tests using a string
>>> from statsmodels.datasets import longley
>>> from statsmodels.formula.api import ols
>>> dta = longley.load_pandas().data
>>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
>>> results = ols(formula, dta).fit()
>>> hypotheses = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
>>> f_test = results.f_test(hypotheses)
>>> print(f_test)
<F test: F=array([[ 144.17976065]]), p=6.322026217355609e-08, df_denom=9, df_num=3>
"""
res = self.wald_test(r_matrix, cov_p=cov_p, invcov=invcov, use_f=True, scalar=True)
return res | Compute the F-test for a joint linear hypothesis.
This is a special case of `wald_test` that always uses the F
distribution.
Parameters
----------
r_matrix : {array_like, str, tuple}
One of:
- array : An r x k array where r is the number of restrictions to
test and k is the number of regressors. It is assumed
that the linear combination is equal to zero.
- str : The full hypotheses to test can be given as a string.
See the examples.
- tuple : A tuple of arrays in the form (R, q), ``q`` can be
either a scalar or a length k row vector.
cov_p : array_like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
invcov : array_like, optional
A q x q array to specify an inverse covariance matrix based on a
restrictions matrix.
Returns
-------
ContrastResults
The results for the test are attributes of this results instance.
See Also
--------
t_test : Perform a single hypothesis test.
wald_test : Perform a Wald-test using a quadratic form.
statsmodels.stats.contrast.ContrastResults : Test results.
patsy.DesignInfo.linear_constraint : Specify a linear constraint.
Notes
-----
The matrix `r_matrix` is assumed to be non-singular. More precisely,
r_matrix (pX pX.T) r_matrix.T
is assumed invertible. Here, pX is the generalized inverse of the
design matrix of the model. There can be problems in non-OLS models
where the rank of the covariance of the noise is not full.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> A = np.identity(len(results.params))
>>> A = A[1:,:]
This tests that each coefficient is jointly statistically
significantly different from zero.
>>> print(results.f_test(A))
<F test: F=array([[ 330.28533923]]), p=4.984030528700946e-10, df_denom=9, df_num=6>
Compare this to
>>> results.fvalue
330.2853392346658
>>> results.f_pvalue
4.98403096572e-10
>>> B = np.array(([0,0,1,-1,0,0,0],[0,0,0,0,0,1,-1]))
This tests that the coefficient on the 2nd and 3rd regressors are
equal and jointly that the coefficient on the 5th and 6th regressors
are equal.
>>> print(results.f_test(B))
<F test: F=array([[ 9.74046187]]), p=0.005605288531708235, df_denom=9, df_num=2>
Alternatively, you can specify the hypothesis tests using a string
>>> from statsmodels.datasets import longley
>>> from statsmodels.formula.api import ols
>>> dta = longley.load_pandas().data
>>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
>>> results = ols(formula, dta).fit()
>>> hypotheses = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
>>> f_test = results.f_test(hypotheses)
>>> print(f_test)
<F test: F=array([[ 144.17976065]]), p=6.322026217355609e-08, df_denom=9, df_num=3> | f_test | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def wald_test(self, r_matrix, cov_p=None, invcov=None,
use_f=None, df_constraints=None, scalar=None):
"""
Compute a Wald-test for a joint linear hypothesis.
Parameters
----------
r_matrix : {array_like, str, tuple}
One of:
- array : An r x k array where r is the number of restrictions to
test and k is the number of regressors. It is assumed that the
linear combination is equal to zero.
- str : The full hypotheses to test can be given as a string.
See the examples.
- tuple : A tuple of arrays in the form (R, q), ``q`` can be
either a scalar or a length p row vector.
cov_p : array_like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
invcov : array_like, optional
A q x q array to specify an inverse covariance matrix based on a
restrictions matrix.
use_f : bool
If True, then the F-distribution is used. If False, then the
asymptotic distribution, chisquare is used. If use_f is None, then
the F distribution is used if the model specifies that use_t is True.
The test statistic is proportionally adjusted for the distribution
by the number of constraints in the hypothesis.
df_constraints : int, optional
The number of constraints. If not provided the number of
constraints is determined from r_matrix.
scalar : bool, optional
Flag indicating whether the Wald test statistic should be returned
            as a scalar float. The current behavior is to return an array.
This will switch to a scalar float after 0.14 is released. To
get the future behavior now, set scalar to True. To silence
the warning and retain the legacy behavior, set scalar to
False.
Returns
-------
ContrastResults
The results for the test are attributes of this results instance.
See Also
--------
f_test : Perform an F tests on model parameters.
t_test : Perform a single hypothesis test.
statsmodels.stats.contrast.ContrastResults : Test results.
patsy.DesignInfo.linear_constraint : Specify a linear constraint.
Notes
-----
The matrix `r_matrix` is assumed to be non-singular. More precisely,
r_matrix (pX pX.T) r_matrix.T
is assumed invertible. Here, pX is the generalized inverse of the
design matrix of the model. There can be problems in non-OLS models
where the rank of the covariance of the noise is not full.
"""
use_f = bool_like(use_f, "use_f", strict=True, optional=True)
scalar = bool_like(scalar, "scalar", strict=True, optional=True)
if use_f is None:
# switch to use_t false if undefined
use_f = (hasattr(self, 'use_t') and self.use_t)
if self.params.ndim == 2:
names = [f'y{i[0]}_{i[1]}'
for i in self.model.data.cov_names]
else:
names = self.model.data.cov_names
params = self.params.ravel(order="F")
mgr = FormulaManager()
lc = mgr.get_linear_constraints(r_matrix, names)
r_matrix, q_matrix = lc.constraint_matrix, lc.constraint_values
if (self.normalized_cov_params is None and cov_p is None and
invcov is None and not hasattr(self, 'cov_params_default')):
raise ValueError('need covariance of parameters for computing '
'F statistics')
cparams = np.dot(r_matrix, params[:, None])
J = float(r_matrix.shape[0]) # number of restrictions
if q_matrix is None:
q_matrix = np.zeros(J)
else:
q_matrix = np.asarray(q_matrix)
if q_matrix.ndim == 1:
q_matrix = q_matrix[:, None]
if q_matrix.shape[0] != J:
raise ValueError("r_matrix and q_matrix must have the same "
"number of rows")
Rbq = cparams - q_matrix
if invcov is None:
cov_p = self.cov_params(r_matrix=r_matrix, cov_p=cov_p)
if np.isnan(cov_p).max():
raise ValueError("r_matrix performs f_test for using "
"dimensions that are asymptotically "
"non-normal")
invcov = np.linalg.pinv(cov_p)
J_ = np.linalg.matrix_rank(cov_p)
if J_ < J:
warnings.warn('covariance of constraints does not have full '
'rank. The number of constraints is %d, but '
'rank is %d' % (J, J_), ValueWarning)
J = J_
# TODO streamline computation, we do not need to compute J if given
if df_constraints is not None:
# let caller override J by df_constraint
J = df_constraints
if (hasattr(self, 'mle_settings') and
self.mle_settings['optimizer'] in ['l1', 'l1_cvxopt_cp']):
F = nan_dot(nan_dot(Rbq.T, invcov), Rbq)
else:
F = np.dot(np.dot(Rbq.T, invcov), Rbq)
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
if scalar is None:
warnings.warn(
"The behavior of wald_test will change after 0.14 to returning "
"scalar test statistic values. To get the future behavior now, "
"set scalar to True. To silence this message while retaining "
"the legacy behavior, set scalar to False.",
FutureWarning
)
scalar = False
if scalar and F.size == 1:
F = float(np.squeeze(F))
if use_f:
F /= J
return ContrastResults(F=F, df_denom=df_resid,
df_num=J) #invcov.shape[0])
else:
return ContrastResults(chi2=F, df_denom=J, statistic=F,
distribution='chi2', distargs=(J,)) | Compute a Wald-test for a joint linear hypothesis.
Parameters
----------
r_matrix : {array_like, str, tuple}
One of:
- array : An r x k array where r is the number of restrictions to
test and k is the number of regressors. It is assumed that the
linear combination is equal to zero.
- str : The full hypotheses to test can be given as a string.
See the examples.
- tuple : A tuple of arrays in the form (R, q), ``q`` can be
either a scalar or a length p row vector.
cov_p : array_like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
invcov : array_like, optional
A q x q array to specify an inverse covariance matrix based on a
restrictions matrix.
use_f : bool
If True, then the F-distribution is used. If False, then the
asymptotic distribution, chisquare is used. If use_f is None, then
the F distribution is used if the model specifies that use_t is True.
The test statistic is proportionally adjusted for the distribution
by the number of constraints in the hypothesis.
df_constraints : int, optional
The number of constraints. If not provided the number of
constraints is determined from r_matrix.
scalar : bool, optional
Flag indicating whether the Wald test statistic should be returned
            as a scalar float. The current behavior is to return an array.
This will switch to a scalar float after 0.14 is released. To
get the future behavior now, set scalar to True. To silence
the warning and retain the legacy behavior, set scalar to
False.
Returns
-------
ContrastResults
The results for the test are attributes of this results instance.
See Also
--------
f_test : Perform an F tests on model parameters.
t_test : Perform a single hypothesis test.
statsmodels.stats.contrast.ContrastResults : Test results.
patsy.DesignInfo.linear_constraint : Specify a linear constraint.
Notes
-----
The matrix `r_matrix` is assumed to be non-singular. More precisely,
r_matrix (pX pX.T) r_matrix.T
is assumed invertible. Here, pX is the generalized inverse of the
design matrix of the model. There can be problems in non-OLS models
where the rank of the covariance of the noise is not full. | wald_test | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
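The core of the method is the quadratic form (Rb - q)' [R cov R']^(-1) (Rb - q); a sketch verifying it against `wald_test` on a hypothetical toy fit:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(3)
x = sm.add_constant(rng.standard_normal((150, 2)))
y = x @ [0.0, 1.0, 1.0] + rng.standard_normal(150)
res = sm.OLS(y, x).fit()

R = np.array([[0.0, 1.0, -1.0]])               # H0: beta_1 = beta_2
Rbq = R @ res.params                           # q = 0
stat = Rbq @ np.linalg.pinv(R @ res.cov_params() @ R.T) @ Rbq
wt = res.wald_test(R, use_f=False, scalar=True)
assert np.allclose(stat, wt.statistic)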
def wald_test_terms(self, skip_single=False, extra_constraints=None,
combine_terms=None, scalar=None):
"""
Compute a sequence of Wald tests for terms over multiple columns.
This computes joined Wald tests for the hypothesis that all
coefficients corresponding to a `term` are zero.
`Terms` are defined by the underlying formula or by string matching.
Parameters
----------
skip_single : bool
            If true, then terms that consist of only a single column and,
            therefore, refer to only a single parameter are skipped.
            If false, then all terms are included.
extra_constraints : ndarray
Additional constraints to test. Note that this input has not been
tested.
combine_terms : {list[str], None}
Each string in this list is matched to the name of the terms or
the name of the exogenous variables. All columns whose name
includes that string are combined in one joint test.
scalar : bool, optional
Flag indicating whether the Wald test statistic should be returned
            as a scalar float. The current behavior is to return an array.
This will switch to a scalar float after 0.14 is released. To
get the future behavior now, set scalar to True. To silence
the warning and retain the legacy behavior, set scalar to
False.
Returns
-------
WaldTestResults
The result instance contains `table` which is a pandas DataFrame
with the test results: test statistic, degrees of freedom and
pvalues.
Examples
--------
>>> res_ols = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", data).fit()
>>> res_ols.wald_test_terms()
<class 'statsmodels.stats.contrast.WaldTestResults'>
F P>F df constraint df denom
Intercept 279.754525 2.37985521351e-22 1 51
C(Duration, Sum) 5.367071 0.0245738436636 1 51
C(Weight, Sum) 12.432445 3.99943118767e-05 2 51
C(Duration, Sum):C(Weight, Sum) 0.176002 0.83912310946 2 51
>>> res_poi = Poisson.from_formula("Days ~ C(Weight) * C(Duration)", \
data).fit(cov_type='HC0')
>>> wt = res_poi.wald_test_terms(skip_single=False, \
combine_terms=['Duration', 'Weight'])
>>> print(wt)
chi2 P>chi2 df constraint
Intercept 15.695625 7.43960374424e-05 1
C(Weight) 16.132616 0.000313940174705 2
C(Duration) 1.009147 0.315107378931 1
C(Weight):C(Duration) 0.216694 0.897315972824 2
Duration 11.187849 0.010752286833 3
Weight 30.263368 4.32586407145e-06 4
"""
# lazy import
mgr = FormulaManager()
result = self
if extra_constraints is None:
extra_constraints = []
if combine_terms is None:
combine_terms = []
model_spec = getattr(result.model.data, 'model_spec', None)
        # extra_constraints was normalized to a list above, so check emptiness
        if model_spec is None and not extra_constraints:
            raise ValueError('no constraints, nothing to do')
identity = np.eye(len(result.params))
constraints = []
combined = defaultdict(list)
if model_spec is not None:
for term in model_spec.terms:
cols = mgr.get_slice(model_spec, term)
name = mgr.get_term_name(term)
constraint_matrix = identity[cols]
# check if in combined
for cname in combine_terms:
if cname in name:
combined[cname].append(constraint_matrix)
k_constraint = constraint_matrix.shape[0]
if skip_single:
if k_constraint == 1:
continue
constraints.append((name, constraint_matrix))
combined_constraints = []
for cname in combine_terms:
combined_constraints.append((cname, np.vstack(combined[cname])))
else:
# check by exog/params names if there is no formula info
for col, name in enumerate(result.model.exog_names):
constraint_matrix = np.atleast_2d(identity[col])
# check if in combined
for cname in combine_terms:
if cname in name:
combined[cname].append(constraint_matrix)
if skip_single:
continue
constraints.append((name, constraint_matrix))
combined_constraints = []
for cname in combine_terms:
combined_constraints.append((cname, np.vstack(combined[cname])))
use_t = result.use_t
distribution = ['chi2', 'F'][use_t]
res_wald = []
index = []
for name, constraint in constraints + combined_constraints + extra_constraints:
wt = result.wald_test(constraint, scalar=scalar)
row = [wt.statistic, wt.pvalue, constraint.shape[0]]
if use_t:
row.append(wt.df_denom)
res_wald.append(row)
index.append(name)
        # distribution neutral names
col_names = ['statistic', 'pvalue', 'df_constraint']
if use_t:
col_names.append('df_denom')
# TODO: maybe move DataFrame creation to results class
from pandas import DataFrame
table = DataFrame(res_wald, index=index, columns=col_names)
res = WaldTestResults(None, distribution, None, table=table)
# TODO: remove temp again, added for testing
res.temp = constraints + combined_constraints + extra_constraints
return res | Compute a sequence of Wald tests for terms over multiple columns.
This computes joined Wald tests for the hypothesis that all
coefficients corresponding to a `term` are zero.
`Terms` are defined by the underlying formula or by string matching.
Parameters
----------
skip_single : bool
            If true, then terms that consist of only a single column and,
            therefore, refer to only a single parameter are skipped.
            If false, then all terms are included.
extra_constraints : ndarray
Additional constraints to test. Note that this input has not been
tested.
combine_terms : {list[str], None}
Each string in this list is matched to the name of the terms or
the name of the exogenous variables. All columns whose name
includes that string are combined in one joint test.
scalar : bool, optional
Flag indicating whether the Wald test statistic should be returned
            as a scalar float. The current behavior is to return an array.
This will switch to a scalar float after 0.14 is released. To
get the future behavior now, set scalar to True. To silence
the warning and retain the legacy behavior, set scalar to
False.
Returns
-------
WaldTestResults
The result instance contains `table` which is a pandas DataFrame
with the test results: test statistic, degrees of freedom and
pvalues.
Examples
--------
>>> res_ols = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", data).fit()
>>> res_ols.wald_test_terms()
<class 'statsmodels.stats.contrast.WaldTestResults'>
F P>F df constraint df denom
Intercept 279.754525 2.37985521351e-22 1 51
C(Duration, Sum) 5.367071 0.0245738436636 1 51
C(Weight, Sum) 12.432445 3.99943118767e-05 2 51
C(Duration, Sum):C(Weight, Sum) 0.176002 0.83912310946 2 51
>>> res_poi = Poisson.from_formula("Days ~ C(Weight) * C(Duration)", \
data).fit(cov_type='HC0')
>>> wt = res_poi.wald_test_terms(skip_single=False, \
combine_terms=['Duration', 'Weight'])
>>> print(wt)
chi2 P>chi2 df constraint
Intercept 15.695625 7.43960374424e-05 1
C(Weight) 16.132616 0.000313940174705 2
C(Duration) 1.009147 0.315107378931 1
C(Weight):C(Duration) 0.216694 0.897315972824 2
Duration 11.187849 0.010752286833 3
Weight 30.263368 4.32586407145e-06 4 | wald_test_terms | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
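The docstring examples assume a preloaded `data` frame; a self-contained sketch with synthetic categorical data (all names and sizes are illustrative) might look like:

# Sketch: per-term joint Wald tests on a formula-based OLS fit.
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

rng = np.random.default_rng(0)
df = pd.DataFrame({
    "y": rng.normal(size=120),
    "g": rng.choice(["a", "b", "c"], size=120),  # categorical term -> 2 columns
    "x": rng.normal(size=120),
})
res = smf.ols("y ~ C(g) + x", data=df).fit()

# One joint test per term; C(g) spans two coefficients, x a single one.
print(res.wald_test_terms(scalar=True).table)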
def t_test_pairwise(self, term_name, method='hs', alpha=0.05,
factor_labels=None):
"""
Perform pairwise t_test with multiple testing corrected p-values.
This uses the formula's model_spec encoding contrast matrix and should
work for all encodings of a main effect.
Parameters
----------
term_name : str
The name of the term for which pairwise comparisons are computed.
Term names for categorical effects are created by patsy and
correspond to the main part of the exog names.
method : {str, list[str]}
The multiple testing p-value correction to apply. The default is
            'hs'. See statsmodels.stats.multitest.multipletests.
alpha : float
The significance level for multiple testing reject decision.
factor_labels : {list[str], None}
Labels for the factor levels used for pairwise labels. If not
provided, then the labels from the formula's model_spec are used.
Returns
-------
MultiCompResult
The results are stored as attributes, the main attributes are the
following two. Other attributes are added for debugging purposes
or as background information.
- result_frame : pandas DataFrame with t_test results and multiple
testing corrected p-values.
- contrasts : matrix of constraints of the null hypothesis in the
t_test.
Notes
-----
Status: experimental. Currently only checked for treatment coding with
and without specified reference level.
Currently there are no multiple testing corrected confidence intervals
available.
Examples
--------
>>> res = ols("np.log(Days+1) ~ C(Weight) + C(Duration)", data).fit()
>>> pw = res.t_test_pairwise("C(Weight)")
>>> pw.result_frame
coef std err t P>|t| Conf. Int. Low
2-1 0.632315 0.230003 2.749157 8.028083e-03 0.171563
3-1 1.302555 0.230003 5.663201 5.331513e-07 0.841803
3-2 0.670240 0.230003 2.914044 5.119126e-03 0.209488
Conf. Int. Upp. pvalue-hs reject-hs
2-1 1.093067 0.010212 True
3-1 1.763307 0.000002 True
3-2 1.130992 0.010212 True
"""
res = t_test_pairwise(self, term_name, method=method, alpha=alpha,
factor_labels=factor_labels)
return res | Perform pairwise t_test with multiple testing corrected p-values.
This uses the formula's model_spec encoding contrast matrix and should
work for all encodings of a main effect.
Parameters
----------
term_name : str
The name of the term for which pairwise comparisons are computed.
Term names for categorical effects are created by patsy and
correspond to the main part of the exog names.
method : {str, list[str]}
The multiple testing p-value correction to apply. The default is
            'hs'. See statsmodels.stats.multitest.multipletests.
alpha : float
The significance level for multiple testing reject decision.
factor_labels : {list[str], None}
Labels for the factor levels used for pairwise labels. If not
provided, then the labels from the formula's model_spec are used.
Returns
-------
MultiCompResult
The results are stored as attributes, the main attributes are the
following two. Other attributes are added for debugging purposes
or as background information.
- result_frame : pandas DataFrame with t_test results and multiple
testing corrected p-values.
- contrasts : matrix of constraints of the null hypothesis in the
t_test.
Notes
-----
Status: experimental. Currently only checked for treatment coding with
and without specified reference level.
Currently there are no multiple testing corrected confidence intervals
available.
Examples
--------
>>> res = ols("np.log(Days+1) ~ C(Weight) + C(Duration)", data).fit()
>>> pw = res.t_test_pairwise("C(Weight)")
>>> pw.result_frame
coef std err t P>|t| Conf. Int. Low
2-1 0.632315 0.230003 2.749157 8.028083e-03 0.171563
3-1 1.302555 0.230003 5.663201 5.331513e-07 0.841803
3-2 0.670240 0.230003 2.914044 5.119126e-03 0.209488
Conf. Int. Upp. pvalue-hs reject-hs
2-1 1.093067 0.010212 True
3-1 1.763307 0.000002 True
3-2 1.130992 0.010212 True | t_test_pairwise | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
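Again assuming synthetic data, a runnable sketch of the pairwise comparisons (group labels and effect sizes are made up for illustration):

# Sketch: pairwise t-tests over the levels of one categorical term.
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

rng = np.random.default_rng(1)
df = pd.DataFrame({
    "y": np.repeat([0.0, 0.5, 1.0], 30) + rng.normal(size=90),
    "g": np.repeat(["a", "b", "c"], 30),
})
res = smf.ols("y ~ C(g)", data=df).fit()

pw = res.t_test_pairwise("C(g)", method="hs")  # Holm-Sidak corrected p-values
print(pw.result_frame)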
def _get_wald_nonlinear(self, func, deriv=None):
"""Experimental method for nonlinear prediction and tests
Parameters
----------
func : callable, f(params)
nonlinear function of the estimation parameters. The return of
the function can be vector valued, i.e. a 1-D array
deriv : function or None
first derivative or Jacobian of func. If deriv is None, then a
numerical derivative will be used. If func returns a 1-D array,
then the `deriv` should have rows corresponding to the elements
of the return of func.
Returns
-------
nl : instance of `NonlinearDeltaCov` with attributes and methods to
calculate the results for the prediction or tests
"""
from statsmodels.stats._delta_method import NonlinearDeltaCov
func_args = None # TODO: not yet implemented, maybe skip - use partial
nl = NonlinearDeltaCov(func, self.params, self.cov_params(),
deriv=deriv, func_args=func_args)
return nl | Experimental method for nonlinear prediction and tests
Parameters
----------
func : callable, f(params)
nonlinear function of the estimation parameters. The return of
the function can be vector valued, i.e. a 1-D array
deriv : function or None
first derivative or Jacobian of func. If deriv is None, then a
numerical derivative will be used. If func returns a 1-D array,
then the `deriv` should have rows corresponding to the elements
of the return of func.
Returns
-------
nl : instance of `NonlinearDeltaCov` with attributes and methods to
calculate the results for the prediction or tests | _get_wald_nonlinear | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
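A hypothetical usage sketch for this experimental helper, testing a ratio of two coefficients; since the method is private, the result API shown (summary) may differ across versions:

# Sketch: delta-method inference for a nonlinear function of the parameters.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(2)
X = sm.add_constant(rng.normal(size=(200, 2)))
y = X @ [1.0, 2.0, 0.5] + rng.normal(size=200)
res = sm.OLS(y, X).fit()

def ratio(params):
    # Nonlinear function of interest: beta_1 / beta_2 ('const' is index 0).
    return np.atleast_1d(params[1] / params[2])

nl = res._get_wald_nonlinear(ratio)  # numerical Jacobian since deriv is None
print(nl.summary())  # estimate, delta-method std err, Wald test against zero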
def conf_int(self, alpha=.05, cols=None):
"""
Construct confidence interval for the fitted parameters.
Parameters
----------
alpha : float, optional
The significance level for the confidence interval. The default
`alpha` = .05 returns a 95% confidence interval.
cols : array_like, optional
Specifies which confidence intervals to return.
            .. deprecated:: 0.13
cols is deprecated and will be removed after 0.14 is released.
cols only works when inputs are NumPy arrays and will fail
when using pandas Series or DataFrames as input. You can
subset the confidence intervals using slices.
Returns
-------
array_like
Each row contains [lower, upper] limits of the confidence interval
for the corresponding parameter. The first column contains all
lower, the second column contains all upper limits.
Notes
-----
The confidence interval is based on the standard normal distribution
        if self.use_t is False. If self.use_t is True, then a Student's t
        distribution with self.df_resid_inference (or self.df_resid if
        df_resid_inference is not defined) degrees of freedom is used.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> results.conf_int()
array([[-5496529.48322745, -1467987.78596704],
[ -177.02903529, 207.15277984],
[ -0.1115811 , 0.03994274],
[ -3.12506664, -0.91539297],
[ -1.5179487 , -0.54850503],
[ -0.56251721, 0.460309 ],
[ 798.7875153 , 2859.51541392]])
>>> results.conf_int(cols=(2,3))
array([[-0.1115811 , 0.03994274],
[-3.12506664, -0.91539297]])
"""
bse = self.bse
if self.use_t:
dist = stats.t
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
q = dist.ppf(1 - alpha / 2, df_resid)
else:
dist = stats.norm
q = dist.ppf(1 - alpha / 2)
params = self.params
lower = params - q * bse
upper = params + q * bse
if cols is not None:
warnings.warn(
"cols is deprecated and will be removed after 0.14 is "
"released. cols only works when inputs are NumPy arrays and "
"will fail when using pandas Series or DataFrames as input. "
"Subsets of confidence intervals can be selected using slices "
"of the full confidence interval array.",
FutureWarning
)
cols = np.asarray(cols)
lower = lower[cols]
upper = upper[cols]
return np.asarray(lzip(lower, upper)) | Construct confidence interval for the fitted parameters.
Parameters
----------
alpha : float, optional
The significance level for the confidence interval. The default
`alpha` = .05 returns a 95% confidence interval.
cols : array_like, optional
Specifies which confidence intervals to return.
            .. deprecated:: 0.13
cols is deprecated and will be removed after 0.14 is released.
cols only works when inputs are NumPy arrays and will fail
when using pandas Series or DataFrames as input. You can
subset the confidence intervals using slices.
Returns
-------
array_like
Each row contains [lower, upper] limits of the confidence interval
for the corresponding parameter. The first column contains all
lower, the second column contains all upper limits.
Notes
-----
The confidence interval is based on the standard normal distribution
        if self.use_t is False. If self.use_t is True, then a Student's t
        distribution with self.df_resid_inference (or self.df_resid if
        df_resid_inference is not defined) degrees of freedom is used.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> results.conf_int()
array([[-5496529.48322745, -1467987.78596704],
[ -177.02903529, 207.15277984],
[ -0.1115811 , 0.03994274],
[ -3.12506664, -0.91539297],
[ -1.5179487 , -0.54850503],
[ -0.56251721, 0.460309 ],
[ 798.7875153 , 2859.51541392]])
>>> results.conf_int(cols=(2,3))
array([[-0.1115811 , 0.03994274],
[-3.12506664, -0.91539297]]) | conf_int | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
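As the method body shows, the interval is just params ± q * bse; a quick consistency check for the use_t case:

# Sketch: reproduce conf_int from params, bse, and the t critical value.
import numpy as np
import statsmodels.api as sm
from scipy import stats

data = sm.datasets.longley.load()
exog = sm.add_constant(data.exog)
res = sm.OLS(data.endog, exog).fit()

q = stats.t.ppf(1 - 0.05 / 2, res.df_resid)  # OLS results have use_t True
manual = np.column_stack([res.params - q * res.bse, res.params + q * res.bse])
assert np.allclose(manual, res.conf_int())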
def save(self, fname, remove_data=False):
"""
Save a pickle of this instance.
Parameters
----------
fname : {str, handle}
A string filename or a file handle.
remove_data : bool
If False (default), then the instance is pickled without changes.
If True, then all arrays with length nobs are set to None before
pickling. See the remove_data method.
In some cases not all arrays will be set to None.
Notes
-----
If remove_data is true and the model result does not implement a
remove_data method then this will raise an exception.
"""
from statsmodels.iolib.smpickle import save_pickle
if remove_data:
self.remove_data()
save_pickle(self, fname) | Save a pickle of this instance.
Parameters
----------
fname : {str, handle}
A string filename or a file handle.
remove_data : bool
If False (default), then the instance is pickled without changes.
If True, then all arrays with length nobs are set to None before
pickling. See the remove_data method.
In some cases not all arrays will be set to None.
Notes
-----
If remove_data is true and the model result does not implement a
remove_data method then this will raise an exception. | save | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
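A short sketch of saving with remove_data=True; the data and filename are illustrative:

# Sketch: pickle a results instance, dropping nobs-length arrays first.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(3)
X = sm.add_constant(rng.normal(size=(50, 1)))
y = X @ [1.0, 2.0] + rng.normal(size=50)
res = sm.OLS(y, X).fit()

res.save("ols_results.pkl", remove_data=True)  # calls remove_data() internally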
def load(cls, fname):
"""
Load a pickled results instance
.. warning::
Loading pickled models is not secure against erroneous or
maliciously constructed data. Never unpickle data received from
an untrusted or unauthenticated source.
Parameters
----------
fname : {str, handle, pathlib.Path}
A string filename or a file handle.
Returns
-------
Results
The unpickled results instance.
"""
from statsmodels.iolib.smpickle import load_pickle
return load_pickle(fname) | Load a pickled results instance
.. warning::
Loading pickled models is not secure against erroneous or
maliciously constructed data. Never unpickle data received from
an untrusted or unauthenticated source.
Parameters
----------
fname : {str, handle, pathlib.Path}
A string filename or a file handle.
Returns
-------
Results
The unpickled results instance. | load | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
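The matching load step; as the warning says, only unpickle files from a trusted source. This assumes sm.load is the load_pickle alias exported by statsmodels.api:

# Sketch: restore the pickled results and predict on new data.
import numpy as np
import statsmodels.api as sm

res = sm.load("ols_results.pkl")  # or ResultsClass.load(fname)
x_new = sm.add_constant(np.linspace(-2.0, 2.0, 5))
print(res.predict(x_new))  # predict still works after remove_data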
def remove_data(self):
"""
Remove data arrays, all nobs arrays from result and model.
This reduces the size of the instance, so it can be pickled with less
memory. Currently tested for use with predict from an unpickled
results and model instance.
.. warning::
Since data and some intermediate results have been removed
calculating new statistics that require them will raise exceptions.
The exception will occur the first time an attribute is accessed
that has been set to None.
        Not fully tested for time series models (tsa); it might delete too much
        for prediction, or fail to delete everything that could be removed.
The lists of arrays to delete are maintained as attributes of
the result and model instance, except for cached values. These
lists could be changed before calling remove_data.
The attributes to remove are named in:
model._data_attr : arrays attached to both the model instance
and the results instance with the same attribute name.
result._data_in_cache : arrays that may exist as values in
result._cache
result._data_attr_model : arrays attached to the model
instance but not to the results instance
"""
cls = self.__class__
# Note: we cannot just use `getattr(cls, x)` or `getattr(self, x)`
# because of redirection involved with property-like accessors
cls_attrs = {}
for name in dir(cls):
try:
attr = object.__getattribute__(cls, name)
except AttributeError:
pass
else:
cls_attrs[name] = attr
data_attrs = [x for x in cls_attrs
if isinstance(cls_attrs[x], cached_data)]
for name in data_attrs:
self._cache[name] = None
def wipe(obj, att):
# get to last element in attribute path
p = att.split('.')
att_ = p.pop(-1)
try:
obj_ = reduce(getattr, [obj] + p)
if hasattr(obj_, att_):
setattr(obj_, att_, None)
except AttributeError:
pass
model_only = ['model.' + i for i in getattr(self, "_data_attr_model", [])]
model_attr = ['model.' + i for i in self.model._data_attr]
for att in self._data_attr + model_attr + model_only:
if att in data_attrs:
# these have been handled above, and trying to call wipe
# would raise an Exception anyway, so skip these
continue
wipe(self, att)
for key in self._data_in_cache:
try:
self._cache[key] = None
except (AttributeError, KeyError):
pass | Remove data arrays, all nobs arrays from result and model.
This reduces the size of the instance, so it can be pickled with less
memory. Currently tested for use with predict from an unpickled
results and model instance.
.. warning::
Since data and some intermediate results have been removed
calculating new statistics that require them will raise exceptions.
The exception will occur the first time an attribute is accessed
that has been set to None.
        Not fully tested for time series models (tsa); it might delete too much
        for prediction, or fail to delete everything that could be removed.
The lists of arrays to delete are maintained as attributes of
the result and model instance, except for cached values. These
lists could be changed before calling remove_data.
The attributes to remove are named in:
model._data_attr : arrays attached to both the model instance
and the results instance with the same attribute name.
result._data_in_cache : arrays that may exist as values in
result._cache
result._data_attr_model : arrays attached to the model
instance but not to the results instance | remove_data | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
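A sketch of the effect on pickle size (the exact numbers will vary; the size gap is the point):

# Sketch: compare pickle sizes before and after remove_data.
import pickle
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(4)
X = sm.add_constant(rng.normal(size=(10_000, 3)))
y = X @ [1.0, 0.5, -0.5, 0.2] + rng.normal(size=10_000)
res = sm.OLS(y, X).fit()

before = len(pickle.dumps(res))
res.remove_data()  # nobs-length arrays on result and model are set to None
after = len(pickle.dumps(res))
print(before, after)  # 'after' should be far smaller
# Wiped attributes such as res.resid now come back as None (or raise later).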
def df_modelwc(self):
"""Model WC"""
# collect different ways of defining the number of parameters, used for
# aic, bic
k_extra = getattr(self.model, "k_extra", 0)
if hasattr(self, 'df_model'):
if hasattr(self, 'k_constant'):
hasconst = self.k_constant
elif hasattr(self, 'hasconst'):
hasconst = self.hasconst
else:
# default assumption
hasconst = 1
return self.df_model + hasconst + k_extra
else:
            return self.params.size | Model degrees of freedom including the constant and any extra parameters. | df_modelwc | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
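This count is what the information criteria use; for OLS, aic equals -2*llf + 2*df_modelwc, which a short check confirms:

# Sketch: df_modelwc (slopes + constant here) reproduces the aic penalty.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(5)
X = sm.add_constant(rng.normal(size=(80, 2)))
y = X @ [1.0, 0.5, -0.5] + rng.normal(size=80)
res = sm.OLS(y, X).fit()

k = res.df_model + res.model.k_constant  # 2 slopes + constant = 3
print(res.aic, -2 * res.llf + 2 * k)     # the two values should agree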
def score_obsv(self):
"""cached Jacobian of log-likelihood
"""
return self.model.score_obs(self.params) | cached Jacobian of log-likelihood | score_obsv | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def hessv(self):
"""cached Hessian of log-likelihood
"""
return self.model.hessian(self.params) | cached Hessian of log-likelihood | hessv | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def covjac(self):
"""
covariance of parameters based on outer product of jacobian of
log-likelihood
"""
# if not hasattr(self, '_results'):
# raise ValueError('need to call fit first')
# #self.fit()
# self.jacv = jacv = self.jac(self._results.params)
jacv = self.score_obsv
return np.linalg.inv(np.dot(jacv.T, jacv)) | covariance of parameters based on outer product of jacobian of
log-likelihood | covjac | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
def covjhj(self):
"""covariance of parameters based on HJJH
dot product of Hessian, Jacobian, Jacobian, Hessian of likelihood
name should be covhjh
"""
jacv = self.score_obsv
hessv = self.hessv
hessinv = np.linalg.inv(hessv)
# self.hessinv = hessin = self.cov_params()
return np.dot(hessinv, np.dot(np.dot(jacv.T, jacv), hessinv)) | covariance of parameters based on HJJH
dot product of Hessian, Jacobian, Jacobian, Hessian of likelihood
name should be covhjh | covjhj | python | statsmodels/statsmodels | statsmodels/base/model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/base/model.py | BSD-3-Clause |
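Both covariances can be reproduced from score_obs and hessian; a sketch with a Logit model, chosen only because its score_obs and hessian are available in closed form:

# Sketch: OPG and sandwich (H^-1 J'J H^-1) parameter covariances.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(6)
X = sm.add_constant(rng.normal(size=(500, 2)))
p = 1 / (1 + np.exp(-(X @ [0.2, 1.0, -1.0])))
y = rng.binomial(1, p)
res = sm.Logit(y, X).fit(disp=0)

jac = res.model.score_obs(res.params)   # nobs x k_params score contributions
H = res.model.hessian(res.params)
cov_opg = np.linalg.inv(jac.T @ jac)    # matches covjac above
Hinv = np.linalg.inv(H)
cov_hjjh = Hinv @ (jac.T @ jac) @ Hinv  # matches covjhj above
print(np.diag(cov_opg), np.diag(cov_hjjh))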