code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def parallel_func(func, n_jobs, verbose=5):
"""Return parallel instance with delayed function
Util function to use joblib only if available
Parameters
----------
func : callable
A function
n_jobs : int
Number of jobs to run in parallel
verbose : int
Verbosity level
Returns
-------
parallel : instance of joblib.Parallel or list
The parallel object
my_func : callable
func if not parallel or delayed(func)
n_jobs : int
Number of jobs >= 0
Examples
--------
>>> from math import sqrt
>>> from statsmodels.tools.parallel import parallel_func
>>> parallel, p_func, n_jobs = parallel_func(sqrt, n_jobs=-1, verbose=0)
>>> print(n_jobs)
>>> parallel(p_func(i**2) for i in range(10))
"""
try:
try:
from joblib import Parallel, delayed
except ImportError:
from sklearn.externals.joblib import Parallel, delayed
parallel = Parallel(n_jobs, verbose=verbose)
my_func = delayed(func)
if n_jobs == -1:
try:
import multiprocessing
n_jobs = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
import warnings
warnings.warn(module_unavailable_doc.format('multiprocessing'),
ModuleUnavailableWarning)
n_jobs = 1
except ImportError:
import warnings
warnings.warn(module_unavailable_doc.format('joblib'),
ModuleUnavailableWarning)
n_jobs = 1
my_func = func
parallel = list
return parallel, my_func, n_jobs | Return parallel instance with delayed function
Util function to use joblib only if available
Parameters
----------
func : callable
A function
n_jobs : int
Number of jobs to run in parallel
verbose : int
Verbosity level
Returns
-------
parallel : instance of joblib.Parallel or list
The parallel object
my_func : callable
func if not parallel or delayed(func)
n_jobs : int
Number of jobs >= 0
Examples
--------
>>> from math import sqrt
>>> from statsmodels.tools.parallel import parallel_func
>>> parallel, p_func, n_jobs = parallel_func(sqrt, n_jobs=-1, verbose=0)
>>> print(n_jobs)
>>> parallel(p_func(i**2) for i in range(10)) | parallel_func | python | statsmodels/statsmodels | statsmodels/tools/parallel.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/parallel.py | BSD-3-Clause |
def discrepancy(sample, bounds=None):
"""Discrepancy.
Compute the centered discrepancy on a given sample.
It is a measure of the uniformity of the points in the parameter space.
The lower the value is, the better the coverage of the parameter space is.
Parameters
----------
sample : array_like (n_samples, k_vars)
The sample to compute the discrepancy from.
bounds : tuple or array_like ([min, k_vars], [max, k_vars])
Desired range of transformed data. The transformation applies the bounds
to the sample, not to the theoretical unit-cube space. Thus the min and
max values of the sample will coincide with the bounds.
Returns
-------
discrepancy : float
Centered discrepancy.
References
----------
[1] Fang et al. "Design and modeling for computer experiments",
Computer Science and Data Analysis Series Science and Data Analysis
Series, 2006.
"""
sample = np.asarray(sample)
n_sample, dim = sample.shape
# Sample scaling from bounds to unit hypercube
if bounds is not None:
min_ = bounds.min(axis=0)
max_ = bounds.max(axis=0)
sample = (sample - min_) / (max_ - min_)
abs_ = abs(sample - 0.5)
disc1 = np.sum(np.prod(1 + 0.5 * abs_ - 0.5 * abs_ ** 2, axis=1))
prod_arr = 1
for i in range(dim):
s0 = sample[:, i]
prod_arr *= (1 +
0.5 * abs(s0[:, None] - 0.5) + 0.5 * abs(s0 - 0.5) -
0.5 * abs(s0[:, None] - s0))
disc2 = prod_arr.sum()
c2 = ((13.0 / 12.0) ** dim - 2.0 / n_sample * disc1 +
1.0 / (n_sample ** 2) * disc2)
return c2 | Discrepancy.
Compute the centered discrepancy on a given sample.
It is a measure of the uniformity of the points in the parameter space.
The lower the value is, the better the coverage of the parameter space is.
Parameters
----------
sample : array_like (n_samples, k_vars)
The sample to compute the discrepancy from.
bounds : tuple or array_like ([min, k_vars], [max, k_vars])
Desired range of transformed data. The transformation applies the bounds
to the sample, not to the theoretical unit-cube space. Thus the min and
max values of the sample will coincide with the bounds.
Returns
-------
discrepancy : float
Centered discrepancy.
References
----------
[1] Fang et al. "Design and modeling for computer experiments",
Computer Science and Data Analysis Series Science and Data Analysis
Series, 2006. | discrepancy | python | statsmodels/statsmodels | statsmodels/tools/sequences.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/sequences.py | BSD-3-Clause |
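A minimal usage sketch for `discrepancy` (illustrative only, not part of the row above; the sample values are made up). A lower result indicates more uniform coverage of the unit square.
>>> import numpy as np
>>> from statsmodels.tools.sequences import discrepancy
>>> sample = np.array([[0.1, 0.9], [0.5, 0.5], [0.9, 0.1], [0.3, 0.7]])
>>> c2 = discrepancy(sample)  # centered discrepancy of a 4-point design in 2 dimensions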
def primes_from_2_to(n):
"""Prime numbers from 2 to *n*.
Parameters
----------
n : int
Sup bound with ``n >= 6``.
Returns
-------
primes : list(int)
Primes in ``2 <= p < n``.
References
----------
[1] `StackOverflow <https://stackoverflow.com/questions/2068372>`_.
"""
sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)
for i in range(1, int(n ** 0.5) // 3 + 1):
if sieve[i]:
k = 3 * i + 1 | 1
sieve[k * k // 3::2 * k] = False
sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False
return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)] | Prime numbers from 2 to *n*.
Parameters
----------
n : int
Sup bound with ``n >= 6``.
Returns
-------
primes : list(int)
Primes in ``2 <= p < n``.
References
----------
[1] `StackOverflow <https://stackoverflow.com/questions/2068372>`_. | primes_from_2_to | python | statsmodels/statsmodels | statsmodels/tools/sequences.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/sequences.py | BSD-3-Clause |
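A short illustrative call (not part of the row above), respecting the documented requirement ``n >= 6``:
>>> from statsmodels.tools.sequences import primes_from_2_to
>>> primes_from_2_to(20)  # primes strictly below 20: 2, 3, 5, 7, 11, 13, 17, 19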
def n_primes(n):
"""List of the n-first prime numbers.
Parameters
----------
n : int
Number of prime numbers wanted.
Returns
-------
primes : list(int)
List of primes.
"""
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,
197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269,
271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,
433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599,
601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673,
677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761,
769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947,
953, 967, 971, 977, 983, 991, 997][:n]
if len(primes) < n:
big_number = 10
while 'Not enough primes':
primes = primes_from_2_to(big_number)[:n]
if len(primes) == n:
break
big_number += 1000
return primes | List of the n-first prime numbers.
Parameters
----------
n : int
Number of prime numbers wanted.
Returns
-------
primes : list(int)
List of primes. | n_primes | python | statsmodels/statsmodels | statsmodels/tools/sequences.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/sequences.py | BSD-3-Clause |
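An illustrative call (not part of the row above); for small ``n`` the result is simply a slice of the hard-coded list:
>>> from statsmodels.tools.sequences import n_primes
>>> n_primes(5)
[2, 3, 5, 7, 11]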
def van_der_corput(n_sample, base=2, start_index=0):
"""Van der Corput sequence.
Pseudo-random number generator based on a b-adic expansion.
Parameters
----------
n_sample : int
Number of elements in the sequence.
base : int
Base of the sequence.
start_index : int
Index to start the sequence from.
Returns
-------
sequence : list (n_samples,)
Sequence of Van der Corput.
"""
sequence = []
for i in range(start_index, start_index + n_sample):
n_th_number, denom = 0., 1.
quotient = i
while quotient > 0:
quotient, remainder = divmod(quotient, base)
denom *= base
n_th_number += remainder / denom
sequence.append(n_th_number)
return sequence | Van der Corput sequence.
Pseudo-random number generator based on a b-adic expansion.
Parameters
----------
n_sample : int
Number of elements in the sequence.
base : int
Base of the sequence.
start_index : int
Index to start the sequence from.
Returns
-------
sequence : list (n_samples,)
Sequence of Van der Corput. | van_der_corput | python | statsmodels/statsmodels | statsmodels/tools/sequences.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/sequences.py | BSD-3-Clause |
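An illustrative call (not from the dataset); the base-2 sequence starting at index 0 fills the unit interval progressively:
>>> from statsmodels.tools.sequences import van_der_corput
>>> van_der_corput(4, base=2)  # -> [0.0, 0.5, 0.25, 0.75]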
def halton(dim, n_sample, bounds=None, start_index=0):
"""Halton sequence.
Pseudo-random number generator that generalizes the Van der Corput sequence
to multiple dimensions. The Halton sequence uses the base-two Van der Corput
sequence for the first dimension, base three for the second, and base n for
the n-th dimension.
Parameters
----------
dim : int
Dimension of the parameter space.
n_sample : int
Number of samples to generate in the parameter space.
bounds : tuple or array_like ([min, k_vars], [max, k_vars])
Desired range of transformed data. The transformation applies the bounds
to the sample, not to the theoretical unit-cube space. Thus the min and
max values of the sample will coincide with the bounds.
start_index : int
Index to start the sequence from.
Returns
-------
sequence : array_like (n_samples, k_vars)
Sequence of Halton.
References
----------
[1] Halton, "On the efficiency of certain quasi-random sequences of points
in evaluating multi-dimensional integrals", Numerische Mathematik, 1960.
Examples
--------
Generate samples from a low discrepancy sequence of Halton.
>>> from statsmodels.tools import sequences
>>> sample = sequences.halton(dim=2, n_sample=5)
Compute the quality of the sample using the discrepancy criterion.
>>> uniformity = sequences.discrepancy(sample)
If one wants to continue an existing design, extra points can be obtained.
>>> sample_continued = sequences.halton(dim=2, n_sample=5, start_index=5)
"""
base = n_primes(dim)
# Generate a sample using a Van der Corput sequence per dimension.
sample = [van_der_corput(n_sample + 1, bdim, start_index) for bdim in base]
sample = np.array(sample).T[1:]
# Sample scaling from unit hypercube to feature range
if bounds is not None:
min_ = bounds.min(axis=0)
max_ = bounds.max(axis=0)
sample = sample * (max_ - min_) + min_
return sample | Halton sequence.
Pseudo-random number generator that generalizes the Van der Corput sequence
to multiple dimensions. The Halton sequence uses the base-two Van der Corput
sequence for the first dimension, base three for the second, and base n for
the n-th dimension.
Parameters
----------
dim : int
Dimension of the parameter space.
n_sample : int
Number of samples to generate in the parameter space.
bounds : tuple or array_like ([min, k_vars], [max, k_vars])
Desired range of transformed data. The transformation applies the bounds
to the sample, not to the theoretical unit-cube space. Thus the min and
max values of the sample will coincide with the bounds.
start_index : int
Index to start the sequence from.
Returns
-------
sequence : array_like (n_samples, k_vars)
Sequence of Halton.
References
----------
[1] Halton, "On the efficiency of certain quasi-random sequences of points
in evaluating multi-dimensional integrals", Numerische Mathematik, 1960.
Examples
--------
Generate samples from a low discrepancy sequence of Halton.
>>> from statsmodels.tools import sequences
>>> sample = sequences.halton(dim=2, n_sample=5)
Compute the quality of the sample using the discrepancy criterion.
>>> uniformity = sequences.discrepancy(sample)
If one wants to continue an existing design, extra points can be obtained.
>>> sample_continued = sequences.halton(dim=2, n_sample=5, start_index=5) | halton | python | statsmodels/statsmodels | statsmodels/tools/sequences.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/sequences.py | BSD-3-Clause |
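The docstring examples above do not exercise ``bounds``; here is a hedged sketch (the bounds array is made up) that scales the sequence from the unit cube to a feature range, assuming ``bounds`` is passed as an ndarray so that ``bounds.min(axis=0)`` works as in the code above:
>>> import numpy as np
>>> from statsmodels.tools import sequences
>>> bounds = np.array([[0.0, -1.0], [10.0, 1.0]])  # hypothetical per-variable [min; max] rows
>>> sample = sequences.halton(dim=2, n_sample=5, bounds=bounds)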
def combine_indices(groups, prefix='', sep='.', return_labels=False):
"""use np.unique to get integer group indices for product, intersection
"""
if isinstance(groups, tuple):
groups = np.column_stack(groups)
else:
groups = np.asarray(groups)
dt = groups.dtype
is2d = (groups.ndim == 2) # need to store
if is2d:
ncols = groups.shape[1]
if not groups.flags.c_contiguous:
groups = np.array(groups, order='C')
groups_ = groups.view([('', groups.dtype)] * groups.shape[1])
else:
groups_ = groups
uni, uni_idx, uni_inv = np.unique(groups_, return_index=True,
return_inverse=True)
if is2d:
uni = uni.view(dt).reshape(-1, ncols)
# avoiding a view would be
# for t in uni.dtype.fields.values():
# assert (t[0] == dt)
#
# uni.dtype = dt
# uni.shape = (uni.size//ncols, ncols)
if return_labels:
label = [(prefix+sep.join(['%s']*len(uni[0]))) % tuple(ii)
for ii in uni]
return uni_inv, uni_idx, uni, label
else:
return uni_inv, uni_idx, uni | use np.unique to get integer group indices for product, intersection | combine_indices | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
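A small sketch of how `combine_indices` might be used (the arrays are made up): two group arrays are combined into product groups and integer codes are returned.
>>> import numpy as np
>>> from statsmodels.tools.grouputils import combine_indices
>>> g1 = np.array([0, 0, 1, 1])
>>> g2 = np.array([0, 1, 0, 1])
>>> uni_inv, uni_idx, uni = combine_indices((g1, g2))
>>> uni_inv  # integer code of the (g1, g2) combination for each observation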
def group_sums(x, group, use_bincount=True):
"""simple bincount version, again
group : ndarray, integer
assumed to be consecutive integers
no dtype checking because I want to raise in that case
uses loop over columns of x
for comparison, simple python loop
"""
x = np.asarray(x)
group = np.asarray(group).squeeze()
if x.ndim == 1:
x = x[:, None]
elif x.ndim > 2 and use_bincount:
raise ValueError('not implemented yet')
if use_bincount:
# re-label groups or bincount takes too much memory
if np.max(group) > 2 * x.shape[0]:
group = pd.factorize(group)[0]
return np.array(
[
np.bincount(group, weights=x[:, col])
for col in range(x.shape[1])
]
)
else:
uniques = np.unique(group)
result = np.zeros([len(uniques)] + list(x.shape[1:]))
for ii, cat in enumerate(uniques):
result[ii] = x[group == cat].sum(0)
return result | simple bincount version, again
group : ndarray, integer
assumed to be consecutive integers
no dtype checking because I want to raise in that case
uses loop over columns of x
for comparison, simple python loop | group_sums | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
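An illustrative call (values made up); `group` must contain consecutive integer labels:
>>> import numpy as np
>>> from statsmodels.tools.grouputils import group_sums
>>> x = np.array([1.0, 2.0, 3.0, 4.0])
>>> group = np.array([0, 0, 1, 1])
>>> group_sums(x, group)  # one row per column of x, one entry per group -> [[3., 7.]]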
def group_sums_dummy(x, group_dummy):
"""sum by groups given group dummy variable
group_dummy can be either ndarray or sparse matrix
"""
if data_util._is_using_ndarray_type(group_dummy, None):
return np.dot(x.T, group_dummy)
else: # check for sparse
return x.T * group_dummy | sum by groups given group dummy variable
group_dummy can be either ndarray or sparse matrix | group_sums_dummy | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
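A sketch pairing `group_sums_dummy` with a dense indicator matrix built by hand (values made up):
>>> import numpy as np
>>> from statsmodels.tools.grouputils import group_sums_dummy
>>> x = np.array([[1.0], [2.0], [3.0]])
>>> g = np.array([0, 0, 1])
>>> dummies = (g[:, None] == np.arange(2)).astype(float)  # dense (nobs, n_groups) indicator
>>> group_sums_dummy(x, dummies)  # x.T @ dummies -> [[3., 3.]]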
def dummy_sparse(groups):
"""create a sparse indicator from a group array with integer labels
Parameters
----------
groups : ndarray, int, 1d (nobs,)
an array of group indicators for each observation. Group levels are
assumed to be defined as consecutive integers, i.e. range(n_groups)
where n_groups is the number of group levels. A group level with no
observations for it will still produce a column of zeros.
Returns
-------
indi : ndarray, int8, 2d (nobs, n_groups)
an indicator array with one row per observation, that has 1 in the
column of the group level for that observation
Examples
--------
>>> g = np.array([0, 0, 2, 1, 1, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi
<7x3 sparse matrix of type '<type 'numpy.int8'>'
with 7 stored elements in Compressed Sparse Row format>
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
current behavior with missing groups
>>> g = np.array([0, 0, 2, 0, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
"""
from scipy import sparse
indptr = np.arange(len(groups)+1)
data = np.ones(len(groups), dtype=np.int8)
indi = sparse.csr_matrix((data, groups, indptr))
return indi | create a sparse indicator from a group array with integer labels
Parameters
----------
groups : ndarray, int, 1d (nobs,)
an array of group indicators for each observation. Group levels are
assumed to be defined as consecutive integers, i.e. range(n_groups)
where n_groups is the number of group levels. A group level with no
observations for it will still produce a column of zeros.
Returns
-------
indi : ndarray, int8, 2d (nobs, n_groups)
an indicator array with one row per observation, that has 1 in the
column of the group level for that observation
Examples
--------
>>> g = np.array([0, 0, 2, 1, 1, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi
<7x3 sparse matrix of type '<type 'numpy.int8'>'
with 7 stored elements in Compressed Sparse Row format>
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
current behavior with missing groups
>>> g = np.array([0, 0, 2, 0, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8) | dummy_sparse | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
def dummy(self, drop_idx=None, sparse=False, dtype=int):
"""
drop_idx is only available if sparse=False
drop_idx is supposed to index into uni
"""
uni = self.uni
if drop_idx is not None:
idx = lrange(len(uni))
del idx[drop_idx]
uni = uni[idx]
group = self.group
if not sparse:
return (group[:, None] == uni[None, :]).astype(dtype)
else:
return dummy_sparse(self.group_int) | drop_idx is only available if sparse=False
drop_idx is supposed to index into uni | dummy | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
def lag_indices(self, lag):
"""return the index array for lagged values
Warning: if `lag` is larger than the number of observations for an
individual, then no values for that individual are returned.
TODO: for the unbalanced case, I should get the same truncation for
the array with lag=0. From the return of lag_idx we would not know
which individual is missing.
TODO: do I want the full equivalent of lagmat in tsa?
maxlag or lag or lags.
not tested yet
"""
lag_idx = np.asarray(self.groupidx)[:, 1] - lag # asarray or already?
mask_ok = (lag <= lag_idx)
# still an observation that belongs to the same individual
return lag_idx[mask_ok] | return the index array for lagged values
Warning: if `lag` is larger than the number of observations for an
individual, then no values for that individual are returned.
TODO: for the unbalanced case, I should get the same truncation for
the array with lag=0. From the return of lag_idx we would not know
which individual is missing.
TODO: do I want the full equivalent of lagmat in tsa?
maxlag or lag or lags.
not tested yet | lag_indices | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
def _is_hierarchical(x):
"""
Checks if the first item of an array-like object is also array-like
If so, we have a MultiIndex and the function returns True; otherwise it returns False.
"""
item = x[0]
# is there a better way to do this?
if isinstance(item, (list, tuple, np.ndarray, pd.Series, pd.DataFrame)):
return True
else:
return False | Checks if the first item of an array-like object is also array-like
If so, we have a MultiIndex and the function returns True; otherwise it returns False. | _is_hierarchical | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
def __init__(self, index, names=None):
"""
index : index-like
Can be pandas MultiIndex or Index or array-like. If array-like
and is a MultipleIndex (more than one grouping variable),
groups are expected to be in each row. E.g., [('red', 1),
('red', 2), ('green', 1), ('green', 2)]
names : list or str, optional
The names to use for the groups. Should be a str if only
one grouping variable is used.
Notes
-----
If index is already a pandas Index then there is no copy.
"""
if isinstance(index, (Index, MultiIndex)):
if names is not None:
if hasattr(index, 'set_names'): # newer pandas
index.set_names(names, inplace=True)
else:
index.names = names
self.index = index
else: # array_like
if _is_hierarchical(index):
self.index = _make_hierarchical_index(index, names)
else:
self.index = Index(index, name=names)
if names is None:
names = _make_generic_names(self.index)
if hasattr(self.index, 'set_names'):
self.index.set_names(names, inplace=True)
else:
self.index.names = names
self.nobs = len(self.index)
self.nlevels = len(self.index.names)
self.slices = None | index : index-like
Can be pandas MultiIndex or Index or array-like. If array-like
and is a MultipleIndex (more than one grouping variable),
groups are expected to be in each row. E.g., [('red', 1),
('red', 2), ('green', 1), ('green', 2)]
names : list or str, optional
The names to use for the groups. Should be a str if only
one grouping variable is used.
Notes
-----
If index is already a pandas Index then there is no copy. | __init__ | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
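A hedged construction sketch for the class these methods belong to, assuming it is ``statsmodels.tools.grouputils.Grouping`` as the ``reindex`` code in the next row suggests (the example data are made up, and the behaviour of the hierarchical-index helper is inferred from the ``__init__`` code above):
>>> from statsmodels.tools.grouputils import Grouping
>>> index = [('red', 1), ('red', 2), ('green', 1), ('green', 2)]
>>> grp = Grouping(index, names=['color', 'trial'])
>>> grp.nobs, grp.nlevels  # expected -> (4, 2)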
def reindex(self, index=None, names=None):
"""
Resets the index in-place.
"""
# NOTE: this is not of much use if the rest of the data does not change
# This needs to reset cache
if names is None:
names = self.group_names
self = Grouping(index, names) | Resets the index in-place. | reindex | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
def get_slices(self, level=0):
"""
Sets the slices attribute to be a list of indices of the sorted
groups for the first index level. I.e., self.slices[0] is the
index where each observation is in the first (sorted) group.
"""
# TODO: refactor this
groups = self.index.get_level_values(level).unique()
groups = np.sort(np.array(groups))
if isinstance(self.index, MultiIndex):
self.slices = [self.index.get_loc_level(x, level=level)[0]
for x in groups]
else:
self.slices = [self.index.get_loc(x) for x in groups] | Sets the slices attribute to be a list of indices of the sorted
groups for the first index level. I.e., self.slices[0] is the
index where each observation is in the first (sorted) group. | get_slices | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
def count_categories(self, level=0):
"""
Sets the attribute counts to equal the bincount of the (integer-valued)
labels.
"""
# TODO: refactor this not to set an attribute. Why would we do this?
self.counts = np.bincount(self.labels[level]) | Sets the attribute counts to equal the bincount of the (integer-valued)
labels. | count_categories | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
def check_index(self, is_sorted=True, unique=True, index=None):
"""Sanity checks"""
if not index:
index = self.index
if is_sorted:
test = pd.DataFrame(lrange(len(index)), index=index)
test_sorted = test.sort()
if not test.index.equals(test_sorted.index):
raise Exception('Data is not sorted')
if unique:
if len(index) != len(index.unique()):
raise Exception('Duplicate index entries') | Sanity checks | check_index | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
def sort(self, data, index=None):
"""Applies a (potentially hierarchical) sort operation on a numpy array
or pandas series/dataframe based on the grouping index or a
user-supplied index. Returns an object of the same type as the
original data as well as the matching (sorted) Pandas index.
"""
if index is None:
index = self.index
if data_util._is_using_ndarray_type(data, None):
if data.ndim == 1:
out = pd.Series(data, index=index, copy=True)
out = out.sort_index()
else:
out = pd.DataFrame(data, index=index)
out = out.sort_index(inplace=False) # copies
return np.array(out), out.index
elif data_util._is_using_pandas(data, None):
out = data
out = out.reindex(index) # copies?
out = out.sort_index()
return out, out.index
else:
msg = 'data must be a Numpy array or a Pandas Series/DataFrame'
raise ValueError(msg) | Applies a (potentially hierarchical) sort operation on a numpy array
or pandas series/dataframe based on the grouping index or a
user-supplied index. Returns an object of the same type as the
original data as well as the matching (sorted) Pandas index. | sort | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
def transform_dataframe(self, dataframe, function, level=0, **kwargs):
"""Apply function to each column, by group
Assumes that the dataframe already has a proper index"""
if dataframe.shape[0] != self.nobs:
raise Exception('dataframe does not have the same shape as index')
out = dataframe.groupby(level=level).apply(function, **kwargs)
if 1 in out.shape:
return np.ravel(out)
else:
return np.array(out) | Apply function to each column, by group
Assumes that the dataframe already has a proper index | transform_dataframe | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
def transform_array(self, array, function, level=0, **kwargs):
"""Apply function to each column, by group
"""
if array.shape[0] != self.nobs:
raise Exception('array does not have the same shape as index')
dataframe = pd.DataFrame(array, index=self.index)
return self.transform_dataframe(dataframe, function, level=level,
**kwargs) | Apply function to each column, by group | transform_array | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
def transform_slices(self, array, function, level=0, **kwargs):
"""Apply function to each group. Similar to transform_array but does
not coerce array to a DataFrame and back and only works on a 1D or 2D
numpy array. function is called function(group, group_idx, **kwargs).
"""
array = np.asarray(array)
if array.shape[0] != self.nobs:
raise Exception('array does not have the same shape as index')
# always reset because level is given. need to refactor this.
self.get_slices(level=level)
processed = []
for s in self.slices:
if array.ndim == 2:
subset = array[s, :]
elif array.ndim == 1:
subset = array[s]
processed.append(function(subset, s, **kwargs))
processed = np.array(processed)
return processed.reshape(-1, processed.shape[-1]) | Apply function to each group. Similar to transform_array but does
not coerce array to a DataFrame and back and only works on a 1D or 2D
numpy array. function is called function(group, group_idx, **kwargs). | transform_slices | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
def dummy_sparse(self, level=0):
"""create a sparse indicator from a group array with integer labels
Parameters
----------
groups : ndarray, int, 1d (nobs,)
An array of group indicators for each observation. Group levels
are assumed to be defined as consecutive integers, i.e.
range(n_groups) where n_groups is the number of group levels.
A group level with no observations for it will still produce a
column of zeros.
Returns
-------
indi : ndarray, int8, 2d (nobs, n_groups)
an indicator array with one row per observation, that has 1 in the
column of the group level for that observation
Examples
--------
>>> g = np.array([0, 0, 2, 1, 1, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi
<7x3 sparse matrix of type '<type 'numpy.int8'>'
with 7 stored elements in Compressed Sparse Row format>
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
current behavior with missing groups
>>> g = np.array([0, 0, 2, 0, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
"""
indi = dummy_sparse(self.labels[level])
self._dummies = indi | create a sparse indicator from a group array with integer labels
Parameters
----------
groups : ndarray, int, 1d (nobs,)
An array of group indicators for each observation. Group levels
are assumed to be defined as consecutive integers, i.e.
range(n_groups) where n_groups is the number of group levels.
A group level with no observations for it will still produce a
column of zeros.
Returns
-------
indi : ndarray, int8, 2d (nobs, n_groups)
an indicator array with one row per observation, that has 1 in the
column of the group level for that observation
Examples
--------
>>> g = np.array([0, 0, 2, 1, 1, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi
<7x3 sparse matrix of type '<type 'numpy.int8'>'
with 7 stored elements in Compressed Sparse Row format>
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
current behavior with missing groups
>>> g = np.array([0, 0, 2, 0, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8) | dummy_sparse | python | statsmodels/statsmodels | statsmodels/tools/grouputils.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/grouputils.py | BSD-3-Clause |
def check_ftest_pvalues(results):
"""
Check that the outputs of `res.wald_test` produce p-values that
match res.pvalues.
Check that the string representations of `res.summary()` and (possibly)
`res.summary2()` correctly label either the t or z-statistic.
Parameters
----------
results : Results
Raises
------
AssertionError
"""
res = results
use_t = res.use_t
k_vars = len(res.params)
# check default use_t
pvals = [
res.wald_test(np.eye(k_vars)[k], use_f=use_t, scalar=True).pvalue
for k in range(k_vars)
]
assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)
# automatic use_f based on results class use_t
pvals = [
res.wald_test(np.eye(k_vars)[k], scalar=True).pvalue
for k in range(k_vars)
]
assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)
# TODO: Separate these out into summary/summary2 tests?
# label for pvalues in summary
string_use_t = "P>|z|" if use_t is False else "P>|t|"
summ = str(res.summary())
assert_(string_use_t in summ)
# try except for models that do not have summary2
try:
summ2 = str(res.summary2())
except AttributeError:
pass
else:
assert_(string_use_t in summ2) | Check that the outputs of `res.wald_test` produce p-values that
match res.pvalues.
Check that the string representations of `res.summary()` and (possibly)
`res.summary2()` correctly label either the t or z-statistic.
Parameters
----------
results : Results
Raises
------
AssertionError | check_ftest_pvalues | python | statsmodels/statsmodels | statsmodels/tools/_testing.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/_testing.py | BSD-3-Clause |
def check_predict_types(results):
"""
Check that the `predict` method of the given results object produces the
correct output type.
Parameters
----------
results : Results
Raises
------
AssertionError
"""
res = results
# squeeze to make 1d for single regressor test case
p_exog = np.squeeze(np.asarray(res.model.exog[:2]))
# ignore wrapper for isinstance check
from statsmodels.genmod.generalized_linear_model import GLMResults
from statsmodels.discrete.discrete_model import DiscreteResults
from statsmodels.compat.pandas import (
assert_frame_equal,
assert_series_equal,
)
# possibly unwrap -- GEE has no wrapper
results = getattr(results, "_results", results)
if isinstance(results, (GLMResults, DiscreteResults)):
# SMOKE test only TODO: mark this somehow
res.predict(p_exog)
res.predict(p_exog.tolist())
res.predict(p_exog[0].tolist())
else:
fitted = res.fittedvalues[:2]
assert_allclose(fitted, res.predict(p_exog), rtol=1e-12)
# this needs reshape to column-vector:
assert_allclose(
fitted, res.predict(np.squeeze(p_exog).tolist()), rtol=1e-12
)
# only one prediction:
assert_allclose(
fitted[:1], res.predict(p_exog[0].tolist()), rtol=1e-12
)
assert_allclose(fitted[:1], res.predict(p_exog[0]), rtol=1e-12)
# Check that pandas wrapping works as expected
exog_index = range(len(p_exog))
predicted = res.predict(p_exog)
cls = pd.Series if p_exog.ndim == 1 else pd.DataFrame
predicted_pandas = res.predict(cls(p_exog, index=exog_index))
# predicted.ndim may not match p_exog.ndim because it may be squeezed
# if p_exog has only one column
cls = pd.Series if predicted.ndim == 1 else pd.DataFrame
predicted_expected = cls(predicted, index=exog_index)
if isinstance(predicted_expected, pd.Series):
assert_series_equal(predicted_expected, predicted_pandas)
else:
assert_frame_equal(predicted_expected, predicted_pandas) | Check that the `predict` method of the given results object produces the
correct output type.
Parameters
----------
results : Results
Raises
------
AssertionError | check_predict_types | python | statsmodels/statsmodels | statsmodels/tools/_testing.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/_testing.py | BSD-3-Clause |
def check_random_state(seed=None):
"""
Turn `seed` into a random number generator.
Parameters
----------
seed : {None, int, array_like[ints], `numpy.random.Generator`,
`numpy.random.RandomState`, `scipy.stats.qmc.QMCEngine`}, optional
If `seed` is None, fresh, unpredictable entropy will be pulled
from the OS and `numpy.random.Generator` is used.
If `seed` is an int or ``array_like[ints]``, a new ``Generator``
instance is used, seeded with `seed`.
If `seed` is already a ``Generator``, ``RandomState`` or
`scipy.stats.qmc.QMCEngine` instance then
that instance is used.
`scipy.stats.qmc.QMCEngine` requires SciPy >=1.7. It also means
that the generator only has the ``random`` method.
Returns
-------
seed : {`numpy.random.Generator`, `numpy.random.RandomState`,
`scipy.stats.qmc.QMCEngine`}
Random number generator.
"""
if hasattr(stats, "qmc") and \
isinstance(seed, stats.qmc.QMCEngine):
return seed
elif isinstance(seed, np.random.RandomState):
return seed
elif isinstance(seed, np.random.Generator):
return seed
elif seed is not None:
return np.random.default_rng(seed)
else:
import warnings
warnings.warn(_future_warn, FutureWarning)
return np.random.mtrand._rand | Turn `seed` into a random number generator.
Parameters
----------
seed : {None, int, array_like[ints], `numpy.random.Generator`,
`numpy.random.RandomState`, `scipy.stats.qmc.QMCEngine`}, optional
If `seed` is None, fresh, unpredictable entropy will be pulled
from the OS and `numpy.random.Generator` is used.
If `seed` is an int or ``array_like[ints]``, a new ``Generator``
instance is used, seeded with `seed`.
If `seed` is already a ``Generator``, ``RandomState`` or
`scipy.stats.qmc.QMCEngine` instance then
that instance is used.
`scipy.stats.qmc.QMCEngine` requires SciPy >=1.7. It also means
that the generator only has the ``random`` method.
Returns
-------
seed : {`numpy.random.Generator`, `numpy.random.RandomState`,
`scipy.stats.qmc.QMCEngine`}
Random number generator. | check_random_state | python | statsmodels/statsmodels | statsmodels/tools/rng_qrng.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/rng_qrng.py | BSD-3-Clause |
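Illustrative calls (not part of the row above) showing the seeding versus pass-through behaviour:
>>> import numpy as np
>>> from statsmodels.tools.rng_qrng import check_random_state
>>> gen = check_random_state(12345)                      # int seed -> new np.random.Generator
>>> same = check_random_state(np.random.default_rng(7))  # an existing Generator is returned as-is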
def mse(x1, x2, axis=0):
"""mean squared error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
mse : ndarray or float
mean squared error along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass, for example
numpy matrices will silently produce an incorrect result.
"""
x1 = np.asanyarray(x1)
x2 = np.asanyarray(x2)
return np.mean((x1 - x2) ** 2, axis=axis) | mean squared error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
mse : ndarray or float
mean squared error along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass, for example
numpy matrices will silently produce an incorrect result. | mse | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
def rmse(x1, x2, axis=0):
"""root mean squared error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
rmse : ndarray or float
root mean squared error along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass, for example
numpy matrices will silently produce an incorrect result.
"""
x1 = np.asanyarray(x1)
x2 = np.asanyarray(x2)
return np.sqrt(mse(x1, x2, axis=axis)) | root mean squared error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
rmse : ndarray or float
root mean squared error along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass, for example
numpy matrices will silently produce an incorrect result. | rmse | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
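A quick numeric sketch for the two error measures above (the arrays are made up):
>>> import numpy as np
>>> from statsmodels.tools.eval_measures import mse, rmse
>>> y = np.array([1.0, 2.0, 3.0])
>>> y_hat = np.array([1.5, 2.0, 2.5])
>>> mse(y, y_hat)   # mean of squared errors: (0.25 + 0.0 + 0.25) / 3
>>> rmse(y, y_hat)  # square root of the value above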
def rmspe(y, y_hat, axis=0, zeros=np.nan):
"""
Root Mean Squared Percentage Error
Parameters
----------
y : array_like
The actual value.
y_hat : array_like
The predicted value.
axis : int
Axis along which the summary statistic is calculated
zeros : float
Value to assign to error where y is zero
Returns
-------
rmspe : ndarray or float
Root Mean Squared Percentage Error along given axis.
"""
y_hat = np.asarray(y_hat)
y = np.asarray(y)
error = y - y_hat
loc = y != 0
loc = loc.ravel()
percentage_error = np.full_like(error, zeros)
percentage_error.flat[loc] = error.flat[loc] / y.flat[loc]
mspe = np.nanmean(percentage_error ** 2, axis=axis) * 100
return np.sqrt(mspe) | Root Mean Squared Percentage Error
Parameters
----------
y : array_like
The actual value.
y_hat : array_like
The predicted value.
axis : int
Axis along which the summary statistic is calculated
zeros : float
Value to assign to error where y is zero
Returns
-------
rmspe : ndarray or float
Root Mean Squared Percentage Error along given axis. | rmspe | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
def maxabs(x1, x2, axis=0):
"""maximum absolute error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
maxabs : ndarray or float
maximum absolute difference along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass.
"""
x1 = np.asanyarray(x1)
x2 = np.asanyarray(x2)
return np.max(np.abs(x1 - x2), axis=axis) | maximum absolute error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
maxabs : ndarray or float
maximum absolute difference along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass. | maxabs | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
def meanabs(x1, x2, axis=0):
"""mean absolute error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
meanabs : ndarray or float
mean absolute difference along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass.
"""
x1 = np.asanyarray(x1)
x2 = np.asanyarray(x2)
return np.mean(np.abs(x1 - x2), axis=axis) | mean absolute error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
meanabs : ndarray or float
mean absolute difference along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass. | meanabs | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
def medianabs(x1, x2, axis=0):
"""median absolute error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
medianabs : ndarray or float
median absolute difference along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass.
"""
x1 = np.asanyarray(x1)
x2 = np.asanyarray(x2)
return np.median(np.abs(x1 - x2), axis=axis) | median absolute error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
medianabs : ndarray or float
median absolute difference along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass. | medianabs | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
def bias(x1, x2, axis=0):
"""bias, mean error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
bias : ndarray or float
bias, or mean difference along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass.
"""
x1 = np.asanyarray(x1)
x2 = np.asanyarray(x2)
return np.mean(x1 - x2, axis=axis) | bias, mean error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
bias : ndarray or float
bias, or mean difference along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass. | bias | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
def medianbias(x1, x2, axis=0):
"""median bias, median error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
medianbias : ndarray or float
median bias, or median difference along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass.
"""
x1 = np.asanyarray(x1)
x2 = np.asanyarray(x2)
return np.median(x1 - x2, axis=axis) | median bias, median error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
medianbias : ndarray or float
median bias, or median difference along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass. | medianbias | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
def vare(x1, x2, ddof=0, axis=0):
"""variance of error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
vare : ndarray or float
variance of difference along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass.
"""
x1 = np.asanyarray(x1)
x2 = np.asanyarray(x2)
return np.var(x1 - x2, ddof=ddof, axis=axis) | variance of error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
vare : ndarray or float
variance of difference along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass. | vare | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
def stde(x1, x2, ddof=0, axis=0):
"""standard deviation of error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
stde : ndarray or float
standard deviation of difference along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass.
"""
x1 = np.asanyarray(x1)
x2 = np.asanyarray(x2)
return np.std(x1 - x2, ddof=ddof, axis=axis) | standard deviation of error
Parameters
----------
x1, x2 : array_like
The performance measure depends on the difference between these two
arrays.
axis : int
axis along which the summary statistic is calculated
Returns
-------
stde : ndarray or float
standard deviation of difference along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
This uses ``numpy.asanyarray`` to convert the input. Whether this is the
desired result or not depends on the array subclass. | stde | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
def iqr(x1, x2, axis=0):
"""
Interquartile range of error
Parameters
----------
x1 : array_like
One of the inputs into the IQR calculation.
x2 : array_like
The other input into the IQR calculation.
axis : {None, int}
axis along which the summary statistic is calculated
Returns
-------
irq : {float, ndarray}
Interquartile range along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they must broadcast.
"""
x1 = array_like(x1, "x1", dtype=None, ndim=None)
x2 = array_like(x2, "x1", dtype=None, ndim=None)
if axis is None:
x1 = x1.ravel()
x2 = x2.ravel()
axis = 0
xdiff = np.sort(x1 - x2, axis=axis)
nobs = x1.shape[axis]
idx = np.round((nobs - 1) * np.array([0.25, 0.75])).astype(int)
sl = [slice(None)] * xdiff.ndim
sl[axis] = idx
iqr = np.diff(xdiff[tuple(sl)], axis=axis)
iqr = np.squeeze(iqr) # drop reduced dimension
return iqr | Interquartile range of error
Parameters
----------
x1 : array_like
One of the inputs into the IQR calculation.
x2 : array_like
The other input into the IQR calculation.
axis : {None, int}
axis along which the summary statistic is calculated
Returns
-------
irq : {float, ndarray}
Interquartile range along given axis.
Notes
-----
If ``x1`` and ``x2`` have different shapes, then they must broadcast. | iqr | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
def aic(llf, nobs, df_modelwc):
"""
Akaike information criterion
Parameters
----------
llf : {float, array_like}
value of the loglikelihood
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
aic : float
information criterion
References
----------
https://en.wikipedia.org/wiki/Akaike_information_criterion
"""
return -2.0 * llf + 2.0 * df_modelwc | Akaike information criterion
Parameters
----------
llf : {float, array_like}
value of the loglikelihood
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
aic : float
information criterion
References
----------
https://en.wikipedia.org/wiki/Akaike_information_criterion | aic | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
def aicc(llf, nobs, df_modelwc):
"""
Akaike information criterion (AIC) with small sample correction
Parameters
----------
llf : {float, array_like}
value of the loglikelihood
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
aicc : float
information criterion
References
----------
https://en.wikipedia.org/wiki/Akaike_information_criterion#AICc
Notes
-----
Returns +inf if the effective degrees of freedom, defined as
``nobs - df_modelwc - 1.0``, is <= 0.
"""
dof_eff = nobs - df_modelwc - 1.0
if dof_eff > 0:
return -2.0 * llf + 2.0 * df_modelwc * nobs / dof_eff
else:
return np.inf | Akaike information criterion (AIC) with small sample correction
Parameters
----------
llf : {float, array_like}
value of the loglikelihood
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
aicc : float
information criterion
References
----------
https://en.wikipedia.org/wiki/Akaike_information_criterion#AICc
Notes
-----
Returns +inf if the effective degrees of freedom, defined as
``nobs - df_modelwc - 1.0``, is <= 0. | aicc | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
def bic(llf, nobs, df_modelwc):
"""
Bayesian information criterion (BIC) or Schwarz criterion
Parameters
----------
llf : {float, array_like}
value of the loglikelihood
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
bic : float
information criterion
References
----------
https://en.wikipedia.org/wiki/Bayesian_information_criterion
"""
return -2.0 * llf + np.log(nobs) * df_modelwc | Bayesian information criterion (BIC) or Schwarz criterion
Parameters
----------
llf : {float, array_like}
value of the loglikelihood
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
bic : float
information criterion
References
----------
https://en.wikipedia.org/wiki/Bayesian_information_criterion | bic | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
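A worked sketch for the log-likelihood-based criteria above (the log-likelihood value is hypothetical):
>>> from statsmodels.tools.eval_measures import aic, bic
>>> llf, nobs, k = -300.5, 100, 4   # hypothetical fit: loglikelihood, observations, parameters
>>> aic(llf, nobs, k)   # -2*(-300.5) + 2*4 = 609.0
>>> bic(llf, nobs, k)   # -2*(-300.5) + log(100)*4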
def hqic(llf, nobs, df_modelwc):
"""
Hannan-Quinn information criterion (HQC)
Parameters
----------
llf : {float, array_like}
value of the loglikelihood
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
hqic : float
information criterion
References
----------
Wikipedia does not say much
"""
return -2.0 * llf + 2 * np.log(np.log(nobs)) * df_modelwc | Hannan-Quinn information criterion (HQC)
Parameters
----------
llf : {float, array_like}
value of the loglikelihood
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
hqic : float
information criterion
References
----------
Wikipedia does not say much | hqic | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
def aicc_sigma(sigma2, nobs, df_modelwc, islog=False):
"""
Akaike information criterion (AIC) with small sample correction
Parameters
----------
sigma2 : float
estimate of the residual variance or determinant of Sigma_hat in the
multivariate case. If islog is true, then it is assumed that sigma
is already log-ed, for example logdetSigma.
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
aicc : float
information criterion
Notes
-----
A constant has been dropped in comparison to the loglikelihood base
information criteria. These should be used to compare for comparable
models.
References
----------
https://en.wikipedia.org/wiki/Akaike_information_criterion#AICc
"""
if not islog:
sigma2 = np.log(sigma2)
return sigma2 + aicc(0, nobs, df_modelwc) / nobs | Akaike information criterion (AIC) with small sample correction
Parameters
----------
sigma2 : float
estimate of the residual variance or determinant of Sigma_hat in the
multivariate case. If islog is true, then it is assumed that sigma
is already log-ed, for example logdetSigma.
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
aicc : float
information criterion
Notes
-----
A constant has been dropped in comparison to the loglikelihood base
information criteria. These should be used to compare for comparable
models.
References
----------
https://en.wikipedia.org/wiki/Akaike_information_criterion#AICc | aicc_sigma | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
def bic_sigma(sigma2, nobs, df_modelwc, islog=False):
"""Bayesian information criterion (BIC) or Schwarz criterion
Parameters
----------
sigma2 : float
estimate of the residual variance or determinant of Sigma_hat in the
multivariate case. If islog is true, then it is assumed that sigma
is already log-ed, for example logdetSigma.
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
bic : float
information criterion
Notes
-----
A constant has been dropped in comparison to the loglikelihood base
information criteria. These should only be used to compare comparable
models.
References
----------
https://en.wikipedia.org/wiki/Bayesian_information_criterion
"""
if not islog:
sigma2 = np.log(sigma2)
return sigma2 + bic(0, nobs, df_modelwc) / nobs | Bayesian information criterion (BIC) or Schwarz criterion
Parameters
----------
sigma2 : float
estimate of the residual variance or determinant of Sigma_hat in the
multivariate case. If islog is true, then it is assumed that sigma
is already log-ed, for example logdetSigma.
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
bic : float
information criterion
Notes
-----
A constant has been dropped in comparison to the loglikelihood base
information criteria. These should only be used to compare comparable
models.
References
----------
https://en.wikipedia.org/wiki/Bayesian_information_criterion | bic_sigma | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
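The sigma-based criterion drops a constant relative to the loglikelihood form; under the usual Gaussian loglikelihood the two agree up to that constant and a 1/nobs scaling. A small sketch of that relation (the value of ``sigma2`` is made up), assuming both helpers are importable from ``statsmodels.tools.eval_measures``:

import numpy as np
from statsmodels.tools.eval_measures import bic, bic_sigma

nobs, k_params = 100, 3
sigma2 = 2.5  # hypothetical ML estimate of the residual variance
llf = -0.5 * nobs * (np.log(2 * np.pi) + np.log(sigma2) + 1)  # Gaussian loglikelihood at the MLE

lhs = bic_sigma(sigma2, nobs, k_params)
rhs = (bic(llf, nobs, k_params) - nobs * (np.log(2 * np.pi) + 1)) / nobs
print(np.allclose(lhs, rhs))  # True: same criterion after dropping the constant and dividing by nobs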
def hqic_sigma(sigma2, nobs, df_modelwc, islog=False):
"""Hannan-Quinn information criterion (HQC)
Parameters
----------
sigma2 : float
estimate of the residual variance or determinant of Sigma_hat in the
multivariate case. If islog is true, then it is assumed that sigma
is already log-ed, for example logdetSigma.
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
hqic : float
information criterion
Notes
-----
A constant has been dropped in comparison to the loglikelihood base
information criteria. These should only be used to compare comparable
models.
References
----------
https://en.wikipedia.org/wiki/Hannan%E2%80%93Quinn_information_criterion
"""
if not islog:
sigma2 = np.log(sigma2)
return sigma2 + hqic(0, nobs, df_modelwc) / nobs | Hannan-Quinn information criterion (HQC)
Parameters
----------
sigma2 : float
estimate of the residual variance or determinant of Sigma_hat in the
multivariate case. If islog is true, then it is assumed that sigma
is already log-ed, for example logdetSigma.
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
hqic : float
information criterion
Notes
-----
A constant has been dropped in comparison to the loglikelihood base
information criteria. These should only be used to compare comparable
models.
References
----------
https://en.wikipedia.org/wiki/Hannan%E2%80%93Quinn_information_criterion | hqic_sigma | python | statsmodels/statsmodels | statsmodels/tools/eval_measures.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/eval_measures.py | BSD-3-Clause |
def approx_fprime(x, f, epsilon=None, args=(), kwargs={}, centered=False):
'''
Gradient of function, or Jacobian if function f returns 1d array
Parameters
----------
x : ndarray
parameters at which the derivative is evaluated
f : function
`f(*((x,)+args), **kwargs)` returning either one value or 1d array
epsilon : float, optional
Stepsize, if None, optimal stepsize is used. This is EPS**(1/2)*x for
`centered` == False and EPS**(1/3)*x for `centered` == True.
args : tuple
Tuple of additional arguments for function `f`.
kwargs : dict
Dictionary of additional keyword arguments for function `f`.
centered : bool
Whether central difference should be returned. If not, does forward
differencing.
Returns
-------
grad : ndarray
gradient or Jacobian
Notes
-----
If f returns a 1d array, it returns a Jacobian. If a 2d array is returned
by f (e.g., with a value for each observation), it returns a 3d array
with the Jacobian of each observation with shape xk x nobs x xk. I.e.,
the Jacobian of the first observation would be [:, 0, :]
'''
n = len(x)
f0 = f(*((x,)+args), **kwargs)
dim = np.atleast_1d(f0).shape # it could be a scalar
grad = np.zeros((n,) + dim, np.promote_types(float, x.dtype))
ei = np.zeros((n,), float)
if not centered:
epsilon = _get_epsilon(x, 2, epsilon, n)
for k in range(n):
ei[k] = epsilon[k]
grad[k, :] = (f(*((x+ei,) + args), **kwargs) - f0)/epsilon[k]
ei[k] = 0.0
else:
epsilon = _get_epsilon(x, 3, epsilon, n) / 2.
for k in range(n):
ei[k] = epsilon[k]
grad[k, :] = (f(*((x+ei,)+args), **kwargs) -
f(*((x-ei,)+args), **kwargs))/(2 * epsilon[k])
ei[k] = 0.0
if n == 1:
return grad.T
else:
return grad.squeeze().T | Gradient of function, or Jacobian if function f returns 1d array
Parameters
----------
x : ndarray
parameters at which the derivative is evaluated
f : function
`f(*((x,)+args), **kwargs)` returning either one value or 1d array
epsilon : float, optional
Stepsize, if None, optimal stepsize is used. This is EPS**(1/2)*x for
`centered` == False and EPS**(1/3)*x for `centered` == True.
args : tuple
Tuple of additional arguments for function `f`.
kwargs : dict
Dictionary of additional keyword arguments for function `f`.
centered : bool
Whether central difference should be returned. If not, does forward
differencing.
Returns
-------
grad : ndarray
gradient or Jacobian
Notes
-----
If f returns a 1d array, it returns a Jacobian. If a 2d array is returned
by f (e.g., with a value for each observation), it returns a 3d array
with the Jacobian of each observation with shape xk x nobs x xk. I.e.,
the Jacobian of the first observation would be [:, 0, :] | approx_fprime | python | statsmodels/statsmodels | statsmodels/tools/numdiff.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/numdiff.py | BSD-3-Clause |
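A small check of ``approx_fprime`` against an analytic Jacobian, assuming the function is importable from ``statsmodels.tools.numdiff`` (the path field above); the test function is arbitrary:

import numpy as np
from statsmodels.tools.numdiff import approx_fprime

def fun(x):
    return np.array([x[0] ** 2 + x[1], np.sin(x[0]) * x[1]])

x0 = np.array([1.5, -0.3])
jac = approx_fprime(x0, fun, centered=True)      # rows are outputs, columns are parameters
analytic = np.array([[2 * x0[0], 1.0],
                     [np.cos(x0[0]) * x0[1], np.sin(x0[0])]])
print(np.allclose(jac, analytic, atol=1e-6))     # True within finite-difference error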
def approx_fprime_cs(x, f, epsilon=None, args=(), kwargs={}):
'''
Calculate gradient or Jacobian with complex step derivative approximation
Parameters
----------
x : ndarray
parameters at which the derivative is evaluated
f : function
`f(*((x,)+args), **kwargs)` returning either one value or 1d array
epsilon : float, optional
Stepsize, if None, optimal stepsize is used. Optimal step-size is
EPS*x. See note.
args : tuple
Tuple of additional arguments for function `f`.
kwargs : dict
Dictionary of additional keyword arguments for function `f`.
Returns
-------
partials : ndarray
array of partial derivatives, Gradient or Jacobian
Notes
-----
The complex-step derivative has truncation error O(epsilon**2), so
truncation error can be eliminated by choosing epsilon to be very small.
The complex-step derivative avoids the problem of round-off error with
small epsilon because there is no subtraction.
'''
# From Guilherme P. de Freitas, numpy mailing list
# May 04 2010 thread "Improvement of performance"
# http://mail.scipy.org/pipermail/numpy-discussion/2010-May/050250.html
n = len(x)
epsilon = _get_epsilon(x, 1, epsilon, n)
increments = np.identity(n) * 1j * epsilon
# TODO: see if this can be vectorized, but usually dim is small
partials = [f(x+ih, *args, **kwargs).imag / epsilon[i]
for i, ih in enumerate(increments)]
return np.array(partials).T | Calculate gradient or Jacobian with complex step derivative approximation
Parameters
----------
x : ndarray
parameters at which the derivative is evaluated
f : function
`f(*((x,)+args), **kwargs)` returning either one value or 1d array
epsilon : float, optional
Stepsize, if None, optimal stepsize is used. Optimal step-size is
EPS*x. See note.
args : tuple
Tuple of additional arguments for function `f`.
kwargs : dict
Dictionary of additional keyword arguments for function `f`.
Returns
-------
partials : ndarray
array of partial derivatives, Gradient or Jacobian
Notes
-----
The complex-step derivative has truncation error O(epsilon**2), so
truncation error can be eliminated by choosing epsilon to be very small.
The complex-step derivative avoids the problem of round-off error with
small epsilon because there is no subtraction. | approx_fprime_cs | python | statsmodels/statsmodels | statsmodels/tools/numdiff.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/numdiff.py | BSD-3-Clause |
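A sketch of the complex-step gradient; the test function must accept complex inputs, which is why only analytic operations (exp, sin) are used. Assumes the function is importable from ``statsmodels.tools.numdiff``:

import numpy as np
from statsmodels.tools.numdiff import approx_fprime_cs

def fun(x):
    # analytic in x, so complex-step differentiation applies
    return np.exp(x[0]) * np.sin(x[1])

x0 = np.array([0.2, 1.1])
grad = approx_fprime_cs(x0, fun)
analytic = np.array([np.exp(x0[0]) * np.sin(x0[1]),
                     np.exp(x0[0]) * np.cos(x0[1])])
print(np.allclose(grad, analytic, rtol=1e-12))   # essentially exact, no subtractive cancellation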
def _approx_fprime_cs_scalar(x, f, epsilon=None, args=(), kwargs={}):
'''
Calculate gradient for scalar parameter with complex step derivatives.
This assumes that the function ``f`` is vectorized for a scalar parameter.
The function value ``f(x)`` has then the same shape as the input ``x``.
The derivative returned by this function also has the same shape as ``x``.
Parameters
----------
x : ndarray
Parameters at which the derivative is evaluated.
f : function
`f(*((x,)+args), **kwargs)` returning either one value or 1d array.
epsilon : float, optional
Stepsize, if None, optimal stepsize is used. Optimal step-size is
EPS*x. See note.
args : tuple
Tuple of additional arguments for function `f`.
kwargs : dict
Dictionary of additional keyword arguments for function `f`.
Returns
-------
partials : ndarray
Array of derivatives, gradient evaluated for parameters ``x``.
Notes
-----
The complex-step derivative has truncation error O(epsilon**2), so
truncation error can be eliminated by choosing epsilon to be very small.
The complex-step derivative avoids the problem of round-off error with
small epsilon because there is no subtraction.
'''
# From Guilherme P. de Freitas, numpy mailing list
# May 04 2010 thread "Improvement of performance"
# http://mail.scipy.org/pipermail/numpy-discussion/2010-May/050250.html
x = np.asarray(x)
n = x.shape[-1]
epsilon = _get_epsilon(x, 1, epsilon, n)
eps = 1j * epsilon
partials = f(x + eps, *args, **kwargs).imag / epsilon
return np.array(partials) | Calculate gradient for scalar parameter with complex step derivatives.
This assumes that the function ``f`` is vectorized for a scalar parameter.
The function value ``f(x)`` has then the same shape as the input ``x``.
The derivative returned by this function also has the same shape as ``x``.
Parameters
----------
x : ndarray
Parameters at which the derivative is evaluated.
f : function
`f(*((x,)+args), **kwargs)` returning either one value or 1d array.
epsilon : float, optional
Stepsize, if None, optimal stepsize is used. Optimal step-size is
EPS*x. See note.
args : tuple
Tuple of additional arguments for function `f`.
kwargs : dict
Dictionary of additional keyword arguments for function `f`.
Returns
-------
partials : ndarray
Array of derivatives, gradient evaluated for parameters ``x``.
Notes
-----
The complex-step derivative has truncation error O(epsilon**2), so
truncation error can be eliminated by choosing epsilon to be very small.
The complex-step derivative avoids the problem of round-off error with
small epsilon because there is no subtraction. | _approx_fprime_cs_scalar | python | statsmodels/statsmodels | statsmodels/tools/numdiff.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/numdiff.py | BSD-3-Clause |
def approx_hess_cs(x, f, epsilon=None, args=(), kwargs={}):
'''Calculate Hessian with complex-step derivative approximation
Parameters
----------
x : array_like
value at which function derivative is evaluated
f : function
function of one array f(x)
epsilon : float
stepsize, if None, then stepsize is automatically chosen
Returns
-------
hess : ndarray
array of partial second derivatives, Hessian
Notes
-----
based on equation 10 in
M. S. RIDOUT: Statistical Applications of the Complex-step Method
of Numerical Differentiation, University of Kent, Canterbury, Kent, U.K.
The stepsize is the same for the complex and the finite difference part.
'''
# TODO: might want to consider lowering the step for pure derivatives
n = len(x)
h = _get_epsilon(x, 3, epsilon, n)
ee = np.diag(h)
hess = np.outer(h, h)
n = len(x)
for i in range(n):
for j in range(i, n):
hess[i, j] = np.squeeze(
(f(*((x + 1j*ee[i, :] + ee[j, :],) + args), **kwargs)
- f(*((x + 1j*ee[i, :] - ee[j, :],)+args),
**kwargs)).imag/2./hess[i, j]
)
hess[j, i] = hess[i, j]
return hess | Calculate Hessian with complex-step derivative approximation
Parameters
----------
x : array_like
value at which function derivative is evaluated
f : function
function of one array f(x)
epsilon : float
stepsize, if None, then stepsize is automatically chosen
Returns
-------
hess : ndarray
array of partial second derivatives, Hessian
Notes
-----
based on equation 10 in
M. S. RIDOUT: Statistical Applications of the Complex-step Method
of Numerical Differentiation, University of Kent, Canterbury, Kent, U.K.
The stepsize is the same for the complex and the finite difference part. | approx_hess_cs | python | statsmodels/statsmodels | statsmodels/tools/numdiff.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/numdiff.py | BSD-3-Clause |
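For a quadratic form the Hessian is known exactly, which makes a convenient sanity check; assumes ``approx_hess_cs`` is importable from ``statsmodels.tools.numdiff``:

import numpy as np
from statsmodels.tools.numdiff import approx_hess_cs

A = np.array([[3.0, 1.0],
              [1.0, 2.0]])

def quad(x):
    return 0.5 * x @ A @ x   # Hessian of this function is A

hess = approx_hess_cs(np.array([0.7, -1.2]), quad)
print(np.allclose(hess, A, atol=1e-8))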
def test_missing_data_pandas():
"""
Fixes GH: #144
"""
X = np.random.random((10, 5))
X[1, 2] = np.nan
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(rnames.tolist(), [0, 2, 3, 4, 5, 6, 7, 8, 9]) | Fixes GH: #144 | test_missing_data_pandas | python | statsmodels/statsmodels | statsmodels/tools/tests/test_data.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/tests/test_data.py | BSD-3-Clause |
def _right_squeeze(arr, stop_dim=0):
"""
Remove trailing singleton dimensions
Parameters
----------
arr : ndarray
Input array
stop_dim : int
Dimension where checking should stop so that shape[i] is not checked
for i < stop_dim
Returns
-------
squeezed : ndarray
Array with all trailing singleton dimensions (0 or 1) removed.
Singleton dimensions for dimension < stop_dim are retained.
"""
last = arr.ndim
for s in reversed(arr.shape):
if s > 1:
break
last -= 1
last = max(last, stop_dim)
return arr.reshape(arr.shape[:last]) | Remove trailing singleton dimensions
Parameters
----------
arr : ndarray
Input array
stop_dim : int
Dimension where checking should stop so that shape[i] is not checked
for i < stop_dim
Returns
-------
squeezed : ndarray
Array with all trailing singleton dimensions (0 or 1) removed.
Singleton dimensions for dimension < stop_dim are retained. | _right_squeeze | python | statsmodels/statsmodels | statsmodels/tools/validation/validation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/validation/validation.py | BSD-3-Clause |
def array_like(
obj,
name,
dtype=np.double,
ndim=1,
maxdim=None,
shape=None,
order=None,
contiguous=False,
optional=False,
writeable=True,
):
"""
Convert array-like to a ndarray and check conditions
Parameters
----------
obj : array_like
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
name : str
Name of the variable to use in exceptions
dtype : {None, numpy.dtype, str}
Required dtype. Default is double. If None, does not change the dtype
of obj (if present) or uses NumPy to automatically detect the dtype
ndim : {int, None}
Required number of dimensions of obj. If None, no check is performed.
If the number of dimensions of obj is less than ndim, additional axes
are inserted on the right. See examples.
maxdim : {int, None}
Maximum allowed dimension. Use ``maxdim`` instead of ``ndim`` when
inputs are allowed to have ndim 1, 2, ..., or maxdim.
shape : {tuple[int], None}
Required shape obj. If None, no check is performed. Partially
restricted shapes can be checked using None. See examples.
order : {'C', 'F', None}
Order of the array
contiguous : bool
Ensure that the array's data is contiguous with order ``order``
optional : bool
Flag indicating whether None is allowed
writeable : bool
Whether to ensure the returned array is writeable
Returns
-------
ndarray
The converted input.
Examples
--------
Convert a list or pandas series to an array
>>> import pandas as pd
>>> x = [0, 1, 2, 3]
>>> a = array_like(x, 'x', ndim=1)
>>> a.shape
(4,)
>>> a = array_like(pd.Series(x), 'x', ndim=1)
>>> a.shape
(4,)
>>> type(a.orig)
pandas.core.series.Series
Squeezes singleton dimensions when required
>>> x = np.array(x).reshape((4, 1))
>>> a = array_like(x, 'x', ndim=1)
>>> a.shape
(4,)
Right-appends when required size is larger than actual
>>> x = [0, 1, 2, 3]
>>> a = array_like(x, 'x', ndim=2)
>>> a.shape
(4, 1)
Check only the first and last dimension of the input
>>> x = np.arange(4*10*4).reshape((4, 10, 4))
>>> y = array_like(x, 'x', ndim=3, shape=(4, None, 4))
Check only the first two dimensions
>>> z = array_like(x, 'x', ndim=3, shape=(4, 10))
Raises ValueError if constraints are not satisfied
>>> z = array_like(x, 'x', ndim=2)
Traceback (most recent call last):
...
ValueError: x is required to have ndim 2 but has ndim 3
>>> z = array_like(x, 'x', shape=(10, 4, 4))
Traceback (most recent call last):
...
ValueError: x is required to have shape (10, 4, 4) but has shape (4, 10, 4)
>>> z = array_like(x, 'x', shape=(None, 4, 4))
Traceback (most recent call last):
...
ValueError: x is required to have shape (*, 4, 4) but has shape (4, 10, 4)
"""
if optional and obj is None:
return None
reqs = ["W"] if writeable else []
if order == "C" or contiguous:
reqs += ["C"]
elif order == "F":
reqs += ["F"]
arr = np.require(obj, dtype=dtype, requirements=reqs)
if maxdim is not None:
if arr.ndim > maxdim:
msg = f"{name} must have ndim <= {maxdim}"
raise ValueError(msg)
elif ndim is not None:
if arr.ndim > ndim:
arr = _right_squeeze(arr, stop_dim=ndim)
elif arr.ndim < ndim:
arr = np.reshape(arr, arr.shape + (1,) * (ndim - arr.ndim))
if arr.ndim != ndim:
msg = "{0} is required to have ndim {1} but has ndim {2}"
raise ValueError(msg.format(name, ndim, arr.ndim))
if shape is not None:
for actual, req in zip(arr.shape, shape):
if req is not None and actual != req:
req_shape = str(shape).replace("None, ", "*, ")
msg = "{0} is required to have shape {1} but has shape {2}"
raise ValueError(msg.format(name, req_shape, arr.shape))
return arr | Convert array-like to a ndarray and check conditions
Parameters
----------
obj : array_like
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
name : str
Name of the variable to use in exceptions
dtype : {None, numpy.dtype, str}
Required dtype. Default is double. If None, does not change the dtype
of obj (if present) or uses NumPy to automatically detect the dtype
ndim : {int, None}
Required number of dimensions of obj. If None, no check is performed.
If the number of dimensions of obj is less than ndim, additional axes
are inserted on the right. See examples.
maxdim : {int, None}
Maximum allowed dimension. Use ``maxdim`` instead of ``ndim`` when
inputs are allowed to have ndim 1, 2, ..., or maxdim.
shape : {tuple[int], None}
Required shape obj. If None, no check is performed. Partially
restricted shapes can be checked using None. See examples.
order : {'C', 'F', None}
Order of the array
contiguous : bool
Ensure that the array's data is contiguous with order ``order``
optional : bool
Flag indicating whether None is allowed
writeable : bool
Whether to ensure the returned array is writeable
Returns
-------
ndarray
The converted input.
Examples
--------
Convert a list or pandas series to an array
>>> import pandas as pd
>>> x = [0, 1, 2, 3]
>>> a = array_like(x, 'x', ndim=1)
>>> a.shape
(4,)
>>> a = array_like(pd.Series(x), 'x', ndim=1)
>>> a.shape
(4,)
>>> type(a.orig)
pandas.core.series.Series
Squeezes singleton dimensions when required
>>> x = np.array(x).reshape((4, 1))
>>> a = array_like(x, 'x', ndim=1)
>>> a.shape
(4,)
Right-appends when required size is larger than actual
>>> x = [0, 1, 2, 3]
>>> a = array_like(x, 'x', ndim=2)
>>> a.shape
(4, 1)
Check only the first and last dimension of the input
>>> x = np.arange(4*10*4).reshape((4, 10, 4))
>>> y = array_like(x, 'x', ndim=3, shape=(4, None, 4))
Check only the first two dimensions
>>> z = array_like(x, 'x', ndim=3, shape=(4, 10))
Raises ValueError if constraints are not satisfied
>>> z = array_like(x, 'x', ndim=2)
Traceback (most recent call last):
...
ValueError: x is required to have ndim 2 but has ndim 3
>>> z = array_like(x, 'x', shape=(10, 4, 4))
Traceback (most recent call last):
...
ValueError: x is required to have shape (10, 4, 4) but has shape (4, 10, 4)
>>> z = array_like(x, 'x', shape=(None, 4, 4))
Traceback (most recent call last):
...
ValueError: x is required to have shape (*, 4, 4) but has shape (4, 10, 4) | array_like | python | statsmodels/statsmodels | statsmodels/tools/validation/validation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/validation/validation.py | BSD-3-Clause |
def wrap(self, obj, columns=None, append=None, trim_start=0, trim_end=0):
"""
Parameters
----------
obj : {array_like}
The value to wrap like to a pandas Series or DataFrame.
columns : {str, list[str]}
Column names or series name, if obj is 1d.
append : str
String to append to the columns to create a new column name.
trim_start : int
The number of observations to drop from the start of the index, so
that the index applied is index[trim_start:].
trim_end : int
The number of observations to drop from the end of the index, so
that the index applied is index[:nobs - trim_end].
Returns
-------
array_like
A pandas Series or DataFrame, depending on the shape of obj.
"""
obj = np.asarray(obj)
if not self._is_pandas:
return obj
if obj.shape[0] + trim_start + trim_end != self._pandas_obj.shape[0]:
raise ValueError(
"obj must have the same number of elements in "
"axis 0 as orig"
)
index = self._pandas_obj.index
index = index[trim_start: index.shape[0] - trim_end]
if obj.ndim == 1:
if columns is None:
name = getattr(self._pandas_obj, "name", None)
elif isinstance(columns, str):
name = columns
else:
name = columns[0]
if append is not None:
name = append if name is None else f"{name}_{append}"
return pd.Series(obj, name=name, index=index)
elif obj.ndim == 2:
if columns is None:
columns = getattr(self._pandas_obj, "columns", None)
if append is not None:
new = []
for c in columns:
new.append(append if c is None else f"{c}_{append}")
columns = new
return pd.DataFrame(obj, columns=columns, index=index)
else:
raise ValueError("Can only wrap 1 or 2-d array_like") | Parameters
----------
obj : {array_like}
The value to wrap like to a pandas Series or DataFrame.
columns : {str, list[str]}
Column names or series name, if obj is 1d.
append : str
String to append to the columns to create a new column name.
trim_start : int
The number of observations to drop from the start of the index, so
that the index applied is index[trim_start:].
trim_end : int
The number of observations to drop from the end of the index, so
that the index applied is index[:nobs - trim_end].
Returns
-------
array_like
A pandas Series or DataFrame, depending on the shape of obj. | wrap | python | statsmodels/statsmodels | statsmodels/tools/validation/validation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/validation/validation.py | BSD-3-Clause |
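A sketch of this method, assuming it belongs to the ``PandasWrapper`` class in this module (the class definition is not shown here); it re-attaches the index and name of the original series to a shorter NumPy result:

import numpy as np
import pandas as pd
from statsmodels.tools.validation import PandasWrapper  # assumed host class of this method

orig = pd.Series([1.0, 2.0, 3.0, 4.0],
                 index=pd.period_range("2020-01", periods=4, freq="M"), name="y")
diffed = np.diff(np.asarray(orig))                  # one observation shorter than the input
wrapped = PandasWrapper(orig).wrap(diffed, append="diff", trim_start=1)
print(wrapped.name, wrapped.index[0])               # y_diff 2020-02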
def bool_like(value, name, optional=False, strict=False):
"""
Convert to bool or raise if not bool_like
Parameters
----------
value : object
Value to verify
name : str
Variable name for exceptions
optional : bool
Flag indicating whether None is allowed
strict : bool
If True, then only allow bool. If False, allow types that support
casting to bool.
Returns
-------
converted : bool
value converted to a bool
"""
if optional and value is None:
return value
extra_text = " or None" if optional else ""
if strict:
if isinstance(value, bool):
return value
else:
raise TypeError(f"{name} must be a bool{extra_text}")
if hasattr(value, "squeeze") and callable(value.squeeze):
value = value.squeeze()
try:
return bool(value)
except Exception:
raise TypeError(
"{} must be a bool (or bool-compatible)"
"{}".format(name, extra_text)
) | Convert to bool or raise if not bool_like
Parameters
----------
value : object
Value to verify
name : str
Variable name for exceptions
optional : bool
Flag indicating whether None is allowed
strict : bool
If True, then only allow bool. If False, allow types that support
casting to bool.
Returns
-------
converted : bool
value converted to a bool | bool_like | python | statsmodels/statsmodels | statsmodels/tools/validation/validation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/validation/validation.py | BSD-3-Clause |
def int_like(
value: Any, name: str, optional: bool = False, strict: bool = False
) -> Optional[int]:
"""
Convert to int or raise if not int_like
Parameters
----------
value : object
Value to verify
name : str
Variable name for exceptions
optional : bool
Flag indicating whether None is allowed
strict : bool
If True, then only allow int or np.integer that are not bool. If False,
allow types that support integer division by 1 and conversion to int.
Returns
-------
converted : int
value converted to an int
"""
if optional and value is None:
return None
is_bool_timedelta = isinstance(value, (bool, np.timedelta64))
if hasattr(value, "squeeze") and callable(value.squeeze):
value = value.squeeze()
if isinstance(value, (int, np.integer)) and not is_bool_timedelta:
return int(value)
elif not strict and not is_bool_timedelta:
try:
if value == (value // 1):
return int(value)
except Exception:
pass
extra_text = " or None" if optional else ""
raise TypeError(
"{} must be integer_like (int or np.integer, but not bool"
" or timedelta64){}".format(name, extra_text)
) | Convert to int or raise if not int_like
Parameters
----------
value : object
Value to verify
name : str
Variable name for exceptions
optional : bool
Flag indicating whether None is allowed
strict : bool
If True, then only allow int or np.integer that are not bool. If False,
allow types that support integer division by 1 and conversion to int.
Returns
-------
converted : int
value converted to an int | int_like | python | statsmodels/statsmodels | statsmodels/tools/validation/validation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/validation/validation.py | BSD-3-Clause |
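A short illustration of the strictness rules shared by these validators (``bool_like`` appears above); both are assumed importable from ``statsmodels.tools.validation``:

from statsmodels.tools.validation import bool_like, int_like

print(int_like(5.0, "maxiter"))    # 5: a float with integral value passes when strict=False
print(bool_like(0, "disp"))        # False: ints are cast when strict=False
try:
    int_like(True, "maxiter")      # bools are never accepted as integers
except TypeError as exc:
    print(exc)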
def float_like(value, name, optional=False, strict=False):
"""
Convert to float or raise if not float_like
Parameters
----------
value : object
Value to verify
name : str
Variable name for exceptions
optional : bool
Flag indicating whether None is allowed
strict : bool
If True, then only allow int, np.integer, float or np.inexact that are
not bool or complex. If False, allow complex types with 0 imag part or
any other type that is float-like in the sense that it supports
multiplication by 1.0 and conversion to float.
Returns
-------
converted : float
value converted to a float
"""
if optional and value is None:
return None
is_bool = isinstance(value, bool)
is_complex = isinstance(value, (complex, np.complexfloating))
if hasattr(value, "squeeze") and callable(value.squeeze):
value = value.squeeze()
if isinstance(value, (int, np.integer, float, np.inexact)) and not (
is_bool or is_complex
):
return float(value)
elif not strict and is_complex:
imag = np.imag(value)
if imag == 0:
return float(np.real(value))
elif not strict and not is_bool:
try:
return float(value / 1.0)
except Exception:
pass
extra_text = " or None" if optional else ""
raise TypeError(
"{} must be float_like (float or np.inexact)"
"{}".format(name, extra_text)
) | Convert to float or raise if not float_like
Parameters
----------
value : object
Value to verify
name : str
Variable name for exceptions
optional : bool
Flag indicating whether None is allowed
strict : bool
If True, then only allow int, np.integer, float or np.inexact that are
not bool or complex. If False, allow complex types with 0 imag part or
any other type that is float-like in the sense that it supports
multiplication by 1.0 and conversion to float.
Returns
-------
converted : float
value converted to a float | float_like | python | statsmodels/statsmodels | statsmodels/tools/validation/validation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/validation/validation.py | BSD-3-Clause |
def string_like(value, name, optional=False, options=None, lower=True):
"""
Check if object is string-like and raise if not
Parameters
----------
value : object
Value to verify.
name : str
Variable name for exceptions.
optional : bool
Flag indicating whether None is allowed.
options : tuple[str]
Allowed values for input parameter `value`.
lower : bool
Convert all case-based characters in `value` into lowercase.
Returns
-------
str
The validated input
Raises
------
TypeError
If the value is not a string or None when optional is True.
ValueError
If the input is not in ``options`` when ``options`` is set.
"""
if value is None:
return None
if not isinstance(value, str):
extra_text = " or None" if optional else ""
raise TypeError(f"{name} must be a string{extra_text}")
if lower:
value = value.lower()
if options is not None and value not in options:
extra_text = "If not None, " if optional else ""
options_text = "'" + "', '".join(options) + "'"
msg = "{}{} must be one of: {}".format(
extra_text, name, options_text
)
raise ValueError(msg)
return value | Check if object is string-like and raise if not
Parameters
----------
value : object
Value to verify.
name : str
Variable name for exceptions.
optional : bool
Flag indicating whether None is allowed.
options : tuple[str]
Allowed values for input parameter `value`.
lower : bool
Convert all case-based characters in `value` into lowercase.
Returns
-------
str
The validated input
Raises
------
TypeError
If the value is not a string or None when optional is True.
ValueError
If the input is not in ``options`` when ``options`` is set. | string_like | python | statsmodels/statsmodels | statsmodels/tools/validation/validation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/validation/validation.py | BSD-3-Clause |
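A quick illustration of the lower-casing and ``options`` check, assuming the validator is importable from ``statsmodels.tools.validation``:

from statsmodels.tools.validation import string_like

method = string_like("Nearest", "method", options=("clipped", "nearest"))
print(method)   # 'nearest' -- lower-casing happens before the options check
try:
    string_like("median", "method", options=("clipped", "nearest"))
except ValueError as exc:
    print(exc)  # method must be one of: 'clipped', 'nearest'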
def dict_like(value, name, optional=False, strict=True):
"""
Check if dict_like (dict, Mapping) or raise if not
Parameters
----------
value : object
Value to verify
name : str
Variable name for exceptions
optional : bool
Flag indicating whether None is allowed
strict : bool
If True, then only allow dict. If False, allow any Mapping-like object.
Returns
-------
converted : dict_like
value
"""
if optional and value is None:
return None
if not isinstance(value, Mapping) or (
strict and not (isinstance(value, dict))
):
extra_text = "If not None, " if optional else ""
strict_text = " or dict_like (i.e., a Mapping)" if strict else ""
msg = f"{extra_text}{name} must be a dict{strict_text}"
raise TypeError(msg)
return value | Check if dict_like (dict, Mapping) or raise if not
Parameters
----------
value : object
Value to verify
name : str
Variable name for exceptions
optional : bool
Flag indicating whether None is allowed
strict : bool
If True, then only allow dict. If False, allow any Mapping-like object.
Returns
-------
converted : dict_like
value | dict_like | python | statsmodels/statsmodels | statsmodels/tools/validation/validation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tools/validation/validation.py | BSD-3-Clause |
def _noncentrality_chisquare(chi2_stat, df, alpha=0.05):
"""noncentrality parameter for chi-square statistic
`nc` is zero-truncated umvue
Parameters
----------
chi2_stat : float
Chisquare-statistic, for example from a hypothesis test
df : int or float
Degrees of freedom
alpha : float in (0, 1)
Significance level for the confidence interval, coverage is 1 - alpha.
Returns
-------
HolderTuple
The main attributes are
- ``nc`` : estimate of noncentrality parameter
- ``confint`` : lower and upper bound of confidence interval for ``nc``
Other attributes are estimates for nc by different methods.
References
----------
.. [1] Kubokawa, T., C.P. Robert, and A.K.Md.E. Saleh. 1993. “Estimation of
Noncentrality Parameters.”
Canadian Journal of Statistics 21 (1): 45–57.
https://doi.org/10.2307/3315657.
.. [2] Li, Qizhai, Junjian Zhang, and Shuai Dai. 2009. “On Estimating the
Non-Centrality Parameter of a Chi-Squared Distribution.”
Statistics & Probability Letters 79 (1): 98–104.
https://doi.org/10.1016/j.spl.2008.07.025.
"""
alpha_half = alpha / 2
nc_umvue = chi2_stat - df
nc = np.maximum(nc_umvue, 0)
nc_lzd = np.maximum(nc_umvue, chi2_stat / (df + 1))
nc_krs = np.maximum(nc_umvue, chi2_stat * 2 / (df + 2))
nc_median = special.chndtrinc(chi2_stat, df, 0.5)
ci = special.chndtrinc(chi2_stat, df, [1 - alpha_half, alpha_half])
res = Holder(nc=nc,
confint=ci,
nc_umvue=nc_umvue,
nc_lzd=nc_lzd,
nc_krs=nc_krs,
nc_median=nc_median,
name="Noncentrality for chisquare-distributed random variable"
)
return res | noncentrality parameter for chi-square statistic
`nc` is zero-truncated umvue
Parameters
----------
chi2_stat : float
Chisquare-statistic, for example from a hypothesis test
df : int or float
Degrees of freedom
alpha : float in (0, 1)
Significance level for the confidence interval, coverage is 1 - alpha.
Returns
-------
HolderTuple
The main attributes are
- ``nc`` : estimate of noncentrality parameter
- ``confint`` : lower and upper bound of confidence interval for ``nc``
Other attributes are estimates for nc by different methods.
References
----------
.. [1] Kubokawa, T., C.P. Robert, and A.K.Md.E. Saleh. 1993. “Estimation of
Noncentrality Parameters.”
Canadian Journal of Statistics 21 (1): 45–57.
https://doi.org/10.2307/3315657.
.. [2] Li, Qizhai, Junjian Zhang, and Shuai Dai. 2009. “On Estimating the
Non-Centrality Parameter of a Chi-Squared Distribution.”
Statistics & Probability Letters 79 (1): 98–104.
https://doi.org/10.1016/j.spl.2008.07.025. | _noncentrality_chisquare | python | statsmodels/statsmodels | statsmodels/stats/effect_size.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/effect_size.py | BSD-3-Clause |
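A small sketch; the function is a private helper in ``statsmodels/stats/effect_size.py`` (per the path field), so it is imported directly here only for illustration, and the chi-square statistic is made up:

from statsmodels.stats.effect_size import _noncentrality_chisquare

res = _noncentrality_chisquare(27.5, df=10, alpha=0.05)
print(res.nc)        # zero-truncated UMVUE, here 27.5 - 10 = 17.5
print(res.confint)   # two-sided 95% interval for the noncentrality parameter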
def _noncentrality_f(f_stat, df1, df2, alpha=0.05):
"""noncentrality parameter for f statistic
`nc` is zero-truncated umvue
Parameters
----------
f_stat : float
f-statistic, for example from a hypothesis test
df1, df2 : int or float
Numerator and denominator degrees of freedom
alpha : float in (0, 1)
Significance level for the confidence interval, coverage is 1 - alpha.
Returns
-------
HolderTuple
The main attributes are
- ``nc`` : estimate of noncentrality parameter
- ``confint`` : lower and upper bound of confidence interval for ``nc``
Other attributes are estimates for nc by different methods.
References
----------
.. [1] Kubokawa, T., C.P. Robert, and A.K.Md.E. Saleh. 1993. “Estimation of
Noncentrality Parameters.” Canadian Journal of Statistics 21 (1): 45–57.
https://doi.org/10.2307/3315657.
"""
alpha_half = alpha / 2
x_s = f_stat * df1 / df2
nc_umvue = (df2 - 2) * x_s - df1
nc = np.maximum(nc_umvue, 0)
nc_krs = np.maximum(nc_umvue, x_s * 2 * (df2 - 1) / (df1 + 2))
nc_median = special.ncfdtrinc(df1, df2, 0.5, f_stat)
ci = special.ncfdtrinc(df1, df2, [1 - alpha_half, alpha_half], f_stat)
res = Holder(nc=nc,
confint=ci,
nc_umvue=nc_umvue,
nc_krs=nc_krs,
nc_median=nc_median,
name="Noncentrality for F-distributed random variable"
)
return res | noncentrality parameter for f statistic
`nc` is zero-truncated umvue
Parameters
----------
f_stat : float
f-statistic, for example from a hypothesis test
df1, df2 : int or float
Numerator and denominator degrees of freedom
alpha : float in (0, 1)
Significance level for the confidence interval, coverage is 1 - alpha.
Returns
-------
HolderTuple
The main attributes are
- ``nc`` : estimate of noncentrality parameter
- ``confint`` : lower and upper bound of confidence interval for `nc``
Other attributes are estimates for nc by different methods.
References
----------
.. [1] Kubokawa, T., C.P. Robert, and A.K.Md.E. Saleh. 1993. “Estimation of
Noncentrality Parameters.” Canadian Journal of Statistics 21 (1): 45–57.
https://doi.org/10.2307/3315657. | _noncentrality_f | python | statsmodels/statsmodels | statsmodels/stats/effect_size.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/effect_size.py | BSD-3-Clause |
def _noncentrality_t(t_stat, df, alpha=0.05):
"""noncentrality parameter for t statistic
Parameters
----------
t_stat : float
t-statistic, for example from a hypothesis test
df : int or float
Degrees of freedom
alpha : float in (0, 1)
Significance level for the confidence interval, coverage is 1 - alpha.
Returns
-------
HolderTuple
The main attributes are
- ``nc`` : estimate of noncentrality parameter
- ``confint`` : lower and upper bound of confidence interval for ``nc``
Other attributes are estimates for nc by different methods.
References
----------
.. [1] Hedges, Larry V. 2016. “Distribution Theory for Glass’s Estimator of
Effect Size and Related Estimators:”
Journal of Educational Statistics, November.
https://doi.org/10.3102/10769986006002107.
"""
alpha_half = alpha / 2
gfac = np.exp(special.gammaln(df/2.-0.5) - special.gammaln(df/2.))
c11 = np.sqrt(df/2.) * gfac
nc = t_stat / c11
nc_median = special.nctdtrinc(df, 0.5, t_stat)
ci = special.nctdtrinc(df, [1 - alpha_half, alpha_half], t_stat)
res = Holder(nc=nc,
confint=ci,
nc_median=nc_median,
name="Noncentrality for t-distributed random variable"
)
return res | noncentrality parameter for t statistic
Parameters
----------
t_stat : float
t-statistic, for example from a hypothesis test
df : int or float
Degrees of freedom
alpha : float in (0, 1)
Significance level for the confidence interval, coverage is 1 - alpha.
Returns
-------
HolderTuple
The main attributes are
- ``nc`` : estimate of noncentrality parameter
- ``confint`` : lower and upper bound of confidence interval for ``nc``
Other attributes are estimates for nc by different methods.
References
----------
.. [1] Hedges, Larry V. 2016. “Distribution Theory for Glass’s Estimator of
Effect Size and Related Estimators:”
Journal of Educational Statistics, November.
https://doi.org/10.3102/10769986006002107. | _noncentrality_t | python | statsmodels/statsmodels | statsmodels/stats/effect_size.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/effect_size.py | BSD-3-Clause |
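An analogous sketch for the t case (again a private helper imported only for illustration; the statistic is made up):

from statsmodels.stats.effect_size import _noncentrality_t

res = _noncentrality_t(2.8, df=40, alpha=0.05)
print(res.nc)        # bias-adjusted estimate, slightly below the raw statistic of 2.8
print(res.confint)   # 95% confidence interval for the noncentrality parameter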
def _kurtosis(a):
"""
wrapper for scipy.stats.kurtosis that returns nan instead of raising Error
missing options
"""
try:
res = stats.kurtosis(a)
except ValueError:
res = np.nan
return res | wrapper for scipy.stats.kurtosis that returns nan instead of raising Error
missing options | _kurtosis | python | statsmodels/statsmodels | statsmodels/stats/descriptivestats.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/descriptivestats.py | BSD-3-Clause |
def _skew(a):
"""
wrapper for scipy.stats.skew that returns nan instead of raising Error
missing options
"""
try:
res = stats.skew(a)
except ValueError:
res = np.nan
return res | wrapper for scipy.stats.skew that returns nan instead of raising Error
missing options | _skew | python | statsmodels/statsmodels | statsmodels/stats/descriptivestats.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/descriptivestats.py | BSD-3-Clause |
def sign_test(samp, mu0=0):
"""
Signs test
Parameters
----------
samp : array_like
1d array. The sample for which you want to perform the sign test.
mu0 : float
See Notes for the definition of the sign test. mu0 is 0 by
default, but it is common to set it to the median.
Returns
-------
M
p-value
Notes
-----
The signs test returns
M = (N(+) - N(-))/2
where N(+) is the number of values above `mu0`, N(-) is the number of
values below. Values equal to `mu0` are discarded.
The p-value for M is calculated using the binomial distribution
and can be interpreted the same as for a t-test. The test-statistic
is distributed Binom(min(N(+), N(-)), n_trials, .5) where n_trials
equals N(+) + N(-).
See Also
--------
scipy.stats.wilcoxon
"""
samp = np.asarray(samp)
pos = np.sum(samp > mu0)
neg = np.sum(samp < mu0)
M = (pos - neg) / 2.0
try:
p = stats.binomtest(min(pos, neg), pos + neg, 0.5).pvalue
except AttributeError:
# Remove after min SciPy >= 1.7
p = stats.binom_test(min(pos, neg), pos + neg, 0.5)
return M, p | Signs test
Parameters
----------
samp : array_like
1d array. The sample for which you want to perform the sign test.
mu0 : float
See Notes for the definition of the sign test. mu0 is 0 by
default, but it is common to set it to the median.
Returns
-------
M
p-value
Notes
-----
The signs test returns
M = (N(+) - N(-))/2
where N(+) is the number of values above `mu0`, N(-) is the number of
values below. Values equal to `mu0` are discarded.
The p-value for M is calculated using the binomial distribution
and can be interpreted the same as for a t-test. The test-statistic
is distributed Binom(min(N(+), N(-)), n_trials, .5) where n_trials
equals N(+) + N(-).
See Also
--------
scipy.stats.wilcoxon | sign_test | python | statsmodels/statsmodels | statsmodels/stats/descriptivestats.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/descriptivestats.py | BSD-3-Clause |
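A minimal sketch with simulated data; ``sign_test`` is assumed importable from ``statsmodels.stats.descriptivestats`` as the path field indicates:

import numpy as np
from statsmodels.stats.descriptivestats import sign_test

rng = np.random.default_rng(0)
sample = rng.normal(loc=0.3, scale=1.0, size=50)   # true median shifted away from 0
M, p = sign_test(sample, mu0=0)
print(M, p)   # M > 0 points to a median above mu0; p comes from the binomial test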
def frame(self) -> pd.DataFrame:
"""
Descriptive statistics for both numeric and categorical data
Returns
-------
DataFrame
The statistics
"""
numeric = self.numeric
categorical = self.categorical
if categorical.shape[1] == 0:
return numeric
elif numeric.shape[1] == 0:
return categorical
df = pd.concat([numeric, categorical], axis=1)
return self._reorder(df[self._data.columns]) | Descriptive statistics for both numeric and categorical data
Returns
-------
DataFrame
The statistics | frame | python | statsmodels/statsmodels | statsmodels/stats/descriptivestats.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/descriptivestats.py | BSD-3-Clause |
def numeric(self) -> pd.DataFrame:
"""
Descriptive statistics for numeric data
Returns
-------
DataFrame
The statistics of the numeric columns
"""
df: pd.DataFrame = self._data.loc[:, self._is_numeric]
cols = df.columns
_, k = df.shape
std = df.std()
count = df.count()
mean = df.mean()
mad = (df - mean).abs().mean()
std_err = std.copy()
std_err.loc[count > 0] /= count.loc[count > 0] ** 0.5
if self._use_t:
q = stats.t(count - 1).ppf(1.0 - self._alpha / 2)
else:
q = stats.norm.ppf(1.0 - self._alpha / 2)
def _mode(ser):
dtype = ser.dtype if isinstance(ser.dtype, np.dtype) else ser.dtype.numpy_dtype
ser_no_missing = ser.dropna().to_numpy(dtype=dtype)
kwargs = {} if SP_LT_19 else {"keepdims": True}
mode_res = stats.mode(ser_no_missing, **kwargs)
# Changes in SciPy 1.10
if np.isscalar(mode_res[0]):
return float(mode_res[0]), mode_res[1]
if mode_res[0].shape[0] > 0:
return [float(val) for val in mode_res]
return np.nan, np.nan
mode_values = df.apply(_mode).T
if mode_values.size > 0:
if isinstance(mode_values, pd.DataFrame):
# pandas 1.0 or later
mode = np.asarray(mode_values[0], dtype=float)
mode_counts = np.asarray(mode_values[1], dtype=np.int64)
else:
# pandas before 1.0 returns a Series of 2-elem list
mode = []
mode_counts = []
for idx in mode_values.index:
val = mode_values.loc[idx]
mode.append(val[0])
mode_counts.append(val[1])
mode = np.atleast_1d(mode)
mode_counts = np.atleast_1d(mode_counts)
else:
mode = mode_counts = np.empty(0)
loc = count > 0
mode_freq = np.full(mode.shape[0], np.nan)
mode_freq[loc] = mode_counts[loc] / count.loc[loc]
# TODO: Workaround for pandas AbstractMethodError in extension
# types. Remove when quantile is supported for these
_df = df
try:
from pandas.api.types import is_extension_array_dtype
_df = df.copy()
for col in df:
if is_extension_array_dtype(df[col].dtype):
if _df[col].isnull().any():
_df[col] = _df[col].fillna(np.nan)
except ImportError:
pass
if df.shape[1] > 0:
iqr = _df.quantile(0.75) - _df.quantile(0.25)
else:
iqr = mean
def _safe_jarque_bera(c):
a = np.asarray(c)
if a.shape[0] < 2:
return (np.nan,) * 4
return jarque_bera(a)
jb = df.apply(
lambda x: list(_safe_jarque_bera(x.dropna())), result_type="expand"
).T
nan_mean = mean.copy()
nan_mean.loc[nan_mean == 0] = np.nan
coef_var = std / nan_mean
results = {
"nobs": pd.Series(
np.ones(k, dtype=np.int64) * df.shape[0], index=cols
),
"missing": df.shape[0] - count,
"mean": mean,
"std_err": std_err,
"upper_ci": mean + q * std_err,
"lower_ci": mean - q * std_err,
"std": std,
"iqr": iqr,
"mad": mad,
"coef_var": coef_var,
"range": pd_ptp(df),
"max": df.max(),
"min": df.min(),
"skew": jb[2],
"kurtosis": jb[3],
"iqr_normal": iqr / np.diff(stats.norm.ppf([0.25, 0.75])),
"mad_normal": mad / np.sqrt(2 / np.pi),
"jarque_bera": jb[0],
"jarque_bera_pval": jb[1],
"mode": pd.Series(mode, index=cols),
"mode_freq": pd.Series(mode_freq, index=cols),
"median": df.median(),
}
final = {k: v for k, v in results.items() if k in self._stats}
results_df = pd.DataFrame(
list(final.values()), columns=cols, index=list(final.keys())
)
if "percentiles" not in self._stats:
return results_df
# Pandas before 1.0 cannot handle empty DF
if df.shape[1] > 0:
# TODO: Remove when extension types support quantile
perc = _df.quantile(self._percentiles / 100).astype(float)
else:
perc = pd.DataFrame(index=self._percentiles / 100, dtype=float)
if np.all(np.floor(100 * perc.index) == (100 * perc.index)):
perc.index = [f"{int(100 * idx)}%" for idx in perc.index]
else:
dupe = True
scale = 100
index = perc.index
while dupe:
scale *= 10
idx = np.floor(scale * perc.index)
if np.all(np.diff(idx) > 0):
dupe = False
index = np.floor(scale * index) / (scale / 100)
fmt = f"0.{len(str(scale//100))-1}f"
output = f"{{0:{fmt}}}%"
perc.index = [output.format(val) for val in index]
# Add in the names of the percentiles to the output
self._stats = self._stats + perc.index.tolist()
return self._reorder(pd.concat([results_df, perc], axis=0)) | Descriptive statistics for numeric data
Returns
-------
DataFrame
The statistics of the numeric columns | numeric | python | statsmodels/statsmodels | statsmodels/stats/descriptivestats.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/descriptivestats.py | BSD-3-Clause |
def categorical(self) -> pd.DataFrame:
"""
Descriptive statistics for categorical data
Returns
-------
DataFrame
The statistics of the categorical columns
"""
df = self._data.loc[:, [col for col in self._is_cat_like]]
k = df.shape[1]
cols = df.columns
vc = {col: df[col].value_counts(normalize=True) for col in df}
distinct = pd.Series(
{col: vc[col].shape[0] for col in vc}, dtype=np.int64
)
top = {}
freq = {}
for col in vc:
single = vc[col]
if single.shape[0] >= self._ntop:
top[col] = single.index[: self._ntop]
freq[col] = np.asarray(single.iloc[:5])
else:
val = list(single.index)
val += [None] * (self._ntop - len(val))
top[col] = val
freq_val = list(single)
freq_val += [np.nan] * (self._ntop - len(freq_val))
freq[col] = np.asarray(freq_val)
index = [f"top_{i}" for i in range(1, self._ntop + 1)]
top_df = pd.DataFrame(top, dtype="object", index=index, columns=cols)
index = [f"freq_{i}" for i in range(1, self._ntop + 1)]
freq_df = pd.DataFrame(freq, dtype="object", index=index, columns=cols)
results = {
"nobs": pd.Series(
np.ones(k, dtype=np.int64) * df.shape[0], index=cols
),
"missing": df.shape[0] - df.count(),
"distinct": distinct,
}
final = {k: v for k, v in results.items() if k in self._stats}
results_df = pd.DataFrame(
list(final.values()),
columns=cols,
index=list(final.keys()),
dtype="object",
)
if self._compute_top:
results_df = pd.concat([results_df, top_df], axis=0)
if self._compute_freq:
results_df = pd.concat([results_df, freq_df], axis=0)
return self._reorder(results_df) | Descriptive statistics for categorical data
Returns
-------
DataFrame
The statistics of the categorical columns | categorical | python | statsmodels/statsmodels | statsmodels/stats/descriptivestats.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/descriptivestats.py | BSD-3-Clause |
def summary(self) -> SimpleTable:
"""
Summary table of the descriptive statistics
Returns
-------
SimpleTable
A table instance supporting export to text, csv and LaTeX
"""
df = self.frame.astype(object)
if df.isnull().any().any():
df = df.fillna("")
cols = [str(col) for col in df.columns]
stubs = [str(idx) for idx in df.index]
data = []
for _, row in df.iterrows():
data.append([v for v in row])
def _formatter(v):
if isinstance(v, str):
return v
elif v // 1 == v:
return str(int(v))
return f"{v:0.4g}"
return SimpleTable(
data,
header=cols,
stubs=stubs,
title="Descriptive Statistics",
txt_fmt={"data_fmts": {0: "%s", 1: _formatter}},
datatypes=[1] * len(data),
) | Summary table of the descriptive statistics
Returns
-------
SimpleTable
A table instance supporting export to text, csv and LaTeX | summary | python | statsmodels/statsmodels | statsmodels/stats/descriptivestats.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/descriptivestats.py | BSD-3-Clause |
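The ``frame``, ``numeric``, ``categorical`` and ``summary`` entries above read like members of the ``Description`` class in ``statsmodels.stats.descriptivestats`` (their path field); assuming that, a typical call looks like:

import numpy as np
import pandas as pd
from statsmodels.stats.descriptivestats import Description  # assumed host class of these methods

rng = np.random.default_rng(1)
df = pd.DataFrame({
    "x": rng.normal(size=200),
    "group": pd.Categorical(rng.choice(["a", "b", "c"], size=200)),
})
desc = Description(df)
print(desc.frame)       # numeric and categorical statistics in one DataFrame
print(desc.summary())   # SimpleTable, exportable to text, csv or LaTeX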
def corr_nearest(corr, threshold=1e-15, n_fact=100):
'''
Find the nearest correlation matrix that is positive semi-definite.
The function iteratively adjusts the correlation matrix by clipping the
eigenvalues of a difference matrix. The diagonal elements are set to one.
Parameters
----------
corr : ndarray, (k, k)
initial correlation matrix
threshold : float
clipping threshold for smallest eigenvalue, see Notes
n_fact : int or float
factor to determine the maximum number of iterations. The maximum
number of iterations is the integer part of the number of columns in
the correlation matrix times n_fact.
Returns
-------
corr_new : ndarray, (optional)
corrected correlation matrix
Notes
-----
The smallest eigenvalue of the corrected correlation matrix is
approximately equal to the ``threshold``.
If the threshold=0, then the smallest eigenvalue of the correlation matrix
might be negative, but zero within a numerical error, for example in the
range of -1e-16.
Assumes input correlation matrix is symmetric.
Stops after the first step if correlation matrix is already positive
semi-definite or positive definite, so that smallest eigenvalue is above
threshold. In this case, the returned array is not the original, but
is equal to it within numerical precision.
See Also
--------
corr_clipped
cov_nearest
'''
k_vars = corr.shape[0]
if k_vars != corr.shape[1]:
raise ValueError("matrix is not square")
diff = np.zeros(corr.shape)
x_new = corr.copy()
diag_idx = np.arange(k_vars)
for ii in range(int(len(corr) * n_fact)):
x_adj = x_new - diff
x_psd, clipped = clip_evals(x_adj, value=threshold)
if not clipped:
x_new = x_psd
break
diff = x_psd - x_adj
x_new = x_psd.copy()
x_new[diag_idx, diag_idx] = 1
else:
warnings.warn(iteration_limit_doc, IterationLimitWarning)
return x_new | Find the nearest correlation matrix that is positive semi-definite.
The function iteratively adjusts the correlation matrix by clipping the
eigenvalues of a difference matrix. The diagonal elements are set to one.
Parameters
----------
corr : ndarray, (k, k)
initial correlation matrix
threshold : float
clipping threshold for smallest eigenvalue, see Notes
n_fact : int or float
factor to determine the maximum number of iterations. The maximum
number of iterations is the integer part of the number of columns in
the correlation matrix times n_fact.
Returns
-------
corr_new : ndarray, (optional)
corrected correlation matrix
Notes
-----
The smallest eigenvalue of the corrected correlation matrix is
approximately equal to the ``threshold``.
If the threshold=0, then the smallest eigenvalue of the correlation matrix
might be negative, but zero within a numerical error, for example in the
range of -1e-16.
Assumes input correlation matrix is symmetric.
Stops after the first step if correlation matrix is already positive
semi-definite or positive definite, so that smallest eigenvalue is above
threshold. In this case, the returned array is not the original, but
is equal to it within numerical precision.
See Also
--------
corr_clipped
cov_nearest | corr_nearest | python | statsmodels/statsmodels | statsmodels/stats/correlation_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/correlation_tools.py | BSD-3-Clause |
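A sketch with a deliberately indefinite input, assuming the function is importable from ``statsmodels.stats.correlation_tools``:

import numpy as np
from statsmodels.stats.correlation_tools import corr_nearest

corr = np.array([[1.0, 0.9, 0.7],
                 [0.9, 1.0, 0.3],
                 [0.7, 0.3, 1.0]])          # symmetric but not positive semi-definite
print(np.linalg.eigvalsh(corr).min())       # negative smallest eigenvalue
fixed = corr_nearest(corr, threshold=1e-15)
print(np.linalg.eigvalsh(fixed).min())      # pushed up to about the threshold
print(np.diag(fixed))                       # diagonal stays at one (up to numerical precision)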
def corr_clipped(corr, threshold=1e-15):
'''
Find a near correlation matrix that is positive semi-definite
This function clips the eigenvalues, replacing eigenvalues smaller than
the threshold by the threshold. The new matrix is normalized, so that the
diagonal elements are one.
Compared to corr_nearest, the distance between the original correlation
matrix and the positive definite correlation matrix is larger, however,
it is much faster since it only computes eigenvalues once.
Parameters
----------
corr : ndarray, (k, k)
initial correlation matrix
threshold : float
clipping threshold for smallest eigenvalue, see Notes
Returns
-------
corr_new : ndarray, (optional)
corrected correlation matrix
Notes
-----
The smallest eigenvalue of the corrected correlation matrix is
approximately equal to the ``threshold``. In examples, the
smallest eigenvalue can be smaller than the threshold by a factor of 10,
e.g. threshold 1e-8 can result in smallest eigenvalue in the range
between 1e-9 and 1e-8.
If the threshold=0, then the smallest eigenvalue of the correlation matrix
might be negative, but zero within a numerical error, for example in the
range of -1e-16.
Assumes input correlation matrix is symmetric. The diagonal elements of
returned correlation matrix is set to ones.
If the correlation matrix is already positive semi-definite given the
threshold, then the original correlation matrix is returned.
``cov_clipped`` is 40 or more times faster than ``cov_nearest`` in a simple
example, but has a slightly larger approximation error.
See Also
--------
corr_nearest
cov_nearest
'''
x_new, clipped = clip_evals(corr, value=threshold)
if not clipped:
return corr
# cov2corr
x_std = np.sqrt(np.diag(x_new))
x_new = x_new / x_std / x_std[:, None]
return x_new | Find a near correlation matrix that is positive semi-definite
This function clips the eigenvalues, replacing eigenvalues smaller than
the threshold by the threshold. The new matrix is normalized, so that the
diagonal elements are one.
Compared to corr_nearest, the distance between the original correlation
matrix and the positive definite correlation matrix is larger, however,
it is much faster since it only computes eigenvalues once.
Parameters
----------
corr : ndarray, (k, k)
initial correlation matrix
threshold : float
clipping threshold for smallest eigenvalue, see Notes
Returns
-------
corr_new : ndarray, (optional)
corrected correlation matrix
Notes
-----
The smallest eigenvalue of the corrected correlation matrix is
approximately equal to the ``threshold``. In examples, the
smallest eigenvalue can be smaller than the threshold by a factor of 10,
e.g. threshold 1e-8 can result in smallest eigenvalue in the range
between 1e-9 and 1e-8.
If the threshold=0, then the smallest eigenvalue of the correlation matrix
might be negative, but zero within a numerical error, for example in the
range of -1e-16.
Assumes input correlation matrix is symmetric. The diagonal elements of
returned correlation matrix is set to ones.
If the correlation matrix is already positive semi-definite given the
threshold, then the original correlation matrix is returned.
``cov_clipped`` is 40 or more times faster than ``cov_nearest`` in a simple
example, but has a slightly larger approximation error.
See Also
--------
corr_nearest
cov_nearest | corr_clipped | python | statsmodels/statsmodels | statsmodels/stats/correlation_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/correlation_tools.py | BSD-3-Clause |
def cov_nearest(cov, method='clipped', threshold=1e-15, n_fact=100,
return_all=False):
"""
Find the nearest covariance matrix that is positive (semi-) definite
This leaves the diagonal, i.e. the variance, unchanged
Parameters
----------
cov : ndarray, (k,k)
initial covariance matrix
method : str
if "clipped", then the faster but less accurate ``corr_clipped`` is
used. If "nearest", then ``corr_nearest`` is used.
threshold : float
clipping threshold for smallest eigen value, see Notes
n_fact : int or float
factor to determine the maximum number of iterations in
``corr_nearest``. See its doc string
return_all : bool
if False (default), then only the covariance matrix is returned.
If True, then correlation matrix and standard deviation are
additionally returned.
Returns
-------
cov_ : ndarray
corrected covariance matrix
corr_ : ndarray, (optional)
corrected correlation matrix
std_ : ndarray, (optional)
standard deviation
Notes
-----
This converts the covariance matrix to a correlation matrix. Then, finds
the nearest correlation matrix that is positive semidefinite and converts
it back to a covariance matrix using the initial standard deviation.
The smallest eigenvalue of the intermediate correlation matrix is
approximately equal to the ``threshold``.
If the threshold=0, then the smallest eigenvalue of the correlation matrix
might be negative, but zero within a numerical error, for example in the
range of -1e-16.
Assumes input covariance matrix is symmetric.
See Also
--------
corr_nearest
corr_clipped
"""
from statsmodels.stats.moment_helpers import cov2corr, corr2cov
cov_, std_ = cov2corr(cov, return_std=True)
if method == 'clipped':
corr_ = corr_clipped(cov_, threshold=threshold)
else: # method == 'nearest'
corr_ = corr_nearest(cov_, threshold=threshold, n_fact=n_fact)
cov_ = corr2cov(corr_, std_)
if return_all:
return cov_, corr_, std_
else:
return cov_ | Find the nearest covariance matrix that is positive (semi-) definite
This leaves the diagonal, i.e. the variance, unchanged
Parameters
----------
cov : ndarray, (k,k)
initial covariance matrix
method : str
if "clipped", then the faster but less accurate ``corr_clipped`` is
used.if "nearest", then ``corr_nearest`` is used
threshold : float
clipping threshold for the smallest eigenvalue, see Notes
n_fact : int or float
factor to determine the maximum number of iterations in
``corr_nearest``. See its docstring
return_all : bool
if False (default), then only the covariance matrix is returned.
If True, then correlation matrix and standard deviation are
additionally returned.
Returns
-------
cov_ : ndarray
corrected covariance matrix
corr_ : ndarray, (optional)
corrected correlation matrix
std_ : ndarray, (optional)
standard deviation
Notes
-----
This converts the covariance matrix to a correlation matrix. Then, finds
the nearest correlation matrix that is positive semidefinite and converts
it back to a covariance matrix using the initial standard deviation.
The smallest eigenvalue of the intermediate correlation matrix is
approximately equal to the ``threshold``.
If the threshold=0, then the smallest eigenvalue of the correlation matrix
might be negative, but zero within a numerical error, for example in the
range of -1e-16.
Assumes input covariance matrix is symmetric.
See Also
--------
corr_nearest
corr_clipped | cov_nearest | python | statsmodels/statsmodels | statsmodels/stats/correlation_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/correlation_tools.py | BSD-3-Clause |
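Illustrative usage sketch (values made up): ``cov_nearest`` repairs an indefinite covariance matrix while leaving the variances on the diagonal unchanged.

import numpy as np
from statsmodels.stats.correlation_tools import cov_nearest

# an indefinite matrix assembled, e.g., from pairwise estimates
cov = np.array([[4.0, 3.6, 0.8],
                [3.6, 4.0, 3.6],
                [0.8, 3.6, 4.0]])
print(np.linalg.eigvalsh(cov).min())           # negative eigenvalue

cov_fixed = cov_nearest(cov, method="nearest", threshold=1e-15)

# the diagonal (variances) is unchanged and the result is positive semi-definite
print(np.diag(cov_fixed))
print(np.linalg.eigvalsh(cov_fixed).min())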
def _nmono_linesearch(obj, grad, x, d, obj_hist, M=10, sig1=0.1,
sig2=0.9, gam=1e-4, maxiter=100):
"""
Implements the non-monotone line search of Grippo et al. (1986),
as described in Birgin, Martinez and Raydan (2013).
Parameters
----------
obj : real-valued function
The objective function, to be minimized
grad : vector-valued function
The gradient of the objective function
x : array_like
The starting point for the line search
d : array_like
The search direction
obj_hist : array_like
Objective function history (must contain at least one value)
M : positive int
Number of previous function points to consider (see references
for details).
sig1 : real
Tuning parameter, see references for details.
sig2 : real
Tuning parameter, see references for details.
gam : real
Tuning parameter, see references for details.
maxiter : int
The maximum number of iterations; returns Nones if convergence
does not occur by this point
Returns
-------
alpha : real
The step value
x : array_like
The function argument at the final step
obval : real
The function value at the final step
g : array_like
The gradient at the final step
Notes
-----
The basic idea is to take a big step in the direction of the
gradient, even if the function value is not decreased (but there
is a maximum allowed increase in terms of the recent history of
the iterates).
References
----------
Grippo L, Lampariello F, Lucidi S (1986). A Nonmonotone Line
Search Technique for Newton's Method. SIAM Journal on Numerical
Analysis, 23, 707-716.
E. Birgin, J.M. Martinez, and M. Raydan. Spectral projected
gradient methods: Review and perspectives. Journal of Statistical
Software (preprint).
"""
alpha = 1.
last_obval = obj(x)
obj_max = max(obj_hist[-M:])
for iter in range(maxiter):
obval = obj(x + alpha*d)
g = grad(x)
gtd = (g * d).sum()
if obval <= obj_max + gam*alpha*gtd:
return alpha, x + alpha*d, obval, g
a1 = -0.5*alpha**2*gtd / (obval - last_obval - alpha*gtd)
if (sig1 <= a1) and (a1 <= sig2*alpha):
alpha = a1
else:
alpha /= 2.
last_obval = obval
return None, None, None, None | Implements the non-monotone line search of Grippo et al. (1986),
as described in Birgin, Martinez and Raydan (2013).
Parameters
----------
obj : real-valued function
The objective function, to be minimized
grad : vector-valued function
The gradient of the objective function
x : array_like
The starting point for the line search
d : array_like
The search direction
obj_hist : array_like
Objective function history (must contain at least one value)
M : positive int
Number of previous function points to consider (see references
for details).
sig1 : real
Tuning parameter, see references for details.
sig2 : real
Tuning parameter, see references for details.
gam : real
Tuning parameter, see references for details.
maxiter : int
The maximum number of iterations; returns Nones if convergence
does not occur by this point
Returns
-------
alpha : real
The step value
x : array_like
The function argument at the final step
obval : real
The function value at the final step
g : array_like
The gradient at the final step
Notes
-----
The basic idea is to take a big step in the direction of the
gradient, even if the function value is not decreased (but there
is a maximum allowed increase in terms of the recent history of
the iterates).
References
----------
Grippo L, Lampariello F, Lucidi S (1986). A Nonmonotone Line
Search Technique for Newton's Method. SIAM Journal on Numerical
Analysis, 23, 707-716.
E. Birgin, J.M. Martinez, and M. Raydan. Spectral projected
gradient methods: Review and perspectives. Journal of Statistical
Software (preprint). | _nmono_linesearch | python | statsmodels/statsmodels | statsmodels/stats/correlation_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/correlation_tools.py | BSD-3-Clause |
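A minimal sketch of the documented interface. ``_nmono_linesearch`` is a private helper, so this is illustrative only; the quadratic objective and the search direction are made up, and the direction points straight at the minimizer so the full step is accepted.

import numpy as np
from statsmodels.stats.correlation_tools import _nmono_linesearch  # private helper

def obj(x):
    return np.sum(x ** 2)

def grad(x):
    return 2.0 * x

x0 = np.array([3.0, -2.0])
d = -x0                      # search direction pointing at the minimizer of obj
obj_hist = [obj(x0)]         # the history must contain at least one value

alpha, x1, obval, g = _nmono_linesearch(obj, grad, x0, d, obj_hist)
print(alpha, x1, obval)      # the full step alpha = 1 is accepted and reaches the minimum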
def _spg_optim(func, grad, start, project, maxiter=1e4, M=10,
ctol=1e-3, maxiter_nmls=200, lam_min=1e-30,
lam_max=1e30, sig1=0.1, sig2=0.9, gam=1e-4):
"""
Implements the spectral projected gradient method for minimizing a
differentiable function on a convex domain.
Parameters
----------
func : real valued function
The objective function to be minimized.
grad : real array-valued function
The gradient of the objective function
start : array_like
The starting point
project : function
In-place projection of the argument to the domain
of func.
... See notes regarding additional arguments
Returns
-------
rslt : Bunch
rslt.params is the final iterate, other fields describe
convergence status.
Notes
-----
This can be an effective heuristic algorithm for problems where no
guaranteed algorithm for computing a global minimizer is known.
There are a number of tuning parameters, but these generally
should not be changed except for `maxiter` (positive integer) and
`ctol` (small positive real). See the Birgin et al reference for
more information about the tuning parameters.
References
----------
E. Birgin, J.M. Martinez, and M. Raydan. Spectral projected
gradient methods: Review and perspectives. Journal of Statistical
Software (preprint). Available at:
http://www.ime.usp.br/~egbirgin/publications/bmr5.pdf
"""
lam = min(10*lam_min, lam_max)
params = start.copy()
gval = grad(params)
obj_hist = [func(params), ]
for itr in range(int(maxiter)):
# Check convergence
df = params - gval
project(df)
df -= params
if np.max(np.abs(df)) < ctol:
return Bunch(**{"Converged": True, "params": params,
"objective_values": obj_hist,
"Message": "Converged successfully"})
# The line search direction
d = params - lam*gval
project(d)
d -= params
# Carry out the nonmonotone line search
alpha, params1, fval, gval1 = _nmono_linesearch(
func,
grad,
params,
d,
obj_hist,
M=M,
sig1=sig1,
sig2=sig2,
gam=gam,
maxiter=maxiter_nmls)
if alpha is None:
return Bunch(**{"Converged": False, "params": params,
"objective_values": obj_hist,
"Message": "Failed in nmono_linesearch"})
obj_hist.append(fval)
s = params1 - params
y = gval1 - gval
sy = (s*y).sum()
if sy <= 0:
lam = lam_max
else:
ss = (s*s).sum()
lam = max(lam_min, min(ss/sy, lam_max))
params = params1
gval = gval1
return Bunch(**{"Converged": False, "params": params,
"objective_values": obj_hist,
"Message": "spg_optim did not converge"}) | Implements the spectral projected gradient method for minimizing a
differentiable function on a convex domain.
Parameters
----------
func : real valued function
The objective function to be minimized.
grad : real array-valued function
The gradient of the objective function
start : array_like
The starting point
project : function
In-place projection of the argument to the domain
of func.
... See notes regarding additional arguments
Returns
-------
rslt : Bunch
rslt.params is the final iterate, other fields describe
convergence status.
Notes
-----
This can be an effective heuristic algorithm for problems where no
guaranteed algorithm for computing a global minimizer is known.
There are a number of tuning parameters, but these generally
should not be changed except for `maxiter` (positive integer) and
`ctol` (small positive real). See the Birgin et al reference for
more information about the tuning parameters.
References
----------
E. Birgin, J.M. Martinez, and M. Raydan. Spectral projected
gradient methods: Review and perspectives. Journal of Statistical
Software (preprint). Available at:
http://www.ime.usp.br/~egbirgin/publications/bmr5.pdf | _spg_optim | python | statsmodels/statsmodels | statsmodels/stats/correlation_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/correlation_tools.py | BSD-3-Clause |
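A minimal sketch of the interface, again for the private helper only: it minimizes a made-up convex quadratic over the unit box, with the projection applied in place as the docstring requires.

import numpy as np
from statsmodels.stats.correlation_tools import _spg_optim  # private helper

target = np.array([0.2, 1.5, -0.3])      # illustrative unconstrained minimizer

def func(x):
    return np.sum((x - target) ** 2)

def grad(x):
    return 2.0 * (x - target)

def project(x):
    # in-place projection onto the unit box [0, 1]^3
    np.clip(x, 0.0, 1.0, out=x)

rslt = _spg_optim(func, grad, np.full(3, 0.5), project)
# the constrained minimizer of this quadratic is the box projection of
# `target`, i.e. [0.2, 1.0, 0.0]
print(rslt.Message)
print(rslt.params)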
def _project_correlation_factors(X):
"""
Project a matrix into the domain of matrices whose row-wise sums
of squares are less than or equal to 1.
The input matrix is modified in-place.
"""
nm = np.sqrt((X*X).sum(1))
ii = np.flatnonzero(nm > 1)
if len(ii) > 0:
X[ii, :] /= nm[ii][:, None] | Project a matrix into the domain of matrices whose row-wise sums
of squares are less than or equal to 1.
The input matrix is modified in-place. | _project_correlation_factors | python | statsmodels/statsmodels | statsmodels/stats/correlation_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/correlation_tools.py | BSD-3-Clause |
def to_matrix(self):
"""
Returns the PSD matrix represented by this instance as a full
(square) matrix.
"""
return np.diag(self.diag) + np.dot(self.root, self.root.T) | Returns the PSD matrix represented by this instance as a full
(square) matrix. | to_matrix | python | statsmodels/statsmodels | statsmodels/stats/correlation_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/correlation_tools.py | BSD-3-Clause |
def decorrelate(self, rhs):
"""
Decorrelate the columns of `rhs`.
Parameters
----------
rhs : array_like
A 2 dimensional array with the same number of rows as the
PSD matrix represented by the class instance.
Returns
-------
C^{-1/2} * rhs, where C is the covariance matrix represented
by this class instance.
Notes
-----
The returned matrix has the identity matrix as its row-wise
population covariance matrix.
This function exploits the factor structure for efficiency.
"""
# I + factor * qval * factor' is the inverse square root of
# the covariance matrix in the homogeneous case where diag =
# 1.
qval = -1 + 1 / np.sqrt(1 + self.scales)
# Decorrelate in the general case.
rhs = rhs / np.sqrt(self.diag)[:, None]
rhs1 = np.dot(self.factor.T, rhs)
rhs1 *= qval[:, None]
rhs1 = np.dot(self.factor, rhs1)
rhs += rhs1
return rhs | Decorrelate the columns of `rhs`.
Parameters
----------
rhs : array_like
A 2 dimensional array with the same number of rows as the
PSD matrix represented by the class instance.
Returns
-------
C^{-1/2} * rhs, where C is the covariance matrix represented
by this class instance.
Notes
-----
The returned matrix has the identity matrix as its row-wise
population covariance matrix.
This function exploits the factor structure for efficiency. | decorrelate | python | statsmodels/statsmodels | statsmodels/stats/correlation_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/correlation_tools.py | BSD-3-Clause |
def solve(self, rhs):
"""
Solve a linear system of equations with factor-structured
coefficients.
Parameters
----------
rhs : array_like
A 2 dimensional array with the same number of rows as the
PSD matrix represented by the class instance.
Returns
-------
C^{-1} * rhs, where C is the covariance matrix represented
by this class instance.
Notes
-----
This function exploits the factor structure for efficiency.
"""
qval = -self.scales / (1 + self.scales)
dr = np.sqrt(self.diag)
rhs = rhs / dr[:, None]
mat = qval[:, None] * np.dot(self.factor.T, rhs)
rhs = rhs + np.dot(self.factor, mat)
return rhs / dr[:, None] | Solve a linear system of equations with factor-structured
coefficients.
Parameters
----------
rhs : array_like
A 2 dimensional array with the same number of rows as the
PSD matrix represented by the class instance.
Returns
-------
C^{-1} * rhs, where C is the covariance matrix represented
by this class instance.
Notes
-----
This function exploits the factor structure for efficiency. | solve | python | statsmodels/statsmodels | statsmodels/stats/correlation_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/correlation_tools.py | BSD-3-Clause |
def logdet(self):
"""
Returns the logarithm of the determinant of a
factor-structured matrix.
"""
logdet = np.sum(np.log(self.diag))
logdet += np.sum(np.log(self.scales))
logdet += np.sum(np.log(1 + 1 / self.scales))
return logdet | Returns the logarithm of the determinant of a
factor-structured matrix. | logdet | python | statsmodels/statsmodels | statsmodels/stats/correlation_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/correlation_tools.py | BSD-3-Clause |
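A small sketch tying the methods above together. The factor values are made up; the object is built the same way the factory routines later in this file construct ``FactoredPSDMatrix(diag, root)``.

import numpy as np
from statsmodels.stats.correlation_tools import FactoredPSDMatrix

rng = np.random.default_rng(0)
root = rng.uniform(-0.5, 0.5, size=(6, 2))    # low-rank factor, row sums of squares < 1
diag = 1.0 - (root ** 2).sum(1)               # as in corr_nearest_factor below
fm = FactoredPSDMatrix(diag, root)

dense = fm.to_matrix()                        # diag(diag) + root @ root.T
rhs = rng.standard_normal((6, 3))

# solve() applies the inverse without forming a 6 x 6 inverse explicitly
print(np.allclose(dense @ fm.solve(rhs), rhs))

# logdet() should agree with the dense computation
print(np.allclose(fm.logdet(), np.linalg.slogdet(dense)[1]))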
def corr_nearest_factor(corr, rank, ctol=1e-6, lam_min=1e-30,
lam_max=1e30, maxiter=1000):
"""
Find the nearest correlation matrix with factor structure to a
given square matrix.
Parameters
----------
corr : square array
The target matrix (to which the nearest correlation matrix is
sought). Must be square, but need not be positive
semidefinite.
rank : int
The rank of the factor structure of the solution, i.e., the
number of linearly independent columns of X.
ctol : positive real
Convergence criterion.
lam_min : float
Tuning parameter for spectral projected gradient optimization
(smallest allowed step in the search direction).
lam_max : float
Tuning parameter for spectral projected gradient optimization
(largest allowed step in the search direction).
maxiter : int
Maximum number of iterations in spectral projected gradient
optimization.
Returns
-------
rslt : Bunch
rslt.corr is a FactoredPSDMatrix defining the estimated
correlation structure. Other fields of `rslt` contain
returned values from spg_optim.
Notes
-----
A correlation matrix has factor structure if it can be written in
the form I + XX' - diag(XX'), where X is n x k with linearly
independent columns, and with each row having sum of squares at
most equal to 1. The approximation is made in terms of the
Frobenius norm.
This routine is useful when one has an approximate correlation
matrix that is not positive semidefinite, and there is need to
estimate the inverse, square root, or inverse square root of the
population correlation matrix. The factor structure allows these
tasks to be done without constructing any n x n matrices.
This is a non-convex problem with no known guaranteed globally
convergent algorithm for computing the solution. Borsdorf, Higham
and Raydan (2010) compared several methods for this problem and
found the spectral projected gradient (SPG) method (used here) to
perform best.
The input matrix `corr` can be a dense numpy array or any scipy
sparse matrix. The latter is useful if the input matrix is
obtained by thresholding a very large sample correlation matrix.
If `corr` is sparse, the calculations are optimized to save
memory, so no working matrix with more than 10^6 elements is
constructed.
References
----------
.. [*] R Borsdorf, N Higham, M Raydan (2010). Computing a nearest
correlation matrix with factor structure. SIAM J Matrix Anal Appl,
31:5, 2603-2622.
http://eprints.ma.man.ac.uk/1523/01/covered/MIMS_ep2009_87.pdf
Examples
--------
Hard thresholding a correlation matrix may result in a matrix that
is not positive semidefinite. We can approximate a hard
thresholded correlation matrix with a PSD matrix as follows, where
`corr` is the input correlation matrix.
>>> import numpy as np
>>> from statsmodels.stats.correlation_tools import corr_nearest_factor
>>> np.random.seed(1234)
>>> b = 1.5 - np.random.rand(10, 1)
>>> x = np.random.randn(100,1).dot(b.T) + np.random.randn(100,10)
>>> corr = np.corrcoef(x.T)
>>> corr = corr * (np.abs(corr) >= 0.3)
>>> rslt = corr_nearest_factor(corr, 3)
"""
p, _ = corr.shape
# Starting values (following the PCA method in BHR).
u, s, vt = svds(corr, rank)
X = u * np.sqrt(s)
nm = np.sqrt((X**2).sum(1))
ii = np.flatnonzero(nm > 1e-5)
X[ii, :] /= nm[ii][:, None]
# Zero the diagonal
corr1 = corr.copy()
if type(corr1) is np.ndarray:
np.fill_diagonal(corr1, 0)
elif sparse.issparse(corr1):
corr1.setdiag(np.zeros(corr1.shape[0]))
corr1.eliminate_zeros()
corr1.sort_indices()
else:
raise ValueError("Matrix type not supported")
# The gradient, from lemma 4.1 of BHR.
def grad(X):
gr = np.dot(X, np.dot(X.T, X))
if type(corr1) is np.ndarray:
gr -= np.dot(corr1, X)
else:
gr -= corr1.dot(X)
gr -= (X*X).sum(1)[:, None] * X
return 4*gr
# The objective function (sum of squared deviations between fitted
# and observed arrays).
def func(X):
if type(corr1) is np.ndarray:
M = np.dot(X, X.T)
np.fill_diagonal(M, 0)
M -= corr1
fval = (M*M).sum()
return fval
else:
fval = 0.
# Control the size of intermediates
max_ws = 1e6
bs = int(max_ws / X.shape[0])
ir = 0
while ir < X.shape[0]:
ir2 = min(ir+bs, X.shape[0])
u = np.dot(X[ir:ir2, :], X.T)
ii = np.arange(u.shape[0])
u[ii, ir+ii] = 0
u -= np.asarray(corr1[ir:ir2, :].todense())
fval += (u*u).sum()
ir += bs
return fval
rslt = _spg_optim(func, grad, X, _project_correlation_factors, ctol=ctol,
lam_min=lam_min, lam_max=lam_max, maxiter=maxiter)
root = rslt.params
diag = 1 - (root**2).sum(1)
soln = FactoredPSDMatrix(diag, root)
rslt.corr = soln
del rslt.params
return rslt | Find the nearest correlation matrix with factor structure to a
given square matrix.
Parameters
----------
corr : square array
The target matrix (to which the nearest correlation matrix is
sought). Must be square, but need not be positive
semidefinite.
rank : int
The rank of the factor structure of the solution, i.e., the
number of linearly independent columns of X.
ctol : positive real
Convergence criterion.
lam_min : float
Tuning parameter for spectral projected gradient optimization
(smallest allowed step in the search direction).
lam_max : float
Tuning parameter for spectral projected gradient optimization
(largest allowed step in the search direction).
maxiter : int
Maximum number of iterations in spectral projected gradient
optimization.
Returns
-------
rslt : Bunch
rslt.corr is a FactoredPSDMatrix defining the estimated
correlation structure. Other fields of `rslt` contain
returned values from spg_optim.
Notes
-----
A correlation matrix has factor structure if it can be written in
the form I + XX' - diag(XX'), where X is n x k with linearly
independent columns, and with each row having sum of squares at
most equal to 1. The approximation is made in terms of the
Frobenius norm.
This routine is useful when one has an approximate correlation
matrix that is not positive semidefinite, and there is need to
estimate the inverse, square root, or inverse square root of the
population correlation matrix. The factor structure allows these
tasks to be done without constructing any n x n matrices.
This is a non-convex problem with no known guaranteed globally
convergent algorithm for computing the solution. Borsdorf, Higham
and Raydan (2010) compared several methods for this problem and
found the spectral projected gradient (SPG) method (used here) to
perform best.
The input matrix `corr` can be a dense numpy array or any scipy
sparse matrix. The latter is useful if the input matrix is
obtained by thresholding a very large sample correlation matrix.
If `corr` is sparse, the calculations are optimized to save
memory, so no working matrix with more than 10^6 elements is
constructed.
References
----------
.. [*] R Borsdorf, N Higham, M Raydan (2010). Computing a nearest
correlation matrix with factor structure. SIAM J Matrix Anal Appl,
31:5, 2603-2622.
http://eprints.ma.man.ac.uk/1523/01/covered/MIMS_ep2009_87.pdf
Examples
--------
Hard thresholding a correlation matrix may result in a matrix that
is not positive semidefinite. We can approximate a hard
thresholded correlation matrix with a PSD matrix as follows, where
`corr` is the input correlation matrix.
>>> import numpy as np
>>> from statsmodels.stats.correlation_tools import corr_nearest_factor
>>> np.random.seed(1234)
>>> b = 1.5 - np.random.rand(10, 1)
>>> x = np.random.randn(100,1).dot(b.T) + np.random.randn(100,10)
>>> corr = np.corrcoef(x.T)
>>> corr = corr * (np.abs(corr) >= 0.3)
>>> rslt = corr_nearest_factor(corr, 3) | corr_nearest_factor | python | statsmodels/statsmodels | statsmodels/stats/correlation_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/correlation_tools.py | BSD-3-Clause |
def cov_nearest_factor_homog(cov, rank):
"""
Approximate an arbitrary square matrix with a factor-structured
matrix of the form k*I + XX'.
Parameters
----------
cov : array_like
The input array, must be square but need not be positive
semidefinite
rank : int
The rank of the fitted factor structure
Returns
-------
A FactoredPSDMatrix instance containing the fitted matrix
Notes
-----
This routine is useful if one has an estimated covariance matrix
that is not SPD, and the ultimate goal is to estimate the inverse,
square root, or inverse square root of the true covariance
matrix. The factor structure allows these tasks to be performed
without constructing any n x n matrices.
The calculations use the fact that if k is known, then X can be
determined from the eigen-decomposition of cov - k*I, which can
in turn be easily obtained from the eigen-decomposition of `cov`.
Thus the problem can be reduced to a 1-dimensional search for k
that does not require repeated eigen-decompositions.
If the input matrix is sparse, then cov - k*I is also sparse, so
the eigen-decomposition can be done efficiently using sparse
routines.
The one-dimensional search for the optimal value of k is not
convex, so a local minimum could be obtained.
Examples
--------
Hard thresholding a covariance matrix may result in a matrix that
is not positive semidefinite. We can approximate a hard
thresholded covariance matrix with a PSD matrix as follows:
>>> import numpy as np
>>> np.random.seed(1234)
>>> b = 1.5 - np.random.rand(10, 1)
>>> x = np.random.randn(100,1).dot(b.T) + np.random.randn(100,10)
>>> cov = np.cov(x)
>>> cov = cov * (np.abs(cov) >= 0.3)
>>> rslt = cov_nearest_factor_homog(cov, 3)
"""
m, n = cov.shape
Q, Lambda, _ = svds(cov, rank)
if sparse.issparse(cov):
QSQ = np.dot(Q.T, cov.dot(Q))
ts = cov.diagonal().sum()
tss = cov.dot(cov).diagonal().sum()
else:
QSQ = np.dot(Q.T, np.dot(cov, Q))
ts = np.trace(cov)
tss = np.trace(np.dot(cov, cov))
def fun(k):
Lambda_t = Lambda - k
v = tss + m*(k**2) + np.sum(Lambda_t**2) - 2*k*ts
v += 2*k*np.sum(Lambda_t) - 2*np.sum(np.diag(QSQ) * Lambda_t)
return v
# Get the optimal decomposition
k_opt = fminbound(fun, 0, 1e5)
Lambda_opt = Lambda - k_opt
fac_opt = Q * np.sqrt(Lambda_opt)
diag = k_opt * np.ones(m, dtype=np.float64) # - (fac_opt**2).sum(1)
return FactoredPSDMatrix(diag, fac_opt) | Approximate an arbitrary square matrix with a factor-structured
matrix of the form k*I + XX'.
Parameters
----------
cov : array_like
The input array, must be square but need not be positive
semidefinite
rank : int
The rank of the fitted factor structure
Returns
-------
A FactoredPSDMatrix instance containing the fitted matrix
Notes
-----
This routine is useful if one has an estimated covariance matrix
that is not SPD, and the ultimate goal is to estimate the inverse,
square root, or inverse square root of the true covariance
matrix. The factor structure allows these tasks to be performed
without constructing any n x n matrices.
The calculations use the fact that if k is known, then X can be
determined from the eigen-decomposition of cov - k*I, which can
in turn be easily obtained from the eigen-decomposition of `cov`.
Thus the problem can be reduced to a 1-dimensional search for k
that does not require repeated eigen-decompositions.
If the input matrix is sparse, then cov - k*I is also sparse, so
the eigen-decomposition can be done efficiently using sparse
routines.
The one-dimensional search for the optimal value of k is not
convex, so a local minimum could be obtained.
Examples
--------
Hard thresholding a covariance matrix may result in a matrix that
is not positive semidefinite. We can approximate a hard
thresholded covariance matrix with a PSD matrix as follows:
>>> import numpy as np
>>> np.random.seed(1234)
>>> b = 1.5 - np.random.rand(10, 1)
>>> x = np.random.randn(100,1).dot(b.T) + np.random.randn(100,10)
>>> cov = np.cov(x)
>>> cov = cov * (np.abs(cov) >= 0.3)
>>> rslt = cov_nearest_factor_homog(cov, 3) | cov_nearest_factor_homog | python | statsmodels/statsmodels | statsmodels/stats/correlation_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/correlation_tools.py | BSD-3-Clause |
def set_bandwidth(self, bw):
"""
Set the bandwidth to the given vector.
Parameters
----------
bw : array_like
A vector of non-negative bandwidth values.
"""
self.bw = bw
self._setup() | Set the bandwidth to the given vector.
Parameters
----------
bw : array_like
A vector of non-negative bandwidth values. | set_bandwidth | python | statsmodels/statsmodels | statsmodels/stats/correlation_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/correlation_tools.py | BSD-3-Clause |
def set_default_bw(self, loc, bwm=None):
"""
Set default bandwidths based on domain values.
Parameters
----------
loc : array_like
Values from the domain to which the kernel will
be applied.
bwm : scalar, optional
A non-negative scalar that is used to multiply
the default bandwidth.
"""
sd = loc.std(0)
q25, q75 = np.percentile(loc, [25, 75], axis=0)
iqr = (q75 - q25) / 1.349
bw = np.where(iqr < sd, iqr, sd)
bw *= 0.9 / loc.shape[0] ** 0.2
if bwm is not None:
bw *= bwm
# The final bandwidths
self.bw = np.asarray(bw, dtype=np.float64)
self._setup() | Set default bandwiths based on domain values.
Parameters
----------
loc : array_like
Values from the domain to which the kernel will
be applied.
bwm : scalar, optional
A non-negative scalar that is used to multiply
the default bandwidth. | set_default_bw | python | statsmodels/statsmodels | statsmodels/stats/correlation_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/correlation_tools.py | BSD-3-Clause |
def kernel_covariance(exog, loc, groups, kernel=None, bw=None):
"""
Use kernel averaging to estimate a multivariate covariance function.
The goal is to estimate a covariance function C(x, y) =
cov(Z(x), Z(y)) where x, y are vectors in R^p (e.g. representing
locations in time or space), and Z(.) represents a multivariate
process on R^p.
The data used for estimation can be observed at arbitrary values of the
position vector, and there can be multiple independent observations
from the process.
Parameters
----------
exog : array_like
The rows of exog are realizations of the process obtained at
specified points.
loc : array_like
The rows of loc are the locations (e.g. in space or time) at
which the rows of exog are observed.
groups : array_like
The values of groups are labels for distinct independent copies
of the process.
kernel : MultivariateKernel instance, optional
An instance of MultivariateKernel, defaults to
GaussianMultivariateKernel.
bw : array_like or scalar
A bandwidth vector, or bandwidth multiplier. If a 1d array, it
contains kernel bandwidths for each component of the process, and
must have length equal to the number of columns of exog. If a scalar,
bw is a bandwidth multiplier used to adjust the default bandwidth; if
None, a default bandwidth is used.
Returns
-------
A real-valued function C(x, y) that returns an estimate of the covariance
between values of the process located at x and y.
References
----------
.. [1] Genton M, W Kleiber (2015). Cross covariance functions for
multivariate geostatistics. Statistical Science 30(2).
https://arxiv.org/pdf/1507.08017.pdf
"""
exog = np.asarray(exog)
loc = np.asarray(loc)
groups = np.asarray(groups)
if loc.ndim == 1:
loc = loc[:, None]
v = [exog.shape[0], loc.shape[0], len(groups)]
if min(v) != max(v):
msg = "exog, loc, and groups must have the same number of rows"
raise ValueError(msg)
# Map from group labels to the row indices in each group.
ix = {}
for i, g in enumerate(groups):
if g not in ix:
ix[g] = []
ix[g].append(i)
for g in ix.keys():
ix[g] = np.sort(ix[g])
if kernel is None:
kernel = GaussianMultivariateKernel()
if bw is None:
kernel.set_default_bw(loc)
elif np.isscalar(bw):
kernel.set_default_bw(loc, bwm=bw)
else:
kernel.set_bandwidth(bw)
def cov(x, y):
kx = kernel.call(x, loc)
ky = kernel.call(y, loc)
cm, cw = 0., 0.
for g, ii in ix.items():
m = len(ii)
j1, j2 = np.indices((m, m))
j1 = ii[j1.flat]
j2 = ii[j2.flat]
w = kx[j1] * ky[j2]
# TODO: some other form of broadcasting may be faster than
# einsum here
cm += np.einsum("ij,ik,i->jk", exog[j1, :], exog[j2, :], w)
cw += w.sum()
if cw < 1e-10:
msg = ("Effective sample size is 0. The bandwidth may be too " +
"small, or you are outside the range of your data.")
warnings.warn(msg)
return np.nan * np.ones_like(cm)
return cm / cw
return cov | Use kernel averaging to estimate a multivariate covariance function.
The goal is to estimate a covariance function C(x, y) =
cov(Z(x), Z(y)) where x, y are vectors in R^p (e.g. representing
locations in time or space), and Z(.) represents a multivariate
process on R^p.
The data used for estimation can be observed at arbitrary values of the
position vector, and there can be multiple independent observations
from the process.
Parameters
----------
exog : array_like
The rows of exog are realizations of the process obtained at
specified points.
loc : array_like
The rows of loc are the locations (e.g. in space or time) at
which the rows of exog are observed.
groups : array_like
The values of groups are labels for distinct independent copies
of the process.
kernel : MultivariateKernel instance, optional
An instance of MultivariateKernel, defaults to
GaussianMultivariateKernel.
bw : array_like or scalar
A bandwidth vector, or bandwidth multiplier. If a 1d array, it
contains kernel bandwidths for each component of the process, and
must have length equal to the number of columns of exog. If a scalar,
bw is a bandwidth multiplier used to adjust the default bandwidth; if
None, a default bandwidth is used.
Returns
-------
A real-valued function C(x, y) that returns an estimate of the covariance
between values of the process located at x and y.
References
----------
.. [1] Genton M, W Kleiber (2015). Cross covariance functions for
multivariate geostatics. Statistical Science 30(2).
https://arxiv.org/pdf/1507.08017.pdf | kernel_covariance | python | statsmodels/statsmodels | statsmodels/stats/correlation_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/correlation_tools.py | BSD-3-Clause |
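An illustrative sketch with simulated data (the data-generating process is made up); it assumes the default Gaussian kernel accepts a query point with the same dimension as the location columns, here one-dimensional.

import numpy as np
from statsmodels.stats.correlation_tools import kernel_covariance

rng = np.random.default_rng(123)
groups = np.repeat(np.arange(20), 15)              # 20 independent copies, 15 points each
loc = rng.uniform(0, 1, size=groups.shape[0])      # observation locations (times)
signal = np.sin(2 * np.pi * loc)[:, None]
exog = signal + 0.5 * rng.standard_normal((groups.shape[0], 2))

cov_fn = kernel_covariance(exog, loc, groups)

# 2 x 2 estimated covariance between the process at t = 0.2 and at t = 0.3
print(cov_fn(np.array([0.2]), np.array([0.3])))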
def omni_normtest(resids, axis=0):
"""
Omnibus test for normality
Parameters
----------
resid : array_like
axis : int, optional
Default is 0
Returns
-------
Chi^2 score, two-tail probability
"""
# TODO: change to exception in summary branch and catch in summary()
# behavior changed between scipy 0.9 and 0.10
resids = np.asarray(resids)
n = resids.shape[axis]
if n < 8:
from warnings import warn
warn("omni_normtest is not valid with less than 8 observations; %i "
"samples were given." % int(n), ValueWarning)
return np.nan, np.nan
return stats.normaltest(resids, axis=axis) | Omnibus test for normality
Parameters
----------
resid : array_like
axis : int, optional
Default is 0
Returns
-------
Chi^2 score, two-tail probability | omni_normtest | python | statsmodels/statsmodels | statsmodels/stats/stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/stattools.py | BSD-3-Clause |
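A quick usage sketch on simulated data:

import numpy as np
from statsmodels.stats.stattools import omni_normtest

rng = np.random.default_rng(0)
stat, pval = omni_normtest(rng.standard_normal(500))
print(stat, pval)                     # typically a large p-value for normal data

stat, pval = omni_normtest(rng.exponential(size=500))
print(stat, pval)                     # very small p-value for strongly skewed data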
def robust_skewness(y, axis=0):
"""
Calculates the four skewness measures in Kim & White
Parameters
----------
y : array_like
Data to compute use in the estimator.
axis : int or None, optional
Axis along which the skewness measures are computed. If `None`, the
entire array is used.
Returns
-------
sk1 : ndarray
The standard skewness estimator.
sk2 : ndarray
Skewness estimator based on quartiles.
sk3 : ndarray
Skewness estimator based on mean-median difference, standardized by
absolute deviation.
sk4 : ndarray
Skewness estimator based on mean-median difference, standardized by
standard deviation.
Notes
-----
The robust skewness measures are defined
.. math::
SK_{2}=\\frac{\\left(q_{.75}-q_{.5}\\right)
-\\left(q_{.5}-q_{.25}\\right)}{q_{.75}-q_{.25}}
.. math::
SK_{3}=\\frac{\\mu-\\hat{q}_{0.5}}
{\\hat{E}\\left[\\left|y-\\hat{\\mu}\\right|\\right]}
.. math::
SK_{4}=\\frac{\\mu-\\hat{q}_{0.5}}{\\hat{\\sigma}}
.. [*] Tae-Hwan Kim and Halbert White, "On more robust estimation of
skewness and kurtosis," Finance Research Letters, vol. 1, pp. 56-73,
March 2004.
"""
if axis is None:
y = y.ravel()
axis = 0
y = np.sort(y, axis)
q1, q2, q3 = np.percentile(y, [25.0, 50.0, 75.0], axis=axis)
mu = y.mean(axis)
shape = (y.size,)
if axis is not None:
shape = list(mu.shape)
shape.insert(axis, 1)
shape = tuple(shape)
mu_b = np.reshape(mu, shape)
q2_b = np.reshape(q2, shape)
sigma = np.sqrt(np.mean(((y - mu_b)**2), axis))
sk1 = stats.skew(y, axis=axis)
sk2 = (q1 + q3 - 2.0 * q2) / (q3 - q1)
sk3 = (mu - q2) / np.mean(abs(y - q2_b), axis=axis)
sk4 = (mu - q2) / sigma
return sk1, sk2, sk3, sk4 | Calculates the four skewness measures in Kim & White
Parameters
----------
y : array_like
Data to compute use in the estimator.
axis : int or None, optional
Axis along which the skewness measures are computed. If `None`, the
entire array is used.
Returns
-------
sk1 : ndarray
The standard skewness estimator.
sk2 : ndarray
Skewness estimator based on quartiles.
sk3 : ndarray
Skewness estimator based on mean-median difference, standardized by
absolute deviation.
sk4 : ndarray
Skewness estimator based on mean-median difference, standardized by
standard deviation.
Notes
-----
The robust skewness measures are defined
.. math::
SK_{2}=\\frac{\\left(q_{.75}-q_{.5}\\right)
-\\left(q_{.5}-q_{.25}\\right)}{q_{.75}-q_{.25}}
.. math::
SK_{3}=\\frac{\\mu-\\hat{q}_{0.5}}
{\\hat{E}\\left[\\left|y-\\hat{\\mu}\\right|\\right]}
.. math::
SK_{4}=\\frac{\\mu-\\hat{q}_{0.5}}{\\hat{\\sigma}}
.. [*] Tae-Hwan Kim and Halbert White, "On more robust estimation of
skewness and kurtosis," Finance Research Letters, vol. 1, pp. 56-73,
March 2004. | robust_skewness | python | statsmodels/statsmodels | statsmodels/stats/stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/stattools.py | BSD-3-Clause |
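A usage sketch on a right-skewed simulated sample; all four measures should come out positive.

import numpy as np
from statsmodels.stats.stattools import robust_skewness

rng = np.random.default_rng(1)
y = rng.exponential(size=10_000)      # right-skewed sample

sk1, sk2, sk3, sk4 = robust_skewness(y)
print(sk1, sk2, sk3, sk4)             # all positive; sk1 is the classical estimator (about 2 here)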
def _kr3(y, alpha=5.0, beta=50.0):
"""
KR3 estimator from Kim & White
Parameters
----------
y : array_like, 1-d
Data to compute use in the estimator.
alpha : float, optional
Lower cut-off for measuring expectation in tail.
beta : float, optional
Lower cut-off for measuring expectation in center.
Returns
-------
kr3 : float
Robust kurtosis estimator based on standardized lower- and upper-tail
expected values
Notes
-----
.. [*] Tae-Hwan Kim and Halbert White, "On more robust estimation of
skewness and kurtosis," Finance Research Letters, vol. 1, pp. 56-73,
March 2004.
"""
perc = (alpha, 100.0 - alpha, beta, 100.0 - beta)
lower_alpha, upper_alpha, lower_beta, upper_beta = np.percentile(y, perc)
l_alpha = np.mean(y[y < lower_alpha])
u_alpha = np.mean(y[y > upper_alpha])
l_beta = np.mean(y[y < lower_beta])
u_beta = np.mean(y[y > upper_beta])
return (u_alpha - l_alpha) / (u_beta - l_beta) | KR3 estimator from Kim & White
Parameters
----------
y : array_like, 1-d
Data to compute use in the estimator.
alpha : float, optional
Lower cut-off for measuring expectation in tail.
beta : float, optional
Lower cut-off for measuring expectation in center.
Returns
-------
kr3 : float
Robust kurtosis estimator based on standardized lower- and upper-tail
expected values
Notes
-----
.. [*] Tae-Hwan Kim and Halbert White, "On more robust estimation of
skewness and kurtosis," Finance Research Letters, vol. 1, pp. 56-73,
March 2004. | _kr3 | python | statsmodels/statsmodels | statsmodels/stats/stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/stattools.py | BSD-3-Clause |
def expected_robust_kurtosis(ab=(5.0, 50.0), dg=(2.5, 25.0)):
"""
Calculates the expected value of the robust kurtosis measures in Kim and
White assuming the data are normally distributed.
Parameters
----------
ab : iterable, optional
Contains 100*(alpha, beta) in the kr3 measure where alpha is the tail
quantile cut-off for measuring the extreme tail and beta is the central
quantile cutoff for the standardization of the measure
dg : iterable, optional
Contains 100*(delta, gamma) in the kr4 measure where delta is the tail
quantile for measuring extreme values and gamma is the central quantile
used in the standardization of the measure
Returns
-------
ekr : ndarray, 4-element
Contains the expected values of the 4 robust kurtosis measures
Notes
-----
See `robust_kurtosis` for definitions of the robust kurtosis measures
"""
alpha, beta = ab
delta, gamma = dg
expected_value = np.zeros(4)
ppf = stats.norm.ppf
pdf = stats.norm.pdf
q1, q2, q3, q5, q6, q7 = ppf(np.array((1.0, 2.0, 3.0, 5.0, 6.0, 7.0)) / 8)
expected_value[0] = 3
expected_value[1] = ((q7 - q5) + (q3 - q1)) / (q6 - q2)
q_alpha, q_beta = ppf(np.array((alpha / 100.0, beta / 100.0)))
expected_value[2] = (2 * pdf(q_alpha) / alpha) / (2 * pdf(q_beta) / beta)
q_delta, q_gamma = ppf(np.array((delta / 100.0, gamma / 100.0)))
expected_value[3] = (-2.0 * q_delta) / (-2.0 * q_gamma)
return expected_value | Calculates the expected value of the robust kurtosis measures in Kim and
White assuming the data are normally distributed.
Parameters
----------
ab : iterable, optional
Contains 100*(alpha, beta) in the kr3 measure where alpha is the tail
quantile cut-off for measuring the extreme tail and beta is the central
quantile cutoff for the standardization of the measure
dg : iterable, optional
Contains 100*(delta, gamma) in the kr4 measure where delta is the tail
quantile for measuring extreme values and gamma is the central quantile
used in the standardization of the measure
Returns
-------
ekr : ndarray, 4-element
Contains the expected values of the 4 robust kurtosis measures
Notes
-----
See `robust_kurtosis` for definitions of the robust kurtosis measures | expected_robust_kurtosis | python | statsmodels/statsmodels | statsmodels/stats/stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/stattools.py | BSD-3-Clause |
def robust_kurtosis(y, axis=0, ab=(5.0, 50.0), dg=(2.5, 25.0), excess=True):
"""
Calculates the four kurtosis measures in Kim & White
Parameters
----------
y : array_like
Data to compute use in the estimator.
axis : int or None, optional
Axis along which the kurtosis are computed. If `None`, the
entire array is used.
ab : iterable, optional
Contains 100*(alpha, beta) in the kr3 measure where alpha is the tail
quantile cut-off for measuring the extreme tail and beta is the central
quantile cutoff for the standardization of the measure
dg : iterable, optional
Contains 100*(delta, gamma) in the kr4 measure where delta is the tail
quantile for measuring extreme values and gamma is the central quantile
used in the standardization of the measure
excess : bool, optional
If true (default), computed values are excess of those for a standard
normal distribution.
Returns
-------
kr1 : ndarray
The standard kurtosis estimator.
kr2 : ndarray
Kurtosis estimator based on octiles.
kr3 : ndarray
Kurtosis estimators based on exceedance expectations.
kr4 : ndarray
Kurtosis measure based on the spread between high and low quantiles.
Notes
-----
The robust kurtosis measures are defined
.. math::
KR_{2}=\\frac{\\left(\\hat{q}_{.875}-\\hat{q}_{.625}\\right)
+\\left(\\hat{q}_{.375}-\\hat{q}_{.125}\\right)}
{\\hat{q}_{.75}-\\hat{q}_{.25}}
.. math::
KR_{3}=\\frac{\\hat{E}\\left(y|y>\\hat{q}_{1-\\alpha}\\right)
-\\hat{E}\\left(y|y<\\hat{q}_{\\alpha}\\right)}
{\\hat{E}\\left(y|y>\\hat{q}_{1-\\beta}\\right)
-\\hat{E}\\left(y|y<\\hat{q}_{\\beta}\\right)}
.. math::
KR_{4}=\\frac{\\hat{q}_{1-\\delta}-\\hat{q}_{\\delta}}
{\\hat{q}_{1-\\gamma}-\\hat{q}_{\\gamma}}
where :math:`\\hat{q}_{p}` is the estimated quantile at :math:`p`.
.. [*] Tae-Hwan Kim and Halbert White, "On more robust estimation of
skewness and kurtosis," Finance Research Letters, vol. 1, pp. 56-73,
March 2004.
"""
if (axis is None or
(y.squeeze().ndim == 1 and y.ndim != 1)):
y = y.ravel()
axis = 0
alpha, beta = ab
delta, gamma = dg
perc = (12.5, 25.0, 37.5, 62.5, 75.0, 87.5,
delta, 100.0 - delta, gamma, 100.0 - gamma)
e1, e2, e3, e5, e6, e7, fd, f1md, fg, f1mg = np.percentile(y, perc,
axis=axis)
expected_value = (expected_robust_kurtosis(ab, dg)
if excess else np.zeros(4))
kr1 = stats.kurtosis(y, axis, False) - expected_value[0]
kr2 = ((e7 - e5) + (e3 - e1)) / (e6 - e2) - expected_value[1]
if y.ndim == 1:
kr3 = _kr3(y, alpha, beta)
else:
kr3 = np.apply_along_axis(_kr3, axis, y, alpha, beta)
kr3 -= expected_value[2]
kr4 = (f1md - fd) / (f1mg - fg) - expected_value[3]
return kr1, kr2, kr3, kr4 | Calculates the four kurtosis measures in Kim & White
Parameters
----------
y : array_like
Data to compute use in the estimator.
axis : int or None, optional
Axis along which the kurtosis are computed. If `None`, the
entire array is used.
ab : iterable, optional
Contains 100*(alpha, beta) in the kr3 measure where alpha is the tail
quantile cut-off for measuring the extreme tail and beta is the central
quantile cutoff for the standardization of the measure
dg : iterable, optional
Contains 100*(delta, gamma) in the kr4 measure where delta is the tail
quantile for measuring extreme values and gamma is the central quantile
used in the standardization of the measure
excess : bool, optional
If true (default), computed values are excess of those for a standard
normal distribution.
Returns
-------
kr1 : ndarray
The standard kurtosis estimator.
kr2 : ndarray
Kurtosis estimator based on octiles.
kr3 : ndarray
Kurtosis estimators based on exceedance expectations.
kr4 : ndarray
Kurtosis measure based on the spread between high and low quantiles.
Notes
-----
The robust kurtosis measures are defined
.. math::
KR_{2}=\\frac{\\left(\\hat{q}_{.875}-\\hat{q}_{.625}\\right)
+\\left(\\hat{q}_{.375}-\\hat{q}_{.125}\\right)}
{\\hat{q}_{.75}-\\hat{q}_{.25}}
.. math::
KR_{3}=\\frac{\\hat{E}\\left(y|y>\\hat{q}_{1-\\alpha}\\right)
-\\hat{E}\\left(y|y<\\hat{q}_{\\alpha}\\right)}
{\\hat{E}\\left(y|y>\\hat{q}_{1-\\beta}\\right)
-\\hat{E}\\left(y|y<\\hat{q}_{\\beta}\\right)}
.. math::
KR_{4}=\\frac{\\hat{q}_{1-\\delta}-\\hat{q}_{\\delta}}
{\\hat{q}_{1-\\gamma}-\\hat{q}_{\\gamma}}
where :math:`\\hat{q}_{p}` is the estimated quantile at :math:`p`.
.. [*] Tae-Hwan Kim and Halbert White, "On more robust estimation of
skewness and kurtosis," Finance Research Letters, vol. 1, pp. 56-73,
March 2004. | robust_kurtosis | python | statsmodels/statsmodels | statsmodels/stats/stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/stattools.py | BSD-3-Clause |
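A sketch comparing the excess measures on simulated normal data with the normal-distribution baselines from ``expected_robust_kurtosis``:

import numpy as np
from statsmodels.stats.stattools import expected_robust_kurtosis, robust_kurtosis

rng = np.random.default_rng(2)
y = rng.standard_normal(100_000)

# with excess=True (the default) all four measures should be near zero for normal data
print(robust_kurtosis(y))

# the normal-distribution baselines that are subtracted
print(expected_robust_kurtosis())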
def _medcouple_1d(y):
"""
Calculates the medcouple robust measure of skew.
Parameters
----------
y : array_like, 1-d
Data to compute use in the estimator.
Returns
-------
mc : float
The medcouple statistic
Notes
-----
The current algorithm requires O(N**2) memory allocations, and so may
not work for very large arrays (N>10000).
.. [*] M. Hubert and E. Vandervieren, "An adjusted boxplot for skewed
distributions" Computational Statistics & Data Analysis, vol. 52, pp.
5186-5201, August 2008.
"""
# Parameter changes the algorithm to the slower for large n
y = np.squeeze(np.asarray(y))
if y.ndim != 1:
raise ValueError("y must be squeezable to a 1-d array")
y = np.sort(y)
n = y.shape[0]
if n % 2 == 0:
mf = (y[n // 2 - 1] + y[n // 2]) / 2
else:
mf = y[(n - 1) // 2]
z = y - mf
lower = z[z <= 0.0]
upper = z[z >= 0.0]
upper = upper[:, None]
standardization = upper - lower
is_zero = np.logical_and(lower == 0.0, upper == 0.0)
standardization[is_zero] = np.inf
spread = upper + lower
h = spread / standardization
# GH5395
num_ties = np.sum(lower == 0.0)
if num_ties:
# Replacements has -1 above the anti-diagonal, 0 on the anti-diagonal,
# and 1 below the anti-diagonal
replacements = np.ones((num_ties, num_ties)) - np.eye(num_ties)
replacements -= 2 * np.triu(replacements)
# Convert diagonal to anti-diagonal
replacements = np.fliplr(replacements)
# Always replace upper right block
h[:num_ties, -num_ties:] = replacements
return np.median(h) | Calculates the medcouple robust measure of skew.
Parameters
----------
y : array_like, 1-d
Data to compute use in the estimator.
Returns
-------
mc : float
The medcouple statistic
Notes
-----
The current algorithm requires O(N**2) memory allocations, and so may
not work for very large arrays (N>10000).
.. [*] M. Hubert and E. Vandervieren, "An adjusted boxplot for skewed
distributions" Computational Statistics & Data Analysis, vol. 52, pp.
5186-5201, August 2008. | _medcouple_1d | python | statsmodels/statsmodels | statsmodels/stats/stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/stattools.py | BSD-3-Clause |
def medcouple(y, axis=0):
"""
Calculate the medcouple robust measure of skew.
Parameters
----------
y : array_like
Data to compute use in the estimator.
axis : {int, None}
Axis along which the medcouple statistic is computed. If `None`, the
entire array is used.
Returns
-------
mc : ndarray
The medcouple statistic with the same shape as `y`, with the specified
axis removed.
Notes
-----
The current algorithm requires O(N**2) memory allocations, and so may
not work for very large arrays (N>10000).
.. [*] M. Hubert and E. Vandervieren, "An adjusted boxplot for skewed
distributions" Computational Statistics & Data Analysis, vol. 52, pp.
5186-5201, August 2008.
"""
y = np.asarray(y, dtype=np.double) # GH 4243
if axis is None:
return _medcouple_1d(y.ravel())
return np.apply_along_axis(_medcouple_1d, axis, y) | Calculate the medcouple robust measure of skew.
Parameters
----------
y : array_like
Data to compute use in the estimator.
axis : {int, None}
Axis along which the medcouple statistic is computed. If `None`, the
entire array is used.
Returns
-------
mc : ndarray
The medcouple statistic with the same shape as `y`, with the specified
axis removed.
Notes
-----
The current algorithm requires O(N**2) memory allocations, and so may
not work for very large arrays (N>10000).
.. [*] M. Hubert and E. Vandervieren, "An adjusted boxplot for skewed
distributions" Computational Statistics & Data Analysis, vol. 52, pp.
5186-5201, August 2008. | medcouple | python | statsmodels/statsmodels | statsmodels/stats/stattools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/stattools.py | BSD-3-Clause |
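A usage sketch; the sample sizes are kept moderate because of the O(N**2) memory use noted above.

import numpy as np
from statsmodels.stats.stattools import medcouple

rng = np.random.default_rng(3)
print(medcouple(rng.standard_normal(2_000)))     # near 0 for a symmetric sample
print(medcouple(rng.exponential(size=2_000)))    # clearly positive for a right-skewed sample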
def _mover_confint(stat1, stat2, ci1, ci2, contrast="diff"):
"""
References
----------
.. [#] Krishnamoorthy, K., Jie Peng, and Dan Zhang. 2016. “Modified Large
Sample Confidence Intervals for Poisson Distributions: Ratio, Weighted
Average, and Product of Means.” Communications in Statistics - Theory
and Methods 45 (1): 83–97. https://doi.org/10.1080/03610926.2013.821486.
.. [#] Li, Yanhong, John J. Koval, Allan Donner, and G. Y. Zou. 2010.
“Interval Estimation for the Area under the Receiver Operating
Characteristic Curve When Data Are Subject to Error.” Statistics in
Medicine 29 (24): 2521–31. https://doi.org/10.1002/sim.4015.
.. [#] Zou, G. Y., and A. Donner. 2008. “Construction of Confidence Limits
about Effect Measures: A General Approach.” Statistics in Medicine 27
(10): 1693–1702. https://doi.org/10.1002/sim.3095.
"""
if contrast == "diff":
stat = stat1 - stat2
low_half = np.sqrt((stat1 - ci1[0])**2 + (stat2 - ci2[1])**2)
upp_half = np.sqrt((stat1 - ci1[1])**2 + (stat2 - ci2[0])**2)
ci = (stat - low_half, stat + upp_half)
elif contrast == "sum":
stat = stat1 + stat2
low_half = np.sqrt((stat1 - ci1[0])**2 + (stat2 - ci2[0])**2)
upp_half = np.sqrt((stat1 - ci1[1])**2 + (stat2 - ci2[1])**2)
ci = (stat - low_half, stat + upp_half)
elif contrast == "ratio":
# stat = stat1 / stat2
prod = stat1 * stat2
term1 = stat2**2 - (ci2[1] - stat2)**2
term2 = stat2**2 - (ci2[0] - stat2)**2
low_ = (prod -
np.sqrt(prod**2 - term1 * (stat1**2 - (ci1[0] - stat1)**2))
) / term1
upp_ = (prod +
np.sqrt(prod**2 - term2 * (stat1**2 - (ci1[1] - stat1)**2))
) / term2
# method 2 Li, Tang, Wong 2014
low1, upp1 = ci1
low2, upp2 = ci2
term1 = upp2 * (2 * stat2 - upp2)
term2 = low2 * (2 * stat2 - low2)
low = (prod -
np.sqrt(prod**2 - term1 * low1 * (2 * stat1 - low1))
) / term1
upp = (prod +
np.sqrt(prod**2 - term2 * upp1 * (2 * stat1 - upp1))
) / term2
assert_allclose((low_, upp_), (low, upp), atol=1e-15, rtol=1e-10)
ci = (low, upp)
return ci | References
----------
.. [#] Krishnamoorthy, K., Jie Peng, and Dan Zhang. 2016. “Modified Large
Sample Confidence Intervals for Poisson Distributions: Ratio, Weighted
Average, and Product of Means.” Communications in Statistics - Theory
and Methods 45 (1): 83–97. https://doi.org/10.1080/03610926.2013.821486.
.. [#] Li, Yanhong, John J. Koval, Allan Donner, and G. Y. Zou. 2010.
“Interval Estimation for the Area under the Receiver Operating
Characteristic Curve When Data Are Subject to Error.” Statistics in
Medicine 29 (24): 2521–31. https://doi.org/10.1002/sim.4015.
.. [#] Zou, G. Y., and A. Donner. 2008. “Construction of Confidence Limits
about Effect Measures: A General Approach.” Statistics in Medicine 27
(10): 1693–1702. https://doi.org/10.1002/sim.3095. | _mover_confint | python | statsmodels/statsmodels | statsmodels/stats/_inference_tools.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/_inference_tools.py | BSD-3-Clause |
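A minimal sketch of the private helper with made-up statistics and interval limits, showing the MOVER combination for a difference:

from statsmodels.stats._inference_tools import _mover_confint  # private helper

stat1, ci1 = 0.30, (0.25, 0.36)     # an estimate and its confidence interval (illustrative)
stat2, ci2 = 0.22, (0.18, 0.27)

# confidence interval for the difference stat1 - stat2
print(_mover_confint(stat1, stat2, ci1, ci2, contrast="diff"))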
def _check_nested_exog(small, large):
"""
Check if a larger exog nests a smaller exog
Parameters
----------
small : ndarray
exog from smaller model
large : ndarray
exog from larger model
Returns
-------
bool
True if small is nested by large
"""
if small.shape[1] > large.shape[1]:
return False
coef = np.linalg.lstsq(large, small, rcond=None)[0]
err = small - large @ coef
return np.linalg.matrix_rank(np.c_[large, err]) == large.shape[1] | Check if a larger exog nests a smaller exog
Parameters
----------
small : ndarray
exog from smaller model
large : ndarray
exog from larger model
Returns
-------
bool
True if small is nested by large | _check_nested_exog | python | statsmodels/statsmodels | statsmodels/stats/diagnostic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/diagnostic.py | BSD-3-Clause |
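A small sketch of the private helper with made-up design matrices:

import numpy as np
from statsmodels.stats.diagnostic import _check_nested_exog  # private helper

rng = np.random.default_rng(4)
x1 = rng.standard_normal((100, 1))
x2 = rng.standard_normal((100, 1))

small = np.column_stack([np.ones(100), x1])
large = np.column_stack([np.ones(100), x1, x2])
other = np.column_stack([np.ones(100), x2])

print(_check_nested_exog(small, large))   # True: every column of small lies in the span of large
print(_check_nested_exog(other, small))   # False: x2 is not in the span of [1, x1]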
def compare_cox(results_x, results_z, store=False):
"""
Compute the Cox test for non-nested models
Parameters
----------
results_x : Result instance
result instance of first model
results_z : Result instance
result instance of second model
store : bool, default False
If true, then the intermediate results are returned.
Returns
-------
tstat : float
t statistic for the test that including the fitted values of the
first model in the second model has no effect.
pvalue : float
two-sided pvalue for the t statistic
res_store : ResultsStore, optional
Intermediate results. Returned if store is True.
Notes
-----
Tests of non-nested hypotheses might not provide unambiguous answers.
The test should be performed in both directions and it is possible
that both or neither test rejects. See [1]_ for more information.
Formulas from [1]_, section 8.3.4 translated to code
Matches results for Example 8.3 in Greene
References
----------
.. [1] Greene, W. H. Econometric Analysis. New Jersey. Prentice Hall;
5th edition. (2002).
"""
if _check_nested_results(results_x, results_z):
raise ValueError(NESTED_ERROR.format(test="Cox comparison"))
x = results_x.model.exog
z = results_z.model.exog
nobs = results_x.model.endog.shape[0]
sigma2_x = results_x.ssr / nobs
sigma2_z = results_z.ssr / nobs
yhat_x = results_x.fittedvalues
res_dx = OLS(yhat_x, z).fit()
err_zx = res_dx.resid
res_xzx = OLS(err_zx, x).fit()
err_xzx = res_xzx.resid
sigma2_zx = sigma2_x + np.dot(err_zx.T, err_zx) / nobs
c01 = nobs / 2. * (np.log(sigma2_z) - np.log(sigma2_zx))
v01 = sigma2_x * np.dot(err_xzx.T, err_xzx) / sigma2_zx ** 2
q = c01 / np.sqrt(v01)
pval = 2 * stats.norm.sf(np.abs(q))
if store:
res = ResultsStore()
res.res_dx = res_dx
res.res_xzx = res_xzx
res.c01 = c01
res.v01 = v01
res.q = q
res.pvalue = pval
res.dist = stats.norm
return q, pval, res
return q, pval | Compute the Cox test for non-nested models
Parameters
----------
results_x : Result instance
result instance of first model
results_z : Result instance
result instance of second model
store : bool, default False
If true, then the intermediate results are returned.
Returns
-------
tstat : float
t statistic for the test that including the fitted values of the
first model in the second model has no effect.
pvalue : float
two-sided pvalue for the t statistic
res_store : ResultsStore, optional
Intermediate results. Returned if store is True.
Notes
-----
Tests of non-nested hypotheses might not provide unambiguous answers.
The test should be performed in both directions and it is possible
that both or neither test rejects. See [1]_ for more information.
Formulas from [1]_, section 8.3.4 translated to code
Matches results for Example 8.3 in Greene
References
----------
.. [1] Greene, W. H. Econometric Analysis. New Jersey. Prentice Hall;
5th edition. (2002). | compare_cox | python | statsmodels/statsmodels | statsmodels/stats/diagnostic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/diagnostic.py | BSD-3-Clause |
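A usage sketch with simulated data and two non-nested specifications (the data-generating process is made up):

import numpy as np
import statsmodels.api as sm
from statsmodels.stats.diagnostic import compare_cox

rng = np.random.default_rng(5)
nobs = 200
x1 = rng.standard_normal(nobs)
x2 = rng.standard_normal(nobs)
y = 1.0 + 2.0 * x1 + rng.standard_normal(nobs)

# two non-nested models for the same dependent variable
res_x = sm.OLS(y, sm.add_constant(x1)).fit()
res_z = sm.OLS(y, sm.add_constant(x2)).fit()

tstat, pval = compare_cox(res_x, res_z)
print(tstat, pval)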
def compare_j(results_x, results_z, store=False):
"""
Compute the J-test for non-nested models
Parameters
----------
results_x : RegressionResults
The result instance of first model.
results_z : RegressionResults
The result instance of second model.
store : bool, default False
If true, then the intermediate results are returned.
Returns
-------
tstat : float
t statistic for the test that including the fitted values of the
first model in the second model has no effect.
pvalue : float
two-sided pvalue for the t statistic
res_store : ResultsStore, optional
Intermediate results. Returned if store is True.
Notes
-----
From description in Greene, section 8.3.3. Matches results for Example
8.3, Greene.
Tests of non-nested hypotheses might not provide unambiguous answers.
The test should be performed in both directions and it is possible
that both or neither test rejects. See Greene for more information.
References
----------
.. [1] Greene, W. H. Econometric Analysis. New Jersey. Prentice Hall;
5th edition. (2002).
"""
# TODO: Allow cov to be specified
if _check_nested_results(results_x, results_z):
raise ValueError(NESTED_ERROR.format(test="J comparison"))
y = results_x.model.endog
z = results_z.model.exog
yhat_x = results_x.fittedvalues
res_zx = OLS(y, np.column_stack((yhat_x, z))).fit()
tstat = res_zx.tvalues[0]
pval = res_zx.pvalues[0]
if store:
res = ResultsStore()
res.res_zx = res_zx
res.dist = stats.t(res_zx.df_resid)
res.teststat = tstat
res.pvalue = pval
return tstat, pval, res
return tstat, pval | Compute the J-test for non-nested models
Parameters
----------
results_x : RegressionResults
The result instance of first model.
results_z : RegressionResults
The result instance of second model.
store : bool, default False
If true, then the intermediate results are returned.
Returns
-------
tstat : float
t statistic for the test that including the fitted values of the
first model in the second model has no effect.
pvalue : float
two-sided pvalue for the t statistic
res_store : ResultsStore, optional
Intermediate results. Returned if store is True.
Notes
-----
From description in Greene, section 8.3.3. Matches results for Example
8.3, Greene.
Tests of non-nested hypotheses might not provide unambiguous answers.
The test should be performed in both directions and it is possible
that both or neither test rejects. See Greene for more information.
References
----------
.. [1] Greene, W. H. Econometric Analysis. New Jersey. Prentice Hall;
5th edition. (2002). | compare_j | python | statsmodels/statsmodels | statsmodels/stats/diagnostic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/diagnostic.py | BSD-3-Clause |
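The J test is used the same way; a self-contained sketch with simulated data:

import numpy as np
import statsmodels.api as sm
from statsmodels.stats.diagnostic import compare_j

rng = np.random.default_rng(6)
nobs = 200
x1, x2 = rng.standard_normal((2, nobs))
y = 1.0 + 2.0 * x1 + rng.standard_normal(nobs)

res_x = sm.OLS(y, sm.add_constant(x1)).fit()
res_z = sm.OLS(y, sm.add_constant(x2)).fit()

# do the fitted values of model X add explanatory power to model Z?
tstat, pval = compare_j(res_x, res_z)
print(tstat, pval)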
def acorr_ljungbox(x, lags=None, boxpierce=False, model_df=0, period=None,
return_df=True, auto_lag=False):
"""
Ljung-Box test of autocorrelation in residuals.
Parameters
----------
x : array_like
The data series. The data is demeaned before the test statistic is
computed.
lags : {int, array_like}, default None
If lags is an integer then this is taken to be the largest lag
that is included, the test result is reported for all smaller lag
length. If lags is a list or array, then all lags are included up to
the largest lag in the list, however only the tests for the lags in
the list are reported. If lags is None, then the default maxlag is
min(10, nobs // 5). The default number of lags changes if period
is set.
boxpierce : bool, default False
If true, then the Box-Pierce test results are returned in addition
to the Ljung-Box test results.
model_df : int, default 0
Number of degrees of freedom consumed by the model. In an ARMA model,
this value is usually p+q where p is the AR order and q is the MA
order. This value is subtracted from the degrees-of-freedom used in
the test so that the adjusted dof for the statistics are
lags - model_df. If lags - model_df <= 0, then NaN is returned.
period : int, default None
The period of a Seasonal time series. Used to compute the max lag
for seasonal data which uses min(2*period, nobs // 5) if set. If None,
then the default rule is used to set the number of lags. When set, must
be >= 2.
auto_lag : bool, default False
Flag indicating whether to automatically determine the optimal lag
length based on a threshold for the maximum correlation value.
Returns
-------
DataFrame
Frame with columns:
* lb_stat - The Ljung-Box test statistic.
* lb_pvalue - The p-value based on chi-square distribution. The
p-value is computed as 1 - chi2.cdf(lb_stat, dof) where dof is
lag - model_df. If lag - model_df <= 0, then NaN is returned for
the pvalue.
* bp_stat - The Box-Pierce test statistic.
* bp_pvalue - The p-value for the Box-Pierce test based on the chi-square
distribution. The p-value is computed as 1 - chi2.cdf(bp_stat, dof)
where dof is lag - model_df. If lag - model_df <= 0, then NaN is
returned for the pvalue.
See Also
--------
statsmodels.regression.linear_model.OLS.fit
Regression model fitting.
statsmodels.regression.linear_model.RegressionResults
Results from linear regression models.
statsmodels.stats.stattools.q_stat
Ljung-Box test statistic computed from estimated
autocorrelations.
Notes
-----
The Ljung-Box and Box-Pierce statistics differ in their scaling of the
autocorrelation function. The Ljung-Box test has better finite-sample
properties.
References
----------
.. [*] Greene, W. "Econometric Analysis," 5th ed., Pearson, 2003.
.. [*] J. Carlos Escanciano, Ignacio N. Lobato,
"An automatic Portmanteau test for serial correlation",
Journal of Econometrics, Volume 151, 2009.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.sunspots.load_pandas().data
>>> res = sm.tsa.ARMA(data["SUNACTIVITY"], (1,1)).fit(disp=-1)
>>> sm.stats.acorr_ljungbox(res.resid, lags=[10], return_df=True)
lb_stat lb_pvalue
10 214.106992 1.827374e-40
"""
# Avoid cyclic import
from statsmodels.tsa.stattools._stattools import acf
x = array_like(x, "x")
period = int_like(period, "period", optional=True)
model_df = int_like(model_df, "model_df", optional=False)
if period is not None and period <= 1:
raise ValueError("period must be >= 2")
if model_df < 0:
raise ValueError("model_df must be >= 0")
nobs = x.shape[0]
if auto_lag:
maxlag = nobs - 1
# Compute sum of squared autocorrelations
sacf = acf(x, nlags=maxlag, fft=False)
if not boxpierce:
q_sacf = (nobs * (nobs + 2) *
np.cumsum(sacf[1:maxlag + 1] ** 2
/ (nobs - np.arange(1, maxlag + 1))))
else:
q_sacf = nobs * np.cumsum(sacf[1:maxlag + 1] ** 2)
# obtain thresholds
q = 2.4
threshold = np.sqrt(q * np.log(nobs))
threshold_metric = np.abs(sacf).max() * np.sqrt(nobs)
# compute penalized sum of squared autocorrelations
if (threshold_metric <= threshold):
q_sacf = q_sacf - (np.arange(1, nobs) * np.log(nobs))
else:
q_sacf = q_sacf - (2 * np.arange(1, nobs))
# note: np.argmax returns first (i.e., smallest) index of largest value
lags = np.argmax(q_sacf)
lags = max(1, lags) # optimal lag has to be at least 1
lags = int_like(lags, "lags")
lags = np.arange(1, lags + 1)
elif period is not None:
lags = np.arange(1, min(nobs // 5, 2 * period) + 1, dtype=int)
elif lags is None:
lags = np.arange(1, min(nobs // 5, 10) + 1, dtype=int)
elif not isinstance(lags, Iterable):
lags = int_like(lags, "lags")
lags = np.arange(1, lags + 1)
lags = array_like(lags, "lags", dtype="int")
maxlag = lags.max()
# normalize by nobs not (nobs-nlags)
# SS: unbiased=False is default now
sacf = acf(x, nlags=maxlag, fft=False)
sacf2 = sacf[1:maxlag + 1] ** 2 / (nobs - np.arange(1, maxlag + 1))
qljungbox = nobs * (nobs + 2) * np.cumsum(sacf2)[lags - 1]
adj_lags = lags - model_df
pval = np.full_like(qljungbox, np.nan)
loc = adj_lags > 0
pval[loc] = stats.chi2.sf(qljungbox[loc], adj_lags[loc])
if not boxpierce:
return pd.DataFrame({"lb_stat": qljungbox, "lb_pvalue": pval},
index=lags)
qboxpierce = nobs * np.cumsum(sacf[1:maxlag + 1] ** 2)[lags - 1]
pvalbp = np.full_like(qljungbox, np.nan)
pvalbp[loc] = stats.chi2.sf(qboxpierce[loc], adj_lags[loc])
return pd.DataFrame({"lb_stat": qljungbox, "lb_pvalue": pval,
"bp_stat": qboxpierce, "bp_pvalue": pvalbp},
index=lags) | Ljung-Box test of autocorrelation in residuals.
Parameters
----------
x : array_like
The data series. The data is demeaned before the test statistic is
computed.
lags : {int, array_like}, default None
If lags is an integer then this is taken to be the largest lag
that is included, the test result is reported for all smaller lag
length. If lags is a list or array, then all lags are included up to
the largest lag in the list, however only the tests for the lags in
the list are reported. If lags is None, then the default maxlag is
min(10, nobs // 5). The default number of lags changes if period
is set.
boxpierce : bool, default False
If true, then the Box-Pierce test results are returned in addition
to the Ljung-Box test results.
model_df : int, default 0
Number of degrees of freedom consumed by the model. In an ARMA model,
this value is usually p+q where p is the AR order and q is the MA
order. This value is subtracted from the degrees-of-freedom used in
the test so that the adjusted dof for the statistics are
lags - model_df. If lags - model_df <= 0, then NaN is returned.
period : int, default None
The period of a Seasonal time series. Used to compute the max lag
for seasonal data which uses min(2*period, nobs // 5) if set. If None,
then the default rule is used to set the number of lags. When set, must
be >= 2.
auto_lag : bool, default False
Flag indicating whether to automatically determine the optimal lag
length based on a threshold for the maximum correlation value.
Returns
-------
DataFrame
Frame with columns:
* lb_stat - The Ljung-Box test statistic.
* lb_pvalue - The p-value based on chi-square distribution. The
p-value is computed as 1 - chi2.cdf(lb_stat, dof) where dof is
lag - model_df. If lag - model_df <= 0, then NaN is returned for
the pvalue.
* bp_stat - The Box-Pierce test statistic.
* bp_pvalue - The p-value for the Box-Pierce test based on the chi-square
distribution. The p-value is computed as 1 - chi2.cdf(bp_stat, dof)
where dof is lag - model_df. If lag - model_df <= 0, then NaN is
returned for the pvalue.
See Also
--------
statsmodels.regression.linear_model.OLS.fit
Regression model fitting.
statsmodels.regression.linear_model.RegressionResults
Results from linear regression models.
statsmodels.stats.stattools.q_stat
Ljung-Box test statistic computed from estimated
autocorrelations.
Notes
-----
The Ljung-Box and Box-Pierce statistics differ in their scaling of the
autocorrelation function. The Ljung-Box test has better finite-sample
properties.
References
----------
.. [*] Greene, W. "Econometric Analysis," 5th ed., Pearson, 2003.
.. [*] J. Carlos Escanciano, Ignacio N. Lobato,
"An automatic Portmanteau test for serial correlation",
Journal of Econometrics, Volume 151, 2009.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.sunspots.load_pandas().data
>>> res = sm.tsa.ARMA(data["SUNACTIVITY"], (1,1)).fit(disp=-1)
>>> sm.stats.acorr_ljungbox(res.resid, lags=[10], return_df=True)
lb_stat lb_pvalue
10 214.106992 1.827374e-40 | acorr_ljungbox | python | statsmodels/statsmodels | statsmodels/stats/diagnostic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/diagnostic.py | BSD-3-Clause |
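A short sketch applying the test to white-noise data as a stand-in for model residuals; setting model_df=2 (as for an ARMA(1, 1)) makes the p-values for the first two lags NaN because the adjusted degrees of freedom are non-positive.

import numpy as np
from statsmodels.stats.diagnostic import acorr_ljungbox

rng = np.random.default_rng(2)
resid = rng.standard_normal(500)  # stand-in for estimated residuals

# Ljung-Box and Box-Pierce statistics for lags 1 through 10.
lb = acorr_ljungbox(resid, lags=10, model_df=2, boxpierce=True)
print(lb[["lb_stat", "lb_pvalue", "bp_stat", "bp_pvalue"]])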
def acorr_lm(resid, nlags=None, store=False, *, period=None,
ddof=0, cov_type="nonrobust", cov_kwds=None):
"""
Lagrange Multiplier tests for autocorrelation.
This is a generic Lagrange Multiplier test for autocorrelation. Returns
Engle's ARCH test if resid is the squared residual array. Breusch-Godfrey
is a variation on this test with additional exogenous variables.
Parameters
----------
resid : array_like
Time series to test.
nlags : int, default None
Highest lag to use.
store : bool, default False
If true then the intermediate results are also returned.
period : int, default None
The period of a Seasonal time series. Used to compute the max lag
for seasonal data which uses min(2*period, nobs // 5) if set. If None,
then the default rule is used to set the number of lags. When set, must
be >= 2.
ddof : int, default 0
The number of degrees of freedom consumed by the model used to
produce resid. The default value is 0.
cov_type : str, default "nonrobust"
Covariance type. The default is "nonrobust", which uses the classic
OLS covariance estimator. Specify one of "HC0", "HC1", "HC2", "HC3"
to use White's covariance estimator. All covariance types supported
by ``OLS.fit`` are accepted.
cov_kwds : dict, default None
Dictionary of covariance options passed to ``OLS.fit``. See OLS.fit for
more details.
Returns
-------
lm : float
Lagrange multiplier test statistic.
lmpval : float
The p-value for Lagrange multiplier test.
fval : float
The f statistic of the F test, alternative version of the same
test based on F test for the parameter restriction.
fpval : float
The pvalue of the F test.
res_store : ResultsStore, optional
Intermediate results. Only returned if store=True.
See Also
--------
het_arch
Conditional heteroskedasticity testing.
acorr_breusch_godfrey
Breusch-Godfrey test for serial correlation.
acorr_ljungbox
Ljung-Box test for serial correlation.
Notes
-----
The test statistic is computed as (nobs - ddof) * r2 where r2 is the
R-squared from a regression of the residual on nlags lags of the
residual.
"""
resid = array_like(resid, "resid", ndim=1)
cov_type = string_like(cov_type, "cov_type")
cov_kwds = {} if cov_kwds is None else cov_kwds
cov_kwds = dict_like(cov_kwds, "cov_kwds")
nobs = resid.shape[0]
if period is not None and nlags is None:
maxlag = min(nobs // 5, 2 * period)
elif nlags is None:
maxlag = min(10, nobs // 5)
else:
maxlag = nlags
xdall = lagmat(resid[:, None], maxlag, trim="both")
nobs = xdall.shape[0]
xdall = np.c_[np.ones((nobs, 1)), xdall]
xshort = resid[-nobs:]
res_store = ResultsStore()
usedlag = maxlag
resols = OLS(xshort, xdall[:, :usedlag + 1]).fit(cov_type=cov_type,
cov_kwds=cov_kwds)
fval = float(resols.fvalue)
fpval = float(resols.f_pvalue)
if cov_type == "nonrobust":
lm = (nobs - ddof) * resols.rsquared
lmpval = stats.chi2.sf(lm, usedlag)
# Note: deg of freedom for LM test: nvars - constant = lags used
else:
r_matrix = np.hstack((np.zeros((usedlag, 1)), np.eye(usedlag)))
test_stat = resols.wald_test(r_matrix, use_f=False, scalar=True)
lm = float(test_stat.statistic)
lmpval = float(test_stat.pvalue)
if store:
res_store.resols = resols
res_store.usedlag = usedlag
return lm, lmpval, fval, fpval, res_store
else:
return lm, lmpval, fval, fpval | Lagrange Multiplier tests for autocorrelation.
This is a generic Lagrange Multiplier test for autocorrelation. Returns
Engle's ARCH test if resid is the squared residual array. Breusch-Godfrey
is a variation on this test with additional exogenous variables.
Parameters
----------
resid : array_like
Time series to test.
nlags : int, default None
Highest lag to use.
store : bool, default False
If true then the intermediate results are also returned.
period : int, default None
The period of a Seasonal time series. Used to compute the max lag
for seasonal data which uses min(2*period, nobs // 5) if set. If None,
then the default rule is used to set the number of lags. When set, must
be >= 2.
ddof : int, default 0
The number of degrees of freedom consumed by the model used to
produce resid. The default value is 0.
cov_type : str, default "nonrobust"
Covariance type. The default is "nonrobust", which uses the classic
OLS covariance estimator. Specify one of "HC0", "HC1", "HC2", "HC3"
to use White's covariance estimator. All covariance types supported
by ``OLS.fit`` are accepted.
cov_kwds : dict, default None
Dictionary of covariance options passed to ``OLS.fit``. See OLS.fit for
more details.
Returns
-------
lm : float
Lagrange multiplier test statistic.
lmpval : float
The p-value for Lagrange multiplier test.
fval : float
The f statistic of the F test, alternative version of the same
test based on F test for the parameter restriction.
fpval : float
The pvalue of the F test.
res_store : ResultsStore, optional
Intermediate results. Only returned if store=True.
See Also
--------
het_arch
Conditional heteroskedasticity testing.
acorr_breusch_godfrey
Breusch-Godfrey test for serial correlation.
acorr_ljungbox
Ljung-Box test for serial correlation.
Notes
-----
The test statistic is computed as (nobs - ddof) * r2 where r2 is the
R-squared from a regression of the residual on nlags lags of the
residual. | acorr_lm | python | statsmodels/statsmodels | statsmodels/stats/diagnostic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/diagnostic.py | BSD-3-Clause |
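A minimal sketch of the generic LM test on simulated residuals; the second call illustrates the robust-covariance variant using "HC0", one of the covariance types the docstring says OLS.fit accepts.

import numpy as np
from statsmodels.stats.diagnostic import acorr_lm

rng = np.random.default_rng(3)
resid = rng.standard_normal(250)

# Classic LM test using 4 lags of the residual in the auxiliary regression.
lm, lm_pval, fval, f_pval = acorr_lm(resid, nlags=4)

# Same test, but with White's covariance estimator in the auxiliary regression.
lm_r, lm_pval_r, fval_r, f_pval_r = acorr_lm(resid, nlags=4, cov_type="HC0")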
def het_arch(resid, nlags=None, store=False, ddof=0):
"""
Engle's Test for Autoregressive Conditional Heteroscedasticity (ARCH).
Parameters
----------
resid : ndarray
residuals from an estimation, or time series
nlags : int, default None
Highest lag to use.
store : bool, default False
If true then the intermediate results are also returned
ddof : int, default 0
If the residuals are from a regression, or ARMA estimation, then there
are recommendations to correct the degrees of freedom by the number
of parameters that have been estimated, for example ddof=p+q for an
ARMA(p,q).
Returns
-------
lm : float
Lagrange multiplier test statistic
lmpval : float
p-value for Lagrange multiplier test
fval : float
fstatistic for F test, alternative version of the same test based on
F test for the parameter restriction
fpval : float
pvalue for F test
res_store : ResultsStore, optional
Intermediate results. Returned if store is True.
Notes
-----
verified against R:FinTS::ArchTest
"""
return acorr_lm(resid ** 2, nlags=nlags, store=store, ddof=ddof) | Engle's Test for Autoregressive Conditional Heteroscedasticity (ARCH).
Parameters
----------
resid : ndarray
residuals from an estimation, or time series
nlags : int, default None
Highest lag to use.
store : bool, default False
If true then the intermediate results are also returned
ddof : int, default 0
If the residuals are from a regression, or ARMA estimation, then there
are recommendations to correct the degrees of freedom by the number
of parameters that have been estimated, for example ddof=p+q for an
ARMA(p,q).
Returns
-------
lm : float
Lagrange multiplier test statistic
lmpval : float
p-value for Lagrange multiplier test
fval : float
fstatistic for F test, alternative version of the same test based on
F test for the parameter restriction
fpval : float
pvalue for F test
res_store : ResultsStore, optional
Intermediate results. Returned if store is True.
Notes
-----
verified against R:FinTS::ArchTest | het_arch | python | statsmodels/statsmodels | statsmodels/stats/diagnostic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/diagnostic.py | BSD-3-Clause |
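A brief sketch of the ARCH test; the residuals are simulated white noise standing in for regression or ARMA residuals, and ddof=2 illustrates the degrees-of-freedom correction described above.

import numpy as np
from statsmodels.stats.diagnostic import het_arch

rng = np.random.default_rng(4)
resid = rng.standard_normal(300)

# LM and F variants of Engle's ARCH test using 5 lags of the squared residuals.
lm, lm_pval, fval, f_pval = het_arch(resid, nlags=5, ddof=2)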
def acorr_breusch_godfrey(res, nlags=None, store=False):
"""
Breusch-Godfrey Lagrange Multiplier tests for residual autocorrelation.
Parameters
----------
res : RegressionResults
Estimation results for which the residuals are tested for serial
correlation.
nlags : int, optional
Number of lags to include in the auxiliary regression. (nlags is
highest lag).
store : bool, default False
If store is true, then an additional class instance that contains
intermediate results is returned.
Returns
-------
lm : float
Lagrange multiplier test statistic.
lmpval : float
The p-value for Lagrange multiplier test.
fval : float
The value of the f statistic for F test, alternative version of the
same test based on F test for the parameter restriction.
fpval : float
The pvalue for F test.
res_store : ResultsStore
A class instance that holds intermediate results. Only returned if
store=True.
Notes
-----
BG adds lags of residual to exog in the design matrix for the auxiliary
regression with residuals as endog. See [1]_, section 12.7.1.
References
----------
.. [1] Greene, W. H. Econometric Analysis. New Jersey. Prentice Hall;
5th edition. (2002).
"""
x = np.asarray(res.resid).squeeze()
if x.ndim != 1:
raise ValueError("Model resid must be a 1d array. Cannot be used on"
" multivariate models.")
exog_old = res.model.exog
nobs = x.shape[0]
if nlags is None:
nlags = min(10, nobs // 5)
x = np.concatenate((np.zeros(nlags), x))
xdall = lagmat(x[:, None], nlags, trim="both")
nobs = xdall.shape[0]
xdall = np.c_[np.ones((nobs, 1)), xdall]
xshort = x[-nobs:]
if exog_old is None:
exog = xdall
else:
exog = np.column_stack((exog_old, xdall))
k_vars = exog.shape[1]
resols = OLS(xshort, exog).fit()
ft = resols.f_test(np.eye(nlags, k_vars, k_vars - nlags))
fval = ft.fvalue
fpval = ft.pvalue
fval = float(np.squeeze(fval))
fpval = float(np.squeeze(fpval))
lm = nobs * resols.rsquared
lmpval = stats.chi2.sf(lm, nlags)
# Note: degrees of freedom for LM test is nvars minus constant = usedlags
if store:
res_store = ResultsStore()
res_store.resols = resols
res_store.usedlag = nlags
return lm, lmpval, fval, fpval, res_store
else:
return lm, lmpval, fval, fpval | Breusch-Godfrey Lagrange Multiplier tests for residual autocorrelation.
Parameters
----------
res : RegressionResults
Estimation results for which the residuals are tested for serial
correlation.
nlags : int, optional
Number of lags to include in the auxiliary regression. (nlags is
highest lag).
store : bool, default False
If store is true, then an additional class instance that contains
intermediate results is returned.
Returns
-------
lm : float
Lagrange multiplier test statistic.
lmpval : float
The p-value for Lagrange multiplier test.
fval : float
The value of the f statistic for F test, alternative version of the
same test based on F test for the parameter restriction.
fpval : float
The pvalue for F test.
res_store : ResultsStore
A class instance that holds intermediate results. Only returned if
store=True.
Notes
-----
BG adds lags of residual to exog in the design matrix for the auxiliary
regression with residuals as endog. See [1]_, section 12.7.1.
References
----------
.. [1] Greene, W. H. Econometric Analysis. New Jersey. Prentice Hall;
5th edition. (2002). | acorr_breusch_godfrey | python | statsmodels/statsmodels | statsmodels/stats/diagnostic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/diagnostic.py | BSD-3-Clause |
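A hedged usage sketch on synthetic data; the test needs a fitted results instance because the original regressors are reused, together with lagged residuals, in the auxiliary regression.

import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.stats.diagnostic import acorr_breusch_godfrey

rng = np.random.default_rng(5)
n = 200
x = np.column_stack([np.ones(n), rng.standard_normal(n)])
y = x @ np.array([1.0, 2.0]) + rng.standard_normal(n)

res = OLS(y, x).fit()
lm, lm_pval, fval, f_pval = acorr_breusch_godfrey(res, nlags=4)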
def _check_het_test(x: np.ndarray, test_name: str) -> None:
"""
Check validity of the exogenous regressors in a heteroskedasticity test
Parameters
----------
x : ndarray
The exogenous regressor array
test_name : str
The test name for the exception
"""
x_max = x.max(axis=0)
if (
not np.any(((x_max - x.min(axis=0)) == 0) & (x_max != 0))
or x.shape[1] < 2
):
raise ValueError(
f"{test_name} test requires exog to have at least "
"two columns where one is a constant."
) | Check validity of the exogenous regressors in a heteroskedasticity test
Parameters
----------
x : ndarray
The exogenous regressor array
test_name : str
The test name for the exception | _check_het_test | python | statsmodels/statsmodels | statsmodels/stats/diagnostic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/diagnostic.py | BSD-3-Clause |
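A small illustration of the constant-column requirement this private helper enforces; the arrays are arbitrary, and the expected behaviour for the failing case is noted in a comment rather than executed.

import numpy as np
from statsmodels.stats.diagnostic import _check_het_test

x_ok = np.column_stack([np.ones(10), np.arange(10.0)])  # constant plus one regressor
x_bad = np.arange(10.0)[:, None]                        # single, non-constant column

_check_het_test(x_ok, "White's heteroskedasticity")      # passes silently
# _check_het_test(x_bad, "White's heteroskedasticity")   # would raise ValueError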
def het_white(resid, exog):
"""
White's Lagrange Multiplier Test for Heteroscedasticity.
Parameters
----------
resid : array_like
The residuals. The squared residuals are used as the endogenous
variable.
exog : array_like
The explanatory variables for the variance. Squares and interaction
terms are automatically included in the auxiliary regression.
Returns
-------
lm : float
The lagrange multiplier statistic.
lm_pvalue :float
The p-value of lagrange multiplier test.
fvalue : float
The f-statistic of the hypothesis that the error variance does not
depend on x. This is an alternative test variant not the original
LM test.
f_pvalue : float
The p-value for the f-statistic.
Notes
-----
Assumes x contains a constant (for counting dof).
question: does f-statistic make sense? constant ?
References
----------
Greene section 11.4.1 5th edition p. 222. Test statistic reproduces
Greene 5th, example 11.3.
"""
x = array_like(exog, "exog", ndim=2)
y = array_like(resid, "resid", ndim=2, shape=(x.shape[0], 1))
_check_het_test(x, "White's heteroskedasticity")
nobs, nvars0 = x.shape
i0, i1 = np.triu_indices(nvars0)
exog = x[:, i0] * x[:, i1]
nobs, nvars = exog.shape
assert nvars == nvars0 * (nvars0 - 1) / 2. + nvars0
resols = OLS(y ** 2, exog).fit()
fval = resols.fvalue
fpval = resols.f_pvalue
lm = nobs * resols.rsquared
# Note: degrees of freedom for LM test is nvars minus constant
# degrees of freedom take possible reduced rank in exog into account
# df_model checks the rank to determine df
# extra calculation that can be removed:
assert resols.df_model == np.linalg.matrix_rank(exog) - 1
lmpval = stats.chi2.sf(lm, resols.df_model)
return lm, lmpval, fval, fpval | White's Lagrange Multiplier Test for Heteroscedasticity.
Parameters
----------
resid : array_like
The residuals. The squared residuals are used as the endogenous
variable.
exog : array_like
The explanatory variables for the variance. Squares and interaction
terms are automatically included in the auxiliary regression.
Returns
-------
lm : float
The lagrange multiplier statistic.
lm_pvalue :float
The p-value of lagrange multiplier test.
fvalue : float
The f-statistic of the hypothesis that the error variance does not
depend on x. This is an alternative test variant not the original
LM test.
f_pvalue : float
The p-value for the f-statistic.
Notes
-----
Assumes x contains a constant (for counting dof).
question: does f-statistic make sense? constant ?
References
----------
Greene section 11.4.1 5th edition p. 222. Test statistic reproduces
Greene 5th, example 11.3. | het_white | python | statsmodels/statsmodels | statsmodels/stats/diagnostic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/diagnostic.py | BSD-3-Clause |
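A minimal sketch on simulated data; the exog passed to het_white must include a constant, and the squares and cross-products of the regressors are formed inside the function.

import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.stats.diagnostic import het_white

rng = np.random.default_rng(6)
n = 250
exog = np.column_stack([np.ones(n), rng.standard_normal((n, 2))])
y = exog @ np.array([1.0, 0.5, -0.5]) + rng.standard_normal(n)

resid = OLS(y, exog).fit().resid
lm, lm_pval, fval, f_pval = het_white(resid, exog)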
def het_goldfeldquandt(y, x, idx=None, split=None, drop=None,
alternative="increasing", store=False):
"""
Goldfeld-Quandt homoskedasticity test.
This test examines whether the residual variance is the same in 2
subsamples.
Parameters
----------
y : array_like
endogenous variable
x : array_like
exogenous variable, regressors
idx : int, default None
column index of variable according to which observations are
sorted for the split
split : {int, float}, default None
If an integer, this is the index at which sample is split.
If a float in 0<split<1 then split is interpreted as fraction
of the observations in the first sample. If None, uses nobs//2.
drop : {int, float}, default None
If this is not None, then observation are dropped from the middle
part of the sorted series. If 0<split<1 then split is interpreted
as fraction of the number of observations to be dropped.
Note: Currently, observations are dropped between split and
split+drop, where split and drop are the indices (given by rounding
if specified as fraction). The first sample is [0:split], the
second sample is [split+drop:]
alternative : {"increasing", "decreasing", "two-sided"}
The default is increasing. This specifies the alternative for the
p-value calculation.
store : bool, default False
Flag indicating to return the regression results
Returns
-------
fval : float
value of the F-statistic
pval : float
p-value of the hypothesis that the variance in one subsample is
larger than in the other subsample
ordering : str
The ordering used in the alternative.
res_store : ResultsStore, optional
Storage for the intermediate and final results that are calculated
Notes
-----
The Null hypothesis is that the variance in the two sub-samples are the
same. The alternative hypothesis, can be increasing, i.e. the variance
in the second sample is larger than in the first, or decreasing or
two-sided.
Results are identical to R, but the drop option is defined differently.
(sorting by idx not tested yet)
"""
x = np.asarray(x)
y = np.asarray(y) # **2
nobs, nvars = x.shape
if split is None:
split = nobs // 2
elif (0 < split) and (split < 1):
split = int(nobs * split)
if drop is None:
start2 = split
elif (0 < drop) and (drop < 1):
start2 = split + int(nobs * drop)
else:
start2 = split + drop
if idx is not None:
xsortind = np.argsort(x[:, idx])
y = y[xsortind]
x = x[xsortind, :]
resols1 = OLS(y[:split], x[:split]).fit()
resols2 = OLS(y[start2:], x[start2:]).fit()
fval = resols2.mse_resid / resols1.mse_resid
# if fval>1:
if alternative.lower() in ["i", "inc", "increasing"]:
fpval = stats.f.sf(fval, resols1.df_resid, resols2.df_resid)
ordering = "increasing"
elif alternative.lower() in ["d", "dec", "decreasing"]:
fpval = stats.f.sf(1. / fval, resols2.df_resid, resols1.df_resid)
ordering = "decreasing"
elif alternative.lower() in ["2", "2-sided", "two-sided"]:
fpval_sm = stats.f.cdf(fval, resols2.df_resid, resols1.df_resid)
fpval_la = stats.f.sf(fval, resols2.df_resid, resols1.df_resid)
fpval = 2 * min(fpval_sm, fpval_la)
ordering = "two-sided"
else:
raise ValueError("invalid alternative")
if store:
res = ResultsStore()
res.__doc__ = "Test Results for Goldfeld-Quandt test of heterogeneity"
res.fval = fval
res.fpval = fpval
res.df_fval = (resols2.df_resid, resols1.df_resid)
res.resols1 = resols1
res.resols2 = resols2
res.ordering = ordering
res.split = split
res._str = """\
The Goldfeld-Quandt test for null hypothesis that the variance in the second
subsample is {} than in the first subsample:
F-statistic ={:8.4f} and p-value ={:8.4f}""".format(ordering, fval, fpval)
return fval, fpval, ordering, res
return fval, fpval, ordering | Goldfeld-Quandt homoskedasticity test.
This test examines whether the residual variance is the same in 2
subsamples.
Parameters
----------
y : array_like
endogenous variable
x : array_like
exogenous variable, regressors
idx : int, default None
column index of variable according to which observations are
sorted for the split
split : {int, float}, default None
If an integer, this is the index at which sample is split.
If a float in 0<split<1 then split is interpreted as fraction
of the observations in the first sample. If None, uses nobs//2.
drop : {int, float}, default None
If this is not None, then observation are dropped from the middle
part of the sorted series. If 0<split<1 then split is interpreted
as fraction of the number of observations to be dropped.
Note: Currently, observations are dropped between split and
split+drop, where split and drop are the indices (given by rounding
if specified as fraction). The first sample is [0:split], the
second sample is [split+drop:]
alternative : {"increasing", "decreasing", "two-sided"}
The default is increasing. This specifies the alternative for the
p-value calculation.
store : bool, default False
Flag indicating to return the regression results
Returns
-------
fval : float
value of the F-statistic
pval : float
p-value of the hypothesis that the variance in one subsample is
larger than in the other subsample
ordering : str
The ordering used in the alternative.
res_store : ResultsStore, optional
Storage for the intermediate and final results that are calculated
Notes
-----
The Null hypothesis is that the variance in the two sub-samples are the
same. The alternative hypothesis, can be increasing, i.e. the variance
in the second sample is larger than in the first, or decreasing or
two-sided.
Results are identical to R, but the drop option is defined differently.
(sorting by idx not tested yet) | het_goldfeldquandt | python | statsmodels/statsmodels | statsmodels/stats/diagnostic.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/diagnostic.py | BSD-3-Clause |
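A closing sketch on simulated data whose error variance increases with the second regressor; idx=1 sorts by that column and drop=0.2 removes the middle fifth of the sorted sample before splitting.

import numpy as np
from statsmodels.stats.diagnostic import het_goldfeldquandt

rng = np.random.default_rng(7)
n = 300
x = np.column_stack([np.ones(n), rng.standard_normal(n)])
# Error variance grows with the second regressor, so "increasing" is the natural alternative.
y = x @ np.array([1.0, 1.0]) + np.exp(0.5 * x[:, 1]) * rng.standard_normal(n)

fval, pval, ordering = het_goldfeldquandt(y, x, idx=1, drop=0.2,
                                          alternative="increasing")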