code
stringlengths
26
870k
docstring
stringlengths
1
65.6k
func_name
stringlengths
1
194
language
stringclasses
1 value
repo
stringlengths
8
68
path
stringlengths
5
194
url
stringlengths
46
254
license
stringclasses
4 values
def test_orthogonal_target(self):
    """
    Rotation towards target matrix example
    http://www.stat.ucla.edu/research/gpa

    Checks that the iterative GPA solution for orthogonal target rotation
    agrees with the closed-form (analytic) target rotation.
    """
    # loading matrix to be rotated
    A = self.str2matrix("""
     .830 -.396
     .818 -.469
     .777 -.470
     .798 -.401
     .786  .500
     .672  .458
     .594  .444
     .647  .333
    """)
    # target matrix H
    H = self.str2matrix("""
      .8 -.3
      .8 -.4
      .7 -.4
      .9 -.4
      .8  .5
      .6  .4
      .5  .4
      .6  .3
    """)

    def vgQ(L=None, A=None, T=None):
        # value and gradient of the target-rotation criterion
        return vgQ_target(H, L=L, A=A, T=T)

    L, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='orthogonal')
    # analytic solution of the same problem (orthogonal Procrustes)
    T_analytic = target_rotation(A, H)
    self.assertTrue(np.allclose(T, T_analytic, atol=1e-05))
Rotation towards target matrix example http://www.stat.ucla.edu/research/gpa
test_orthogonal_target
python
statsmodels/statsmodels
statsmodels/multivariate/factor_rotation/tests/test_rotation.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py
BSD-3-Clause
def get_quartimin_example(cls):
    # Reference data for the quartimin rotation: the loading matrix A,
    # the expected GPA iteration table (iteration, f, log10(s), step size)
    # and the expected rotated loadings L.
    A = cls.get_A()
    table_required = cls.str2matrix("""
     0.00000    0.42806   -0.46393    1.00000
     1.00000    0.41311   -0.57313    0.25000
     2.00000    0.38238   -0.36652    0.50000
     3.00000    0.31850   -0.21011    0.50000
     4.00000    0.20937   -0.13838    0.50000
     5.00000    0.12379   -0.35583    0.25000
     6.00000    0.04289   -0.53244    0.50000
     7.00000    0.01098   -0.86649    0.50000
     8.00000    0.00566   -1.65798    0.50000
     9.00000    0.00558   -2.13212    0.25000
    10.00000    0.00557   -2.49020    0.25000
    11.00000    0.00557   -2.84585    0.25000
    12.00000    0.00557   -3.20320    0.25000
    13.00000    0.00557   -3.56143    0.25000
    14.00000    0.00557   -3.92005    0.25000
    15.00000    0.00557   -4.27885    0.25000
    16.00000    0.00557   -4.63772    0.25000
    17.00000    0.00557   -4.99663    0.25000
    18.00000    0.00557   -5.35555    0.25000
    """)
    L_required = cls.str2matrix("""
    0.891822  0.056015
    0.953680 -0.023246
    0.929150 -0.046503
    0.876683  0.033658
    0.013701  0.925000
   -0.017265  0.821253
   -0.052445  0.764953
    0.085890  0.683115
    """)
    return A, table_required, L_required
) L_required = cls.str2matrix(
get_quartimin_example
python
statsmodels/statsmodels
statsmodels/multivariate/factor_rotation/tests/test_rotation.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py
BSD-3-Clause
def get_biquartimin_example(cls):
    # Reference data for the biquartimin rotation (gradient-based GPA):
    # loading matrix A, expected iteration table and rotated loadings L.
    A = cls.get_A()
    table_required = cls.str2matrix("""
     0.00000    0.21632   -0.54955    1.00000
     1.00000    0.19519   -0.46174    0.50000
     2.00000    0.09479   -0.16365    1.00000
     3.00000   -0.06302   -0.32096    0.50000
     4.00000   -0.21304   -0.46562    1.00000
     5.00000   -0.33199   -0.33287    1.00000
     6.00000   -0.35108   -0.63990    0.12500
     7.00000   -0.35543   -1.20916    0.12500
     8.00000   -0.35568   -2.61213    0.12500
     9.00000   -0.35568   -2.97910    0.06250
    10.00000   -0.35568   -3.32645    0.06250
    11.00000   -0.35568   -3.66021    0.06250
    12.00000   -0.35568   -3.98564    0.06250
    13.00000   -0.35568   -4.30635    0.06250
    14.00000   -0.35568   -4.62451    0.06250
    15.00000   -0.35568   -4.94133    0.06250
    16.00000   -0.35568   -5.25745    0.06250
    """)
    L_required = cls.str2matrix("""
    1.01753 -0.13657
    1.11338 -0.24643
    1.09200 -0.26890
    1.00676 -0.16010
   -0.26534  1.11371
   -0.26972  0.99553
   -0.29341  0.93561
   -0.10806  0.80513
    """)
    return A, table_required, L_required
) L_required = cls.str2matrix(
get_biquartimin_example
python
statsmodels/statsmodels
statsmodels/multivariate/factor_rotation/tests/test_rotation.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py
BSD-3-Clause
def get_biquartimin_example_derivative_free(cls):
    # Reference data for biquartimin rotation using the derivative-free
    # GPA variant; the table differs slightly from the gradient-based
    # version in the last iterations (e.g. final row).
    A = cls.get_A()
    table_required = cls.str2matrix("""
     0.00000    0.21632   -0.54955    1.00000
     1.00000    0.19519   -0.46174    0.50000
     2.00000    0.09479   -0.16365    1.00000
     3.00000   -0.06302   -0.32096    0.50000
     4.00000   -0.21304   -0.46562    1.00000
     5.00000   -0.33199   -0.33287    1.00000
     6.00000   -0.35108   -0.63990    0.12500
     7.00000   -0.35543   -1.20916    0.12500
     8.00000   -0.35568   -2.61213    0.12500
     9.00000   -0.35568   -2.97910    0.06250
    10.00000   -0.35568   -3.32645    0.06250
    11.00000   -0.35568   -3.66021    0.06250
    12.00000   -0.35568   -3.98564    0.06250
    13.00000   -0.35568   -4.30634    0.06250
    14.00000   -0.35568   -4.62451    0.06250
    15.00000   -0.35568   -4.94133    0.06250
    16.00000   -0.35568   -6.32435    0.12500
    """)
    L_required = cls.str2matrix("""
    1.01753 -0.13657
    1.11338 -0.24643
    1.09200 -0.26890
    1.00676 -0.16010
   -0.26534  1.11371
   -0.26972  0.99553
   -0.29342  0.93561
   -0.10806  0.80513
    """)
    return A, table_required, L_required
) L_required = cls.str2matrix(
get_biquartimin_example_derivative_free
python
statsmodels/statsmodels
statsmodels/multivariate/factor_rotation/tests/test_rotation.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py
BSD-3-Clause
def get_quartimax_example_derivative_free(cls):
    # Reference data for quartimax rotation with the derivative-free GPA:
    # loading matrix A, expected iteration table and rotated loadings L.
    A = cls.get_A()
    table_required = cls.str2matrix("""
     0.00000   -0.72073   -0.65498    1.00000
     1.00000   -0.88561   -0.34614    2.00000
     2.00000   -1.01992   -1.07152    1.00000
     3.00000   -1.02237   -1.51373    0.50000
     4.00000   -1.02269   -1.96205    0.50000
     5.00000   -1.02273   -2.41116    0.50000
     6.00000   -1.02273   -2.86037    0.50000
     7.00000   -1.02273   -3.30959    0.50000
     8.00000   -1.02273   -3.75881    0.50000
     9.00000   -1.02273   -4.20804    0.50000
    10.00000   -1.02273   -4.65726    0.50000
    11.00000   -1.02273   -5.10648    0.50000
    """)
    L_required = cls.str2matrix("""
    0.89876  0.19482
    0.93394  0.12974
    0.90213  0.10386
    0.87651  0.17128
    0.31558  0.87647
    0.25113  0.77349
    0.19801  0.71468
    0.30786  0.65933
    """)
    return A, table_required, L_required
) L_required = cls.str2matrix(
get_quartimax_example_derivative_free
python
statsmodels/statsmodels
statsmodels/multivariate/factor_rotation/tests/test_rotation.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py
BSD-3-Clause
def test_orthomax(self):
    """
    Quartimax example
    http://www.stat.ucla.edu/research/gpa

    Runs GPA with the orthomax criterion (gamma=0, i.e. quartimax) and
    checks the iteration table and rotated loadings against reference
    values; also checks equivalence with the oblimin criterion and with
    the derivative-free algorithm.
    """
    A = self.get_A()

    def vgQ(L=None, A=None, T=None):
        # orthomax with gamma=0 is the quartimax criterion
        return orthomax_objective(L=L, A=A, T=T, gamma=0,
                                  return_gradient=True)

    L, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='orthogonal')
    table_required = self.str2matrix("""
     0.00000   -0.72073   -0.65498    1.00000
     1.00000   -0.88561   -0.34614    2.00000
     2.00000   -1.01992   -1.07152    1.00000
     3.00000   -1.02237   -1.51373    0.50000
     4.00000   -1.02269   -1.96205    0.50000
     5.00000   -1.02273   -2.41116    0.50000
     6.00000   -1.02273   -2.86037    0.50000
     7.00000   -1.02273   -3.30959    0.50000
     8.00000   -1.02273   -3.75881    0.50000
     9.00000   -1.02273   -4.20804    0.50000
    10.00000   -1.02273   -4.65726    0.50000
    11.00000   -1.02273   -5.10648    0.50000
    """)
    L_required = self.str2matrix("""
    0.89876  0.19482
    0.93394  0.12974
    0.90213  0.10386
    0.87651  0.17128
    0.31558  0.87647
    0.25113  0.77349
    0.19801  0.71468
    0.30786  0.65933
    """)
    self.assertTrue(np.allclose(table, table_required, atol=1e-05))
    self.assertTrue(np.allclose(L, L_required, atol=1e-05))

    # oblimin criterion gives same result
    def vgQ(L=None, A=None, T=None):
        return oblimin_objective(L=L, A=A, T=T, gamma=0,
                                 rotation_method='orthogonal',
                                 return_gradient=True)

    L_oblimin, phi2, T2, table2 = GPA(A, vgQ=vgQ,
                                      rotation_method='orthogonal')
    self.assertTrue(np.allclose(L, L_oblimin, atol=1e-05))

    # derivative free quartimax
    out = self.get_quartimax_example_derivative_free()
    A, table_required, L_required = out

    def ff(L=None, A=None, T=None):
        # function-value-only objective for the derivative-free algorithm
        return orthomax_objective(L=L, A=A, T=T, gamma=0,
                                  return_gradient=False)

    L, phi, T, table = GPA(A, ff=ff, rotation_method='orthogonal')
    self.assertTrue(np.allclose(table, table_required, atol=1e-05))
    self.assertTrue(np.allclose(L, L_required, atol=1e-05))
Quartimax example http://www.stat.ucla.edu/research/gpa
test_orthomax
python
statsmodels/statsmodels
statsmodels/multivariate/factor_rotation/tests/test_rotation.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py
BSD-3-Clause
def test_equivalence_orthomax_oblimin(self):
    """
    These criteria should be equivalent when restricted to orthogonal
    rotation.
    See Hartman 1976 page 299.
    """
    A = self.get_A()
    gamma = 0  # quartimax

    def vgQ(L=None, A=None, T=None):
        return orthomax_objective(L=L, A=A, T=T, gamma=gamma,
                                  return_gradient=True)

    L_orthomax, phi, T, table = GPA(
        A, vgQ=vgQ, rotation_method='orthogonal')

    def vgQ(L=None, A=None, T=None):
        # oblimin restricted to orthogonal rotation
        return oblimin_objective(L=L, A=A, T=T, gamma=gamma,
                                 rotation_method='orthogonal',
                                 return_gradient=True)

    L_oblimin, phi2, T2, table2 = GPA(A, vgQ=vgQ,
                                      rotation_method='orthogonal')
    self.assertTrue(np.allclose(L_orthomax, L_oblimin, atol=1e-05))

    gamma = 1  # varimax

    def vgQ(L=None, A=None, T=None):
        return orthomax_objective(L=L, A=A, T=T, gamma=gamma,
                                  return_gradient=True)

    L_orthomax, phi, T, table = GPA(
        A, vgQ=vgQ, rotation_method='orthogonal')

    def vgQ(L=None, A=None, T=None):
        return oblimin_objective(L=L, A=A, T=T, gamma=gamma,
                                 rotation_method='orthogonal',
                                 return_gradient=True)

    L_oblimin, phi2, T2, table2 = GPA(
        A, vgQ=vgQ, rotation_method='orthogonal')
    self.assertTrue(np.allclose(L_orthomax, L_oblimin, atol=1e-05))
These criteria should be equivalent when restricted to orthogonal rotation. See Hartman 1976 page 299.
test_equivalence_orthomax_oblimin
python
statsmodels/statsmodels
statsmodels/multivariate/factor_rotation/tests/test_rotation.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py
BSD-3-Clause
def test_orthogonal_target(self):
    """
    Rotation towards target matrix example
    http://www.stat.ucla.edu/research/gpa

    Checks the GPA iteration table and loadings for orthogonal target
    rotation, then checks that the derivative-free variant and the
    oblique variant reproduce the same solutions.
    """
    A = self.get_A()
    # target matrix H
    H = self.str2matrix("""
      .8 -.3
      .8 -.4
      .7 -.4
      .9 -.4
      .8  .5
      .6  .4
      .5  .4
      .6  .3
    """)

    def vgQ(L=None, A=None, T=None):
        return vgQ_target(H, L=L, A=A, T=T)

    L, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='orthogonal')
    table_required = self.str2matrix("""
    0.00000    0.05925   -0.61244    1.00000
    1.00000    0.05444   -1.14701    0.12500
    2.00000    0.05403   -1.68194    0.12500
    3.00000    0.05399   -2.21689    0.12500
    4.00000    0.05399   -2.75185    0.12500
    5.00000    0.05399   -3.28681    0.12500
    6.00000    0.05399   -3.82176    0.12500
    7.00000    0.05399   -4.35672    0.12500
    8.00000    0.05399   -4.89168    0.12500
    9.00000    0.05399   -5.42664    0.12500
    """)
    L_required = self.str2matrix("""
    0.84168 -0.37053
    0.83191 -0.44386
    0.79096 -0.44611
    0.80985 -0.37650
    0.77040  0.52371
    0.65774  0.47826
    0.58020  0.46189
    0.63656  0.35255
    """)
    self.assertTrue(np.allclose(table, table_required, atol=1e-05))
    self.assertTrue(np.allclose(L, L_required, atol=1e-05))

    def ff(L=None, A=None, T=None):
        # function-value-only objective for the derivative-free algorithm
        return ff_target(H, L=L, A=A, T=T)

    L2, phi, T2, table = GPA(A, ff=ff, rotation_method='orthogonal')
    self.assertTrue(np.allclose(L, L2, atol=1e-05))
    self.assertTrue(np.allclose(T, T2, atol=1e-05))

    # same comparison for oblique target rotation
    def vgQ(L=None, A=None, T=None):
        return vgQ_target(H, L=L, A=A, T=T, rotation_method='oblique')

    L, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='oblique')

    def ff(L=None, A=None, T=None):
        return ff_target(H, L=L, A=A, T=T, rotation_method='oblique')

    L2, phi, T2, table = GPA(A, ff=ff, rotation_method='oblique')
    self.assertTrue(np.allclose(L, L2, atol=1e-05))
    self.assertTrue(np.allclose(T, T2, atol=1e-05))
Rotation towards target matrix example http://www.stat.ucla.edu/research/gpa
test_orthogonal_target
python
statsmodels/statsmodels
statsmodels/multivariate/factor_rotation/tests/test_rotation.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py
BSD-3-Clause
def test_orthogonal_partial_target(self):
    """
    Rotation towards target matrix example
    http://www.stat.ucla.edu/research/gpa

    Partial target rotation: only the entries of H selected by the
    weight matrix W are matched.
    """
    A = self.get_A()
    # target matrix H
    H = self.str2matrix("""
      .8 -.3
      .8 -.4
      .7 -.4
      .9 -.4
      .8  .5
      .6  .4
      .5  .4
      .6  .3
    """)
    # weight matrix W: 1 marks entries of H to be matched, 0 ignored
    W = self.str2matrix("""
    1 0
    0 1
    0 0
    1 1
    1 0
    1 0
    0 1
    1 0
    """)

    def vgQ(L=None, A=None, T=None):
        return vgQ_partial_target(H, W, L=L, A=A, T=T)

    L, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='orthogonal')
    table_required = self.str2matrix("""
     0.00000    0.02559   -0.84194    1.00000
     1.00000    0.02203   -1.27116    0.25000
     2.00000    0.02154   -1.71198    0.25000
     3.00000    0.02148   -2.15713    0.25000
     4.00000    0.02147   -2.60385    0.25000
     5.00000    0.02147   -3.05114    0.25000
     6.00000    0.02147   -3.49863    0.25000
     7.00000    0.02147   -3.94619    0.25000
     8.00000    0.02147   -4.39377    0.25000
     9.00000    0.02147   -4.84137    0.25000
    10.00000    0.02147   -5.28897    0.25000
    """)
    L_required = self.str2matrix("""
    0.84526 -0.36228
    0.83621 -0.43571
    0.79528 -0.43836
    0.81349 -0.36857
    0.76525  0.53122
    0.65303  0.48467
    0.57565  0.46754
    0.63308  0.35876
    """)
    self.assertTrue(np.allclose(table, table_required, atol=1e-05))
    self.assertTrue(np.allclose(L, L_required, atol=1e-05))

    def ff(L=None, A=None, T=None):
        # function-value-only objective for the derivative-free algorithm
        return ff_partial_target(H, W, L=L, A=A, T=T)

    L2, phi, T2, table = GPA(A, ff=ff, rotation_method='orthogonal')
    self.assertTrue(np.allclose(L, L2, atol=1e-05))
    self.assertTrue(np.allclose(T, T2, atol=1e-05))
Rotation towards target matrix example http://www.stat.ucla.edu/research/gpa
test_orthogonal_partial_target
python
statsmodels/statsmodels
statsmodels/multivariate/factor_rotation/tests/test_rotation.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py
BSD-3-Clause
def test_methods(self): """ Quartimax derivative free example http://www.stat.ucla.edu/research/gpa """ # orthomax, oblimin and CF are tested indirectly methods = ['quartimin', 'biquartimin', 'quartimax', 'biquartimax', 'varimax', 'equamax', 'parsimax', 'parsimony', 'target', 'partial_target'] for method in methods: method_args = [] if method == 'target': method_args = [self.get_H(), 'orthogonal'] self._test_template(method, *method_args) method_args = [self.get_H(), 'oblique'] self._test_template(method, *method_args) method_args = [self.get_H(), 'orthogonal'] self._test_template(method, *method_args, algorithm2='analytic') elif method == 'partial_target': method_args = [self.get_H(), self.get_W()] self._test_template(method, *method_args)
Quartimax derivative free example http://www.stat.ucla.edu/research/gpa
test_methods
python
statsmodels/statsmodels
statsmodels/multivariate/factor_rotation/tests/test_rotation.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/multivariate/factor_rotation/tests/test_rotation.py
BSD-3-Clause
def _faa_di_bruno_partitions(n):
    """
    Return all non-negative integer solutions of the diophantine equation

            n*k_n + ... + 2*k_2 + 1*k_1 = n   (1)

    Parameters
    ----------
    n : int
        the r.h.s. of Eq. (1)

    Returns
    -------
    partitions : list
        Each solution is itself a list of the form `[(m, k_m), ...]`
        for non-zero `k_m`. Notice that the index `m` is 1-based.

    Examples:
    ---------
    >>> _faa_di_bruno_partitions(2)
    [[(1, 2)], [(2, 1)]]
    >>> for p in _faa_di_bruno_partitions(4):
    ...     assert 4 == sum(m * k for (m, k) in p)
    """
    if n < 1:
        raise ValueError("Expected a positive integer; got %s instead" % n)
    try:
        # small n are served from a precomputed module-level cache
        return _faa_di_bruno_cache[n]
    except KeyError:
        # TODO: higher order terms
        # solve Eq. (31) from Blinninkov & Moessner here
        raise NotImplementedError('Higher order terms not yet implemented.')
Return all non-negative integer solutions of the diophantine equation n*k_n + ... + 2*k_2 + 1*k_1 = n (1) Parameters ---------- n : int the r.h.s. of Eq. (1) Returns ------- partitions : list Each solution is itself a list of the form `[(m, k_m), ...]` for non-zero `k_m`. Notice that the index `m` is 1-based. Examples: --------- >>> _faa_di_bruno_partitions(2) [[(1, 2)], [(2, 1)]] >>> for p in _faa_di_bruno_partitions(4): ... assert 4 == sum(m * k for (m, k) in p)
_faa_di_bruno_partitions
python
statsmodels/statsmodels
statsmodels/distributions/edgeworth.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/edgeworth.py
BSD-3-Clause
def cumulant_from_moments(momt, n):
    """Compute n-th cumulant given moments.

    Parameters
    ----------
    momt : array_like
        `momt[j]` contains `(j+1)`-th moment.
        These can be raw moments around zero, or central moments
        (in which case, `momt[0]` == 0).
    n : int
        which cumulant to calculate (must be >1)

    Returns
    -------
    kappa : float
        n-th cumulant.
    """
    if n < 1:
        raise ValueError("Expected a positive integer. Got %s instead." % n)
    if len(momt) < n:
        raise ValueError("%s-th cumulant requires %s moments, "
                         "only got %s." % (n, n, len(momt)))
    # sum over all integer partitions of n (Faa di Bruno formula)
    kappa = 0.
    for partition in _faa_di_bruno_partitions(n):
        order = sum(k for (m, k) in partition)
        contrib = (-1)**(order - 1) * factorial(order - 1)
        for (m, k) in partition:
            contrib *= np.power(momt[m - 1] / factorial(m), k) / factorial(k)
        kappa += contrib
    return kappa * factorial(n)
Compute n-th cumulant given moments. Parameters ---------- momt : array_like `momt[j]` contains `(j+1)`-th moment. These can be raw moments around zero, or central moments (in which case, `momt[0]` == 0). n : int which cumulant to calculate (must be >1) Returns ------- kappa : float n-th cumulant.
cumulant_from_moments
python
statsmodels/statsmodels
statsmodels/distributions/edgeworth.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/edgeworth.py
BSD-3-Clause
def prob2cdf_grid(probs):
    """Cumulative probabilities from cell probabilities on a grid.

    Parameters
    ----------
    probs : array_like
        Rectangular grid of cell probabilities.

    Returns
    -------
    cdf : ndarray
        Grid of cumulative probabilities with same shape as probs.
    """
    grid = np.array(probs, copy=True)
    # a cumulative sum along each axis in turn yields the joint cdf
    for axis in range(grid.ndim):
        grid = np.cumsum(grid, axis=axis)
    return grid
Cumulative probabilities from cell provabilites on a grid Parameters ---------- probs : array_like Rectangular grid of cell probabilities. Returns ------- cdf : ndarray Grid of cumulative probabilities with same shape as probs.
prob2cdf_grid
python
statsmodels/statsmodels
statsmodels/distributions/tools.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py
BSD-3-Clause
def cdf2prob_grid(cdf, prepend=0):
    """Cell probabilities from cumulative probabilities on a grid.

    Parameters
    ----------
    cdf : array_like
        Grid of cumulative probabilities with same shape as probs.
    prepend : scalar or None
        Value prepended along each axis before differencing; ``None``
        means no prepend (output shrinks by one along each axis).

    Returns
    -------
    probs : ndarray
        Rectangular grid of cell probabilities.
    """
    # np._NoValue is the sentinel np.diff uses for "no prepend"
    pre = np._NoValue if prepend is None else prepend
    prob = np.array(cdf, copy=True)
    # successive differencing along every axis inverts the cumsum
    for axis in range(prob.ndim):
        prob = np.diff(prob, prepend=pre, axis=axis)
    return prob
Cell probabilities from cumulative probabilities on a grid. Parameters ---------- cdf : array_like Grid of cumulative probabilities with same shape as probs. Returns ------- probs : ndarray Rectangular grid of cell probabilities.
cdf2prob_grid
python
statsmodels/statsmodels
statsmodels/distributions/tools.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py
BSD-3-Clause
def average_grid(values, coords=None, _method="slicing"):
    """Compute average for each cell in grid using endpoints

    Parameters
    ----------
    values : array_like
        Values on a grid that will average over corner points of each cell.
    coords : None or list of array_like
        Grid coordinates for each axis use to compute volumne of cell.
        If None, then averaged values are not rescaled.
    _method : {"slicing", "convolve"}
        Grid averaging is implemented using numpy "slicing" or using
        scipy.signal "convolve".

    Returns
    -------
    Grid with averaged cell values.
    """
    ndim = values.ndim
    if _method == "slicing":
        out = values.copy()
        for axis in range(ndim):
            # midpoint of adjacent grid points along this axis
            lo = [slice(None)] * ndim
            hi = [slice(None)] * ndim
            lo[axis] = slice(None, -1)
            hi[axis] = slice(1, None)
            out = (out[tuple(lo)] + out[tuple(hi)]) / 2
    elif _method == "convolve":
        from scipy import signal
        kernel = 0.5**ndim * np.ones([2] * ndim)
        out = signal.convolve(values, kernel, mode="valid")

    if coords is not None:
        # rescale by the volume of each cell, built up axis by axis
        dx = np.array(1)
        for axis in range(ndim):
            dx = dx[..., None] * np.diff(coords[axis])
        out = out * dx
    return out
Compute average for each cell in grid using endpoints Parameters ---------- values : array_like Values on a grid that will average over corner points of each cell. coords : None or list of array_like Grid coordinates for each axis use to compute volumne of cell. If None, then averaged values are not rescaled. _method : {"slicing", "convolve"} Grid averaging is implemented using numpy "slicing" or using scipy.signal "convolve". Returns ------- Grid with averaged cell values.
average_grid
python
statsmodels/statsmodels
statsmodels/distributions/tools.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py
BSD-3-Clause
def nearest_matrix_margins(mat, maxiter=100, tol=1e-8):
    """nearest matrix with uniform margins

    Parameters
    ----------
    mat : array_like, 2-D
        Matrix that will be converted to have uniform margins.
        Currently, `mat` has to be two dimensional.
    maxiter : int
        Maximum number of iterations.
    tol : float
        Tolerance for convergence, defined for difference between largest
        and smallest margin in each dimension.

    Returns
    -------
    ndarray, nearest matrix with uniform margins.

    Notes
    -----
    This function is intended for internal use and will be generalized in
    future. API will change.

    changed in 0.14 to support k_dim > 2.
    """
    # cast to float so the in-place divisions below work for integer input
    # (in-place true-divide on an int array raises a casting TypeError)
    pc = np.asarray(mat, dtype=float)
    converged = False

    for _ in range(maxiter):
        pc0 = pc.copy()
        for ax in range(pc.ndim):
            # margins over all axes except `ax`
            axs = tuple([i for i in range(pc.ndim) if not i == ax])
            pc0 /= pc.sum(axis=axs, keepdims=True)
        pc = pc0
        pc /= pc.sum()

        # converged when every margin is uniform within tol (peak-to-peak)
        mptps = []
        for ax in range(pc.ndim):
            axs = tuple([i for i in range(pc.ndim) if not i == ax])
            marg = pc.sum(axis=axs, keepdims=False)
            mptps.append(np.ptp(marg))
        if max(mptps) < tol:
            converged = True
            break

    if not converged:
        from statsmodels.tools.sm_exceptions import ConvergenceWarning
        warnings.warn("Iterations did not converge, maxiter reached",
                      ConvergenceWarning)
    return pc
nearest matrix with uniform margins Parameters ---------- mat : array_like, 2-D Matrix that will be converted to have uniform margins. Currently, `mat` has to be two dimensional. maxiter : in Maximum number of iterations. tol : float Tolerance for convergence, defined for difference between largest and smallest margin in each dimension. Returns ------- ndarray, nearest matrix with uniform margins. Notes ----- This function is intended for internal use and will be generalized in future. API will change. changed in 0.14 to support k_dim > 2.
nearest_matrix_margins
python
statsmodels/statsmodels
statsmodels/distributions/tools.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py
BSD-3-Clause
def _rankdata_no_ties(x): """rankdata without ties for 2-d array This is a simplified version for ranking data if there are no ties. Works vectorized across columns. See Also -------- scipy.stats.rankdata """ nobs, k_vars = x.shape ranks = np.ones((nobs, k_vars)) sidx = np.argsort(x, axis=0) ranks[sidx, np.arange(k_vars)] = np.arange(1, nobs + 1)[:, None] return ranks
rankdata without ties for 2-d array This is a simplified version for ranking data if there are no ties. Works vectorized across columns. See Also -------- scipy.stats.rankdata
_rankdata_no_ties
python
statsmodels/statsmodels
statsmodels/distributions/tools.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py
BSD-3-Clause
def frequencies_fromdata(data, k_bins, use_ranks=True):
    """count of observations in bins (histogram)

    currently only for bivariate data

    Parameters
    ----------
    data : array_like
        Bivariate data with observations in rows and two columns. Binning is
        in unit rectangle [0, 1]^2. If use_rank is False, then data should be
        in unit interval.
    k_bins : int
        Number of bins along each dimension in the histogram
    use_ranks : bool
        If use_rank is True, then data will be converted to ranks without
        tie handling.

    Returns
    -------
    bin counts : ndarray
        Frequencies are the number of observations in a given bin.
        Bin counts are a 2-dim array with k_bins rows and k_bins columns.

    Notes
    -----
    This function is intended for internal use and will be generalized in
    future. API will change.
    """
    data = np.asarray(data)
    k_dim = data.shape[-1]
    # grid with k_bins + 1 edges per dimension
    grid = _Grid([k_bins + 1] * k_dim, eps=0)
    if use_ranks:
        # rank-transform columns into (0, 1) without tie handling
        data = _rankdata_no_ties(data) / (data.shape[0] + 1)
        # alternatives: scipy handles ties, but uses np.apply_along_axis
        # rvs = stats.rankdata(rvs, axis=0) / (rvs.shape[0] + 1)
        # rvs = (np.argsort(np.argsort(rvs, axis=0), axis=0) + 1
        #        ) / (rvs.shape[0] + 1)
    freqr, _ = np.histogramdd(data, bins=grid.x_marginal)
    return freqr
count of observations in bins (histogram) currently only for bivariate data Parameters ---------- data : array_like Bivariate data with observations in rows and two columns. Binning is in unit rectangle [0, 1]^2. If use_rank is False, then data should be in unit interval. k_bins : int Number of bins along each dimension in the histogram use_ranks : bool If use_rank is True, then data will be converted to ranks without tie handling. Returns ------- bin counts : ndarray Frequencies are the number of observations in a given bin. Bin counts are a 2-dim array with k_bins rows and k_bins columns. Notes ----- This function is intended for internal use and will be generalized in future. API will change.
frequencies_fromdata
python
statsmodels/statsmodels
statsmodels/distributions/tools.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py
BSD-3-Clause
def approx_copula_pdf(copula, k_bins=10, force_uniform=True, use_pdf=False):
    """Histogram probabilities as approximation to a copula density.

    Parameters
    ----------
    copula : instance
        Instance of a copula class. Only the ``pdf`` method is used.
    k_bins : int
        Number of bins along each dimension in the approximating histogram.
    force_uniform : bool
        If true, then the pdf grid will be adjusted to have uniform margins
        using `nearest_matrix_margin`.
        If false, then no adjustment is done and the margins may not be
        exactly uniform.
    use_pdf : bool
        If false, then the grid cell probabilities will be computed from the
        copula cdf.
        If true, then the density, ``pdf``, is used and cell probabilities
        are approximated by averaging the pdf of the cell corners. This is
        only useful if the cdf is not available.

    Returns
    -------
    bin probabilites : ndarray
        Probability that random variable falls in given bin. This corresponds
        to a discrete distribution, and is not scaled to bin size to form a
        piecewise uniform, histogram density.
        Bin probabilities are a k-dim array with k_bins segments in each
        dimensionrows.

    Notes
    -----
    This function is intended for internal use and will be generalized in
    future. API will change.
    """
    k_dim = copula.k_dim
    # k grid points per dimension give k_bins cells per dimension
    k = k_bins + 1
    ks = tuple([k] * k_dim)

    if use_pdf:
        # eps keeps evaluation points away from the boundary, where the
        # pdf may be undefined
        g = _Grid([k] * k_dim, eps=0.1 / k_bins)
        pdfg = copula.pdf(g.x_flat).reshape(*ks)
        # correct for bin size
        pdfg *= 1 / k**k_dim
        # cell probability as average of the pdf at the cell corners
        ag = average_grid(pdfg)
        if force_uniform:
            pdf_grid = nearest_matrix_margins(ag, maxiter=100, tol=1e-8)
        else:
            pdf_grid = ag / ag.sum()
    else:
        g = _Grid([k] * k_dim, eps=1e-6)
        cdfg = copula.cdf(g.x_flat).reshape(*ks)
        # correct for bin size
        pdf_grid = cdf2prob_grid(cdfg, prepend=None)
        # TODO: check boundary approximation, eg. undefined at zero
        # for now just normalize
        pdf_grid /= pdf_grid.sum()

    return pdf_grid
Histogram probabilities as approximation to a copula density. Parameters ---------- copula : instance Instance of a copula class. Only the ``pdf`` method is used. k_bins : int Number of bins along each dimension in the approximating histogram. force_uniform : bool If true, then the pdf grid will be adjusted to have uniform margins using `nearest_matrix_margin`. If false, then no adjustment is done and the margins may not be exactly uniform. use_pdf : bool If false, then the grid cell probabilities will be computed from the copula cdf. If true, then the density, ``pdf``, is used and cell probabilities are approximated by averaging the pdf of the cell corners. This is only useful if the cdf is not available. Returns ------- bin probabilites : ndarray Probability that random variable falls in given bin. This corresponds to a discrete distribution, and is not scaled to bin size to form a piecewise uniform, histogram density. Bin probabilities are a k-dim array with k_bins segments in each dimensionrows. Notes ----- This function is intended for internal use and will be generalized in future. API will change.
approx_copula_pdf
python
statsmodels/statsmodels
statsmodels/distributions/tools.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py
BSD-3-Clause
def _eval_bernstein_1d(x, fvals, method="binom"): """Evaluate 1-dimensional bernstein polynomial given grid of values. experimental, comparing methods Parameters ---------- x : array_like Values at which to evaluate the Bernstein polynomial. fvals : ndarray Grid values of coefficients for Bernstein polynomial basis in the weighted sum. method: "binom", "beta" or "bpoly" Method to construct Bernstein polynomial basis, used for comparison of parameterizations. - "binom" uses pmf of Binomial distribution - "beta" uses pdf of Beta distribution - "bpoly" uses one interval in scipy.interpolate.BPoly Returns ------- Bernstein polynomial at evaluation points, weighted sum of Bernstein polynomial basis. """ k_terms = fvals.shape[-1] xx = np.asarray(x) k = np.arange(k_terms).astype(float) n = k_terms - 1. if method.lower() == "binom": # Divide by 0 RuntimeWarning here with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) poly_base = stats.binom.pmf(k, n, xx[..., None]) bp_values = (fvals * poly_base).sum(-1) elif method.lower() == "bpoly": bpb = interpolate.BPoly(fvals[:, None], [0., 1]) bp_values = bpb(x) elif method.lower() == "beta": # Divide by 0 RuntimeWarning here with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) poly_base = stats.beta.pdf(xx[..., None], k + 1, n - k + 1) / (n + 1) bp_values = (fvals * poly_base).sum(-1) else: raise ValueError("method not recogized") return bp_values
Evaluate 1-dimensional bernstein polynomial given grid of values. experimental, comparing methods Parameters ---------- x : array_like Values at which to evaluate the Bernstein polynomial. fvals : ndarray Grid values of coefficients for Bernstein polynomial basis in the weighted sum. method: "binom", "beta" or "bpoly" Method to construct Bernstein polynomial basis, used for comparison of parameterizations. - "binom" uses pmf of Binomial distribution - "beta" uses pdf of Beta distribution - "bpoly" uses one interval in scipy.interpolate.BPoly Returns ------- Bernstein polynomial at evaluation points, weighted sum of Bernstein polynomial basis.
_eval_bernstein_1d
python
statsmodels/statsmodels
statsmodels/distributions/tools.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py
BSD-3-Clause
def _eval_bernstein_2d(x, fvals): """Evaluate 2-dimensional bernstein polynomial given grid of values experimental Parameters ---------- x : array_like Values at which to evaluate the Bernstein polynomial. fvals : ndarray Grid values of coefficients for Bernstein polynomial basis in the weighted sum. Returns ------- Bernstein polynomial at evaluation points, weighted sum of Bernstein polynomial basis. """ k_terms = fvals.shape k_dim = fvals.ndim if k_dim != 2: raise ValueError("`fval` needs to be 2-dimensional") xx = np.atleast_2d(x) if xx.shape[1] != 2: raise ValueError("x needs to be bivariate and have 2 columns") x1, x2 = xx.T n1, n2 = k_terms[0] - 1, k_terms[1] - 1 k1 = np.arange(k_terms[0]).astype(float) k2 = np.arange(k_terms[1]).astype(float) # we are building a nobs x n1 x n2 array poly_base = (stats.binom.pmf(k1[None, :, None], n1, x1[:, None, None]) * stats.binom.pmf(k2[None, None, :], n2, x2[:, None, None])) bp_values = (fvals * poly_base).sum(-1).sum(-1) return bp_values
Evaluate 2-dimensional bernstein polynomial given grid of values experimental Parameters ---------- x : array_like Values at which to evaluate the Bernstein polynomial. fvals : ndarray Grid values of coefficients for Bernstein polynomial basis in the weighted sum. Returns ------- Bernstein polynomial at evaluation points, weighted sum of Bernstein polynomial basis.
_eval_bernstein_2d
python
statsmodels/statsmodels
statsmodels/distributions/tools.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py
BSD-3-Clause
def _eval_bernstein_dd(x, fvals):
    """Evaluate d-dimensional bernstein polynomial given grid of values

    experimental

    Parameters
    ----------
    x : array_like
        Values at which to evaluate the Bernstein polynomial.
    fvals : ndarray
        Grid values of coefficients for Bernstein polynomial basis in the
        weighted sum.

    Returns
    -------
    Bernstein polynomial at evaluation points, weighted sum of Bernstein
    polynomial basis.
    """
    k_terms = fvals.shape
    k_dim = fvals.ndim
    xx = np.atleast_2d(x)

    # The loop builds the d-dimensional basis incrementally: in iteration i
    # the log-pmf for grid dimension i is added, and broadcasting prepends a
    # new axis, so after the loop poly_base has one axis per grid dimension
    # (in reversed order) followed by the observation axis.
    poly_base = np.zeros(x.shape[0])
    for i in range(k_dim):
        ki = np.arange(k_terms[i]).astype(float)
        # append i+1 trailing axes so ki broadcasts against the axes
        # accumulated in earlier iterations plus the observation axis
        for _ in range(i+1):
            ki = ki[..., None]
        ni = k_terms[i] - 1
        xi = xx[:, i]
        # accumulate log-pmfs (product of binomial pmfs in the exponent,
        # for numerical stability)
        poly_base = poly_base[None, ...] + stats.binom._logpmf(ki, ni, xi)

    poly_base = np.exp(poly_base)
    # fvals.T matches the reversed axis order of poly_base; trailing axis
    # broadcasts over observations
    bp_values = fvals.T[..., None] * poly_base

    # contract all grid axes, leaving only the observation axis
    for i in range(k_dim):
        bp_values = bp_values.sum(0)

    return bp_values
Evaluate d-dimensional bernstein polynomial given grid of values experimental Parameters ---------- x : array_like Values at which to evaluate the Bernstein polynomial. fvals : ndarray Grid values of coefficients for Bernstein polynomial basis in the weighted sum. Returns ------- Bernstein polynomial at evaluation points, weighted sum of Bernstein polynomial basis.
_eval_bernstein_dd
python
statsmodels/statsmodels
statsmodels/distributions/tools.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py
BSD-3-Clause
def _ecdf_mv(data, method="seq", use_ranks=True): """ Multivariate empiricial distribution function, empirical copula Notes ----- Method "seq" is faster than method "brute", but supports mainly bivariate case. Speed advantage of "seq" is increasing in number of observations and decreasing in number of variables. (see Segers ...) Warning: This does not handle ties. The ecdf is based on univariate ranks without ties. The assignment of ranks to ties depends on the sorting algorithm and the initial ordering of the data. When the original data is used instead of ranks, then method "brute" computes the correct ecdf counts even in the case of ties. """ x = np.asarray(data) n = x.shape[0] if use_ranks: x = _rankdata_no_ties(x) / n if method == "brute": count = [((x <= x[i]).all(1)).sum() for i in range(n)] count = np.asarray(count) elif method.startswith("seq"): sort_idx0 = np.argsort(x[:, 0]) x_s0 = x[sort_idx0] x1 = x_s0[:, 1:] count_smaller = [(x1[:i] <= x1[i]).all(1).sum() + 1 for i in range(n)] count = np.empty(x.shape[0]) count[sort_idx0] = count_smaller else: raise ValueError("method not available") return count, x
Multivariate empirical distribution function, empirical copula Notes ----- Method "seq" is faster than method "brute", but supports mainly bivariate case. Speed advantage of "seq" is increasing in number of observations and decreasing in number of variables. (see Segers ...) Warning: This does not handle ties. The ecdf is based on univariate ranks without ties. The assignment of ranks to ties depends on the sorting algorithm and the initial ordering of the data. When the original data is used instead of ranks, then method "brute" computes the correct ecdf counts even in the case of ties.
_ecdf_mv
python
statsmodels/statsmodels
statsmodels/distributions/tools.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/tools.py
BSD-3-Clause
def get_distr(self, params):
    """Return a frozen instance of the discrete distribution.

    Parameters
    ----------
    params : sequence
        Positional parameters passed through to ``self.distr``.

    Returns
    -------
    Frozen distribution instance of the discrete distribution.
    """
    return self.distr(*params)
frozen distribution instance of the discrete distribution.
get_distr
python
statsmodels/statsmodels
statsmodels/distributions/discrete.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/discrete.py
BSD-3-Clause
def from_data(cls, data, k_bins):
    """Create distribution instance from data using histogram binning.

    Classmethod to construct a distribution instance.

    Parameters
    ----------
    data : array_like
        Data with observation in rows and random variables in columns.
        Data can be 1-dimensional in the univariate case.
    k_bins : int or list
        Number or edges of bins to be used in numpy histogramdd. If k_bins
        is a scalar int, then the number of bins of each component will be
        equal to it.

    Returns
    -------
    Instance of a Bernstein distribution
    """
    arr = np.asarray(data)
    if np.any(arr < 0) or np.any(arr > 1):
        raise ValueError("data needs to be in [0, 1]")

    if arr.ndim == 1:
        arr = arr[:, None]

    n_dim = arr.shape[1]
    if np.size(k_bins) == 1:
        k_bins = [k_bins] * n_dim
    # leading bin on [-1/n, 0] catches exact zeros; remaining edges at j/n
    edges_per_dim = [np.linspace(-1 / nb, 1, nb + 2) for nb in k_bins]
    counts, edges = np.histogramdd(arr, bins=edges_per_dim, density=False)
    # TODO: check when we have zero observations, which bin?
    # sanity check: second edge of every axis must be exactly 0
    assert all(edge[1] == 0 for edge in edges)
    counts /= len(arr)

    return cls(prob2cdf_grid(counts))
Create distribution instance from data using histogram binning. Classmethod to construct a distribution instance. Parameters ---------- data : array_like Data with observation in rows and random variables in columns. Data can be 1-dimensional in the univariate case. k_bins : int or list Number or edges of bins to be used in numpy histogramdd. If k_bins is a scalar int, then the number of bins of each component will be equal to it. Returns ------- Instance of a Bernstein distribution
from_data
python
statsmodels/statsmodels
statsmodels/distributions/bernstein.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/bernstein.py
BSD-3-Clause
def cdf(self, x):
    """cdf values evaluated at x.

    Parameters
    ----------
    x : array_like
        Points of multivariate random variable at which cdf is evaluated.
        This can be a single point with length equal to the dimension of
        the random variable, or two dimensional with points (observations)
        in rows and random variables in columns.
        In the univariate case, a 1-dimensional x will be interpreted as
        different points for evaluation.

    Returns
    -------
    cdf values

    Notes
    -----
    Warning: 2-dim x with many points can be memory intensive because
    currently the bernstein polynomials will be evaluated in a fully
    vectorized computation.
    """
    x = np.asarray(x)
    if x.ndim == 1 and self.k_dim == 1:
        # univariate: interpret 1-d x as a list of evaluation points
        x = x[:, None]
    cdf_ = _eval_bernstein_dd(x, self.cdf_grid)
    return cdf_
cdf values evaluated at x. Parameters ---------- x : array_like Points of multivariate random variable at which cdf is evaluated. This can be a single point with length equal to the dimension of the random variable, or two dimensional with points (observations) in rows and random variables in columns. In the univariate case, a 1-dimensional x will be interpreted as different points for evaluation. Returns ------- pdf values Notes ----- Warning: 2-dim x with many points can be memory intensive because currently the bernstein polynomials will be evaluated in a fully vectorized computation.
cdf
python
statsmodels/statsmodels
statsmodels/distributions/bernstein.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/bernstein.py
BSD-3-Clause
def pdf(self, x):
    """pdf values evaluated at x.

    Parameters
    ----------
    x : array_like
        Points of multivariate random variable at which pdf is evaluated.
        This can be a single point with length equal to the dimension of
        the random variable, or two dimensional with points (observations)
        in rows and random variables in columns.
        In the univariate case, a 1-dimensional x will be interpreted as
        different points for evaluation.

    Returns
    -------
    pdf values

    Notes
    -----
    Warning: 2-dim x with many points can be memory intensive because
    currently the bernstein polynomials will be evaluated in a fully
    vectorized computation.
    """
    x = np.asarray(x)
    if x.ndim == 1 and self.k_dim == 1:
        # univariate: interpret 1-d x as a list of evaluation points
        x = x[:, None]

    # TODO: check usage of k_grid_product. Should this go into eval?
    # k_grid_product rescales the basis weights to density units
    pdf_ = self.k_grid_product * _eval_bernstein_dd(x, self.prob_grid)
    return pdf_
pdf values evaluated at x. Parameters ---------- x : array_like Points of multivariate random variable at which pdf is evaluated. This can be a single point with length equal to the dimension of the random variable, or two dimensional with points (observations) in rows and random variables in columns. In the univariate case, a 1-dimensional x will be interpreted as different points for evaluation. Returns ------- cdf values Notes ----- Warning: 2-dim x with many points can be memory intensive because currently the bernstein polynomials will be evaluated in a fully vectorized computation.
pdf
python
statsmodels/statsmodels
statsmodels/distributions/bernstein.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/bernstein.py
BSD-3-Clause
def get_marginal(self, idx):
    """Get marginal BernsteinDistribution.

    Parameters
    ----------
    idx : int or list of int
        Index or indices of the component for which the marginal
        distribution is returned.

    Returns
    -------
    BernsteinDistribution instance for the marginal distribution.
    """
    # univariate distribution is its own marginal
    if self.k_dim == 1:
        return self

    if np.shape(idx) == ():
        idx = [idx]

    # keep the full grid (a slice) on the requested axes, take the last
    # grid point (cdf == marginal cdf) on all other axes
    slicer = [-1] * self.k_dim
    for pos in idx:
        slicer[pos] = slice(None)
    marginal_cdf = self.cdf_grid[tuple(slicer)]

    return BernsteinDistribution(marginal_cdf)
Get marginal BernsteinDistribution. Parameters ---------- idx : int or list of int Index or indices of the component for which the marginal distribution is returned. Returns ------- BernsteinDistribution instance for the marginal distribution.
get_marginal
python
statsmodels/statsmodels
statsmodels/distributions/bernstein.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/bernstein.py
BSD-3-Clause
def rvs(self, nobs):
    """Generate random numbers from distribution.

    Parameters
    ----------
    nobs : int
        Number of random observations to generate.

    Returns
    -------
    ndarray
        Sample with observations in rows and components in columns.
    """
    # allocate the nobs draws to grid cells with multinomial probabilities
    rvs_mnl = np.random.multinomial(nobs, self.prob_grid.flatten())
    k_comp = self.k_dim
    rvs_m = []
    for i in range(len(rvs_mnl)):
        if rvs_mnl[i] != 0:
            # recover the multi-index of the flattened grid cell
            idx = np.unravel_index(i, self.prob_grid.shape)
            rvsi = []
            for j in range(k_comp):
                n = self.k_grid[j]
                xgi = self._grid.x_marginal[j][idx[j]]
                # Note: x_marginal starts at 0
                #       x_marginal ends with 1 but that is not used by idx
                # NOTE(review): beta parameters are (n*x + 1, n*(1-x) + 0);
                # the asymmetric "+ 0" looks suspicious -- confirm intended
                rvsi.append(stats.beta.rvs(n * xgi + 1, n * (1-xgi) + 0,
                                           size=rvs_mnl[i]))
            rvs_m.append(np.column_stack(rvsi))

    rvsm = np.concatenate(rvs_m)
    return rvsm
Generate random numbers from distribution. Parameters ---------- nobs : int Number of random observations to generate.
rvs
python
statsmodels/statsmodels
statsmodels/distributions/bernstein.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/bernstein.py
BSD-3-Clause
def monotone_fn_inverter(fn, x, vectorized=True, **keywords):
    """
    Given a monotone function fn (no checking is done to verify
    monotonicity) and a set of x values, return a linearly interpolated
    approximation to its inverse from its values on x.
    """
    x = np.asarray(x)
    if vectorized:
        y = fn(x, **keywords)
    else:
        # fall back to pointwise evaluation for non-vectorized callables
        y = np.array([fn(xi, **keywords) for xi in x])

    # sort by function value so the interpolator gets increasing abscissae
    order = np.argsort(y)
    return interp1d(y[order], x[order])
Given a monotone function fn (no checking is done to verify monotonicity) and a set of x values, return a linearly interpolated approximation to its inverse from its values on x.
monotone_fn_inverter
python
statsmodels/statsmodels
statsmodels/distributions/empirical_distribution.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/empirical_distribution.py
BSD-3-Clause
def _make_index(prob,size): """ Returns a boolean index for given probabilities. Notes ----- prob = [.75,.25] means that there is a 75% chance of the first column being True and a 25% chance of the second column being True. The columns are mutually exclusive. """ rv = np.random.uniform(size=(size,1)) cumprob = np.cumsum(prob) return np.logical_and(np.r_[0,cumprob[:-1]] <= rv, rv < cumprob)
Returns a boolean index for given probabilities. Notes ----- prob = [.75,.25] means that there is a 75% chance of the first column being True and a 25% chance of the second column being True. The columns are mutually exclusive.
_make_index
python
statsmodels/statsmodels
statsmodels/distributions/mixture_rvs.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/mixture_rvs.py
BSD-3-Clause
def mixture_rvs(prob, size, dist, kwargs=None):
    """
    Sample from a mixture of distributions.

    Parameters
    ----------
    prob : array_like
        Probability of sampling from each distribution in dist
    size : int
        The length of the returned sample.
    dist : array_like
        An iterable of distributions objects from scipy.stats.
    kwargs : tuple of dicts, optional
        A tuple of dicts.  Each dict in kwargs can have keys loc, scale, and
        args to be passed to the respective distribution in dist.  If not
        provided, the distribution defaults are used.

    Examples
    --------
    Say we want 5000 random variables from mixture of normals with two
    distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
    first with probability .75 and the second with probability .25.

    >>> from scipy import stats
    >>> prob = [.75,.25]
    >>> Y = mixture_rvs(prob, 5000, dist=[stats.norm, stats.norm],
    ...                 kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
    """
    if len(prob) != len(dist):
        raise ValueError("You must provide as many probabilities as distributions")
    if not np.allclose(np.sum(prob), 1):
        raise ValueError("prob does not sum to 1")

    if kwargs is None:
        kwargs = ({},) * len(prob)

    # one-hot membership: row i is True in exactly one component column
    membership = _make_index(prob, size)
    sample = np.empty(size)
    for j, component in enumerate(dist):
        mask = membership[..., j]
        opts = kwargs[j]
        sample[mask] = component.rvs(*opts.get('args', ()),
                                     loc=opts.get('loc', 0),
                                     scale=opts.get('scale', 1),
                                     size=mask.sum())
    return sample
Sample from a mixture of distributions. Parameters ---------- prob : array_like Probability of sampling from each distribution in dist size : int The length of the returned sample. dist : array_like An iterable of distributions objects from scipy.stats. kwargs : tuple of dicts, optional A tuple of dicts. Each dict in kwargs can have keys loc, scale, and args to be passed to the respective distribution in dist. If not provided, the distribution defaults are used. Examples -------- Say we want 5000 random variables from mixture of normals with two distributions norm(-1,.5) and norm(1,.5) and we want to sample from the first with probability .75 and the second with probability .25. >>> from scipy import stats >>> prob = [.75,.25] >>> Y = mixture_rvs(prob, 5000, dist=[stats.norm, stats.norm], ... kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
mixture_rvs
python
statsmodels/statsmodels
statsmodels/distributions/mixture_rvs.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/mixture_rvs.py
BSD-3-Clause
def pdf(self, x, prob, dist, kwargs=None):
    """
    pdf a mixture of distributions.

    Parameters
    ----------
    x : array_like
        Array containing locations where the PDF should be evaluated
    prob : array_like
        Probability of sampling from each distribution in dist
    dist : array_like
        An iterable of distributions objects from scipy.stats.
    kwargs : tuple of dicts, optional
        A tuple of dicts.  Each dict in kwargs can have keys loc, scale, and
        args to be passed to the respective distribution in dist.  If not
        provided, the distribution defaults are used.

    Examples
    --------
    Say we want the pdf of a mixture of normals with two distributions
    norm(-1,.5) and norm(1,.5), mixed with probabilities .75 and .25.

    >>> import numpy as np
    >>> from scipy import stats
    >>> from statsmodels.distributions.mixture_rvs import MixtureDistribution
    >>> x = np.arange(-4.0, 4.0, 0.01)
    >>> prob = [.75,.25]
    >>> mixture = MixtureDistribution()
    >>> Y = mixture.pdf(x, prob, dist=[stats.norm, stats.norm],
    ...                 kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
    """
    if len(prob) != len(dist):
        raise ValueError("You must provide as many probabilities as distributions")
    if not np.allclose(np.sum(prob), 1):
        raise ValueError("prob does not sum to 1")

    if kwargs is None:
        kwargs = ({},) * len(prob)

    # mixture pdf is the probability-weighted sum of component pdfs;
    # broadcasting follows scipy's pdf for the given x
    total = 0.0
    for weight, component, opts in zip(prob, dist, kwargs):
        total = total + weight * component.pdf(x, *opts.get('args', ()),
                                               loc=opts.get('loc', 0),
                                               scale=opts.get('scale', 1))
    return total
pdf a mixture of distributions. Parameters ---------- x : array_like Array containing locations where the PDF should be evaluated prob : array_like Probability of sampling from each distribution in dist dist : array_like An iterable of distributions objects from scipy.stats. kwargs : tuple of dicts, optional A tuple of dicts. Each dict in kwargs can have keys loc, scale, and args to be passed to the respective distribution in dist. If not provided, the distribution defaults are used. Examples -------- Say we want 5000 random variables from mixture of normals with two distributions norm(-1,.5) and norm(1,.5) and we want to sample from the first with probability .75 and the second with probability .25. >>> import numpy as np >>> from scipy import stats >>> from statsmodels.distributions.mixture_rvs import MixtureDistribution >>> x = np.arange(-4.0, 4.0, 0.01) >>> prob = [.75,.25] >>> mixture = MixtureDistribution() >>> Y = mixture.pdf(x, prob, dist=[stats.norm, stats.norm], ... kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
pdf
python
statsmodels/statsmodels
statsmodels/distributions/mixture_rvs.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/mixture_rvs.py
BSD-3-Clause
def cdf(self, x, prob, dist, kwargs=None):
    """
    cdf of a mixture of distributions.

    Parameters
    ----------
    x : array_like
        Array containing locations where the CDF should be evaluated
    prob : array_like
        Probability of sampling from each distribution in dist
    dist : array_like
        An iterable of distributions objects from scipy.stats.
    kwargs : tuple of dicts, optional
        A tuple of dicts.  Each dict in kwargs can have keys loc, scale, and
        args to be passed to the respective distribution in dist.  If not
        provided, the distribution defaults are used.

    Examples
    --------
    Say we want the cdf of a mixture of normals with two distributions
    norm(-1,.5) and norm(1,.5), mixed with probabilities .75 and .25.

    >>> import numpy as np
    >>> from scipy import stats
    >>> from statsmodels.distributions.mixture_rvs import MixtureDistribution
    >>> x = np.arange(-4.0, 4.0, 0.01)
    >>> prob = [.75,.25]
    >>> mixture = MixtureDistribution()
    >>> Y = mixture.cdf(x, prob, dist=[stats.norm, stats.norm],
    ...                 kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
    """
    if len(prob) != len(dist):
        raise ValueError("You must provide as many probabilities as distributions")
    if not np.allclose(np.sum(prob), 1):
        raise ValueError("prob does not sum to 1")

    if kwargs is None:
        kwargs = ({},) * len(prob)

    # mixture cdf is the probability-weighted sum of component cdfs;
    # broadcasting follows scipy's cdf for the given x
    cdf_ = 0.0
    for i in range(len(prob)):
        loc = kwargs[i].get('loc', 0)
        scale = kwargs[i].get('scale', 1)
        args = kwargs[i].get('args', ())
        cdf_ = cdf_ + prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
    return cdf_
cdf of a mixture of distributions. Parameters ---------- x : array_like Array containing locations where the CDF should be evaluated prob : array_like Probability of sampling from each distribution in dist size : int The length of the returned sample. dist : array_like An iterable of distributions objects from scipy.stats. kwargs : tuple of dicts, optional A tuple of dicts. Each dict in kwargs can have keys loc, scale, and args to be passed to the respective distribution in dist. If not provided, the distribution defaults are used. Examples -------- Say we want 5000 random variables from mixture of normals with two distributions norm(-1,.5) and norm(1,.5) and we want to sample from the first with probability .75 and the second with probability .25. >>> import numpy as np >>> from scipy import stats >>> from statsmodels.distributions.mixture_rvs import MixtureDistribution >>> x = np.arange(-4.0, 4.0, 0.01) >>> prob = [.75,.25] >>> mixture = MixtureDistribution() >>> Y = mixture.pdf(x, prob, dist=[stats.norm, stats.norm], ... kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
cdf
python
statsmodels/statsmodels
statsmodels/distributions/mixture_rvs.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/mixture_rvs.py
BSD-3-Clause
def mv_mixture_rvs(prob, size, dist, nvars, **kwargs):
    """
    Sample from a mixture of multivariate distributions.

    Parameters
    ----------
    prob : array_like
        Probability of sampling from each distribution in dist
    size : int
        The length of the returned sample.
    dist : array_like
        An iterable of distributions instances with callable method rvs.
    nvars : int
        dimension of the multivariate distribution, could be inferred instead
    kwargs : tuple of dicts, optional
        ignored

    Examples
    --------
    Say we want 2000 random variables from mixture of normals with two
    multivariate normal distributions, and we want to sample from the
    first with probability .4 and the second with probability .6.

    import statsmodels.sandbox.distributions.mv_normal as mvd

    cov3 = np.array([[ 1.  ,  0.5 ,  0.75],
                     [ 0.5 ,  1.5 ,  0.6 ],
                     [ 0.75,  0.6 ,  2.  ]])

    mu = np.array([-1, 0.0, 2.0])
    mu2 = np.array([4, 2.0, 2.0])
    mvn3 = mvd.MVNormal(mu, cov3)
    mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
    rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
    """
    if len(prob) != len(dist):
        raise ValueError("You must provide as many probabilities as distributions")
    if not np.allclose(np.sum(prob), 1):
        raise ValueError("prob does not sum to 1")

    # NOTE(review): dead branch -- ``kwargs`` is collected via ``**`` and is
    # therefore always a dict, never None; kwargs is unused below anyway.
    if kwargs is None:
        kwargs = ({},)*len(prob)

    # one-hot membership: row i is True in exactly one component column
    idx = _make_index(prob,size)
    sample = np.empty((size, nvars))
    for i in range(len(prob)):
        sample_idx = idx[...,i]
        sample_size = sample_idx.sum()
        #loc = kwargs[i].get('loc',0)
        #scale = kwargs[i].get('scale',1)
        #args = kwargs[i].get('args',())
        # use int to avoid numpy bug with np.random.multivariate_normal
        sample[sample_idx] = dist[i].rvs(size=int(sample_size))
    return sample
Sample from a mixture of multivariate distributions. Parameters ---------- prob : array_like Probability of sampling from each distribution in dist size : int The length of the returned sample. dist : array_like An iterable of distributions instances with callable method rvs. nvargs : int dimension of the multivariate distribution, could be inferred instead kwargs : tuple of dicts, optional ignored Examples -------- Say we want 2000 random variables from mixture of normals with two multivariate normal distributions, and we want to sample from the first with probability .4 and the second with probability .6. import statsmodels.sandbox.distributions.mv_normal as mvd cov3 = np.array([[ 1. , 0.5 , 0.75], [ 0.5 , 1.5 , 0.6 ], [ 0.75, 0.6 , 2. ]]) mu = np.array([-1, 0.0, 2.0]) mu2 = np.array([4, 2.0, 2.0]) mvn3 = mvd.MVNormal(mu, cov3) mvn32 = mvd.MVNormal(mu2, cov3/2., 4) rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
mv_mixture_rvs
python
statsmodels/statsmodels
statsmodels/distributions/mixture_rvs.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/mixture_rvs.py
BSD-3-Clause
def deriv(self, t, *args):
    """First derivative of the dependence function

    implemented through numerical differentiation
    """
    t = np.atleast_1d(t)
    # complex-step derivative of the scalar-valued evaluate function
    # NOTE(review): *args is accepted but not forwarded to self.evaluate --
    # confirm whether extra dependence-function parameters should be passed
    return _approx_fprime_cs_scalar(t, self.evaluate)
First derivative of the dependence function implemented through numerical differentiation
deriv
python
statsmodels/statsmodels
statsmodels/distributions/copula/depfunc_ev.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/depfunc_ev.py
BSD-3-Clause
def deriv2(self, t, *args):
    """Second derivative of the dependence function

    implemented through numerical differentiation
    """
    if np.size(t) == 1:
        # scalar input: numerical Hessian of the 1-d function at t
        d2 = approx_hess([t], self.evaluate, args=args)[0]
    else:
        # vectorized input: evaluate pointwise, collecting the single
        # (0, 0) Hessian element for each ti
        d2 = np.array([approx_hess([ti], self.evaluate, args=args)[0, 0]
                       for ti in t])
    return d2
Second derivative of the dependence function implemented through numerical differentiation
deriv2
python
statsmodels/statsmodels
statsmodels/distributions/copula/depfunc_ev.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/depfunc_ev.py
BSD-3-Clause
def rvs(self, nobs=1, cop_args=None, marg_args=None, random_state=None):
    """Draw `n` in the half-open interval ``[0, 1)``.

    Sample the joint distribution.

    Parameters
    ----------
    nobs : int, optional
        Number of samples to generate in the parameter space.
        Default is 1.
    cop_args : tuple
        Copula parameters. If None, then the copula parameters will be
        taken from the ``cop_args`` attribute created when initiializing
        the instance.
    marg_args : list of tuples
        Parameters for the marginal distributions. It can be None if none
        of the marginal distributions have parameters, otherwise it needs
        to be a list of tuples with the same length has the number of
        marginal distributions. The list can contain empty tuples for
        marginal distributions that do not take parameter arguments.
    random_state : {None, int, numpy.random.Generator}, optional
        If `seed` is None then the legacy singleton NumPy generator.
        This will change after 0.13 to use a fresh NumPy ``Generator``,
        so you should explicitly pass a seeded ``Generator`` if you need
        reproducible results.
        If `seed` is an int, a new ``Generator`` instance is used, seeded
        with `seed`.
        If `seed` is already a ``Generator`` instance then that instance is
        used.

    Returns
    -------
    sample : array_like (n, d)
        Sample from the joint distribution.

    Notes
    -----
    The random samples are generated by creating a sample with uniform
    margins from the copula, and using ``ppf`` to convert uniform margins
    to the one specified by the marginal distribution.

    See Also
    --------
    statsmodels.tools.rng_qrng.check_random_state
    """
    if cop_args is None:
        cop_args = self.cop_args
    if marg_args is None:
        marg_args = [()] * self.k_vars

    sample = self.copula.rvs(nobs=nobs, args=cop_args,
                             random_state=random_state)

    for i, dist in enumerate(self.marginals):
        # shrink the uniform sample towards 0.5 by a tiny factor so it
        # stays strictly inside (0, 1) and ppf cannot return +/-inf at
        # the boundary
        sample[:, i] = dist.ppf(0.5 + (1 - 1e-10) * (sample[:, i] - 0.5),
                                *marg_args[i])
    return sample
Draw `n` in the half-open interval ``[0, 1)``. Sample the joint distribution. Parameters ---------- nobs : int, optional Number of samples to generate in the parameter space. Default is 1. cop_args : tuple Copula parameters. If None, then the copula parameters will be taken from the ``cop_args`` attribute created when initiializing the instance. marg_args : list of tuples Parameters for the marginal distributions. It can be None if none of the marginal distributions have parameters, otherwise it needs to be a list of tuples with the same length has the number of marginal distributions. The list can contain empty tuples for marginal distributions that do not take parameter arguments. random_state : {None, int, numpy.random.Generator}, optional If `seed` is None then the legacy singleton NumPy generator. This will change after 0.13 to use a fresh NumPy ``Generator``, so you should explicitly pass a seeded ``Generator`` if you need reproducible results. If `seed` is an int, a new ``Generator`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` instance then that instance is used. Returns ------- sample : array_like (n, d) Sample from the joint distribution. Notes ----- The random samples are generated by creating a sample with uniform margins from the copula, and using ``ppf`` to convert uniform margins to the one specified by the marginal distribution. See Also -------- statsmodels.tools.rng_qrng.check_random_state
rvs
python
statsmodels/statsmodels
statsmodels/distributions/copula/copulas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py
BSD-3-Clause
def cdf(self, y, cop_args=None, marg_args=None):
    """CDF of copula distribution.

    Parameters
    ----------
    y : array_like
        Values of random variable at which to evaluate cdf.
        If 2-dimensional, then components of multivariate random variable
        need to be in columns
    cop_args : tuple
        Copula parameters. If None, then the copula parameters will be
        taken from the ``cop_args`` attribute created when initializing
        the instance.
    marg_args : list of tuples
        Parameters for the marginal distributions. It can be None if none
        of the marginal distributions have parameters, otherwise it needs
        to be a list of tuples with the same length has the number of
        marginal distributions. The list can contain empty tuples for
        marginal distributions that do not take parameter arguments.

    Returns
    -------
    cdf values
    """
    y = np.asarray(y)
    if cop_args is None:
        cop_args = self.cop_args
    if marg_args is None:
        marg_args = [()] * y.shape[-1]

    # transform each component to uniform margins via its marginal cdf
    cdf_marg = [self.marginals[i].cdf(y[..., i], *marg_args[i])
                for i in range(self.k_vars)]

    u = np.column_stack(cdf_marg)
    if y.ndim == 1:
        u = u.squeeze()
    return self.copula.cdf(u, cop_args)
CDF of copula distribution. Parameters ---------- y : array_like Values of random variable at which to evaluate cdf. If 2-dimensional, then components of multivariate random variable need to be in columns cop_args : tuple Copula parameters. If None, then the copula parameters will be taken from the ``cop_args`` attribute created when initiializing the instance. marg_args : list of tuples Parameters for the marginal distributions. It can be None if none of the marginal distributions have parameters, otherwise it needs to be a list of tuples with the same length has the number of marginal distributions. The list can contain empty tuples for marginal distributions that do not take parameter arguments. Returns ------- cdf values
cdf
python
statsmodels/statsmodels
statsmodels/distributions/copula/copulas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py
BSD-3-Clause
def pdf(self, y, cop_args=None, marg_args=None):
    """PDF of copula distribution.

    Parameters
    ----------
    y : array_like
        Values of random variable at which to evaluate cdf.
        If 2-dimensional, then components of multivariate random variable
        need to be in columns
    cop_args : tuple
        Copula parameters. If None, then the copula parameters will be
        taken from the ``cop_args`` attribute created when initiializing
        the instance.
    marg_args : list of tuples
        Parameters for the marginal distributions. It can be None if none
        of the marginal distributions have parameters, otherwise it needs
        to be a list of tuples with the same length has the number of
        marginal distributions. The list can contain empty tuples for
        marginal distributions that do not take parameter arguments.

    Returns
    -------
    pdf values
    """
    # delegate to logpdf (where the actual work happens) and exponentiate
    return np.exp(self.logpdf(y, cop_args=cop_args, marg_args=marg_args))
PDF of copula distribution. Parameters ---------- y : array_like Values of random variable at which to evaluate cdf. If 2-dimensional, then components of multivariate random variable need to be in columns cop_args : tuple Copula parameters. If None, then the copula parameters will be taken from the ``cop_args`` attribute created when initiializing the instance. marg_args : list of tuples Parameters for the marginal distributions. It can be None if none of the marginal distributions have parameters, otherwise it needs to be a list of tuples with the same length has the number of marginal distributions. The list can contain empty tuples for marginal distributions that do not take parameter arguments. Returns ------- pdf values
pdf
python
statsmodels/statsmodels
statsmodels/distributions/copula/copulas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py
BSD-3-Clause
def logpdf(self, y, cop_args=None, marg_args=None): """Log-pdf of copula distribution. Parameters ---------- y : array_like Values of random variable at which to evaluate cdf. If 2-dimensional, then components of multivariate random variable need to be in columns cop_args : tuple Copula parameters. If None, then the copula parameters will be taken from the ``cop_args`` attribute creating when initiializing the instance. marg_args : list of tuples Parameters for the marginal distributions. It can be None if none of the marginal distributions have parameters, otherwise it needs to be a list of tuples with the same length has the number of marginal distributions. The list can contain empty tuples for marginal distributions that do not take parameter arguments. Returns ------- log-pdf values """ y = np.asarray(y) if cop_args is None: cop_args = self.cop_args if marg_args is None: marg_args = tuple([()] * y.shape[-1]) lpdf = 0.0 cdf_marg = [] for i in range(self.k_vars): lpdf += self.marginals[i].logpdf(y[..., i], *marg_args[i]) cdf_marg.append(self.marginals[i].cdf(y[..., i], *marg_args[i])) u = np.column_stack(cdf_marg) if y.ndim == 1: u = u.squeeze() lpdf += self.copula.logpdf(u, cop_args) return lpdf
Log-pdf of copula distribution. Parameters ---------- y : array_like Values of random variable at which to evaluate cdf. If 2-dimensional, then components of multivariate random variable need to be in columns cop_args : tuple Copula parameters. If None, then the copula parameters will be taken from the ``cop_args`` attribute creating when initiializing the instance. marg_args : list of tuples Parameters for the marginal distributions. It can be None if none of the marginal distributions have parameters, otherwise it needs to be a list of tuples with the same length has the number of marginal distributions. The list can contain empty tuples for marginal distributions that do not take parameter arguments. Returns ------- log-pdf values
logpdf
python
statsmodels/statsmodels
statsmodels/distributions/copula/copulas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py
BSD-3-Clause
def rvs(self, nobs=1, args=(), random_state=None): """Draw `n` in the half-open interval ``[0, 1)``. Marginals are uniformly distributed. Parameters ---------- nobs : int, optional Number of samples to generate from the copula. Default is 1. args : tuple Arguments for copula parameters. The number of arguments depends on the copula. random_state : {None, int, numpy.random.Generator}, optional If `seed` is None then the legacy singleton NumPy generator. This will change after 0.13 to use a fresh NumPy ``Generator``, so you should explicitly pass a seeded ``Generator`` if you need reproducible results. If `seed` is an int, a new ``Generator`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` instance then that instance is used. Returns ------- sample : array_like (nobs, d) Sample from the copula. See Also -------- statsmodels.tools.rng_qrng.check_random_state """ raise NotImplementedError
Draw `n` in the half-open interval ``[0, 1)``. Marginals are uniformly distributed. Parameters ---------- nobs : int, optional Number of samples to generate from the copula. Default is 1. args : tuple Arguments for copula parameters. The number of arguments depends on the copula. random_state : {None, int, numpy.random.Generator}, optional If `seed` is None then the legacy singleton NumPy generator. This will change after 0.13 to use a fresh NumPy ``Generator``, so you should explicitly pass a seeded ``Generator`` if you need reproducible results. If `seed` is an int, a new ``Generator`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` instance then that instance is used. Returns ------- sample : array_like (nobs, d) Sample from the copula. See Also -------- statsmodels.tools.rng_qrng.check_random_state
rvs
python
statsmodels/statsmodels
statsmodels/distributions/copula/copulas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py
BSD-3-Clause
def pdf(self, u, args=()): """Probability density function of copula. Parameters ---------- u : array_like, 2-D Points of random variables in unit hypercube at which method is evaluated. The second (or last) dimension should be the same as the dimension of the random variable, e.g. 2 for bivariate copula. args : tuple Arguments for copula parameters. The number of arguments depends on the copula. Returns ------- pdf : ndarray, (nobs, k_dim) Copula pdf evaluated at points ``u``. """
Probability density function of copula. Parameters ---------- u : array_like, 2-D Points of random variables in unit hypercube at which method is evaluated. The second (or last) dimension should be the same as the dimension of the random variable, e.g. 2 for bivariate copula. args : tuple Arguments for copula parameters. The number of arguments depends on the copula. Returns ------- pdf : ndarray, (nobs, k_dim) Copula pdf evaluated at points ``u``.
pdf
python
statsmodels/statsmodels
statsmodels/distributions/copula/copulas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py
BSD-3-Clause
def logpdf(self, u, args=()): """Log of copula pdf, loglikelihood. Parameters ---------- u : array_like, 2-D Points of random variables in unit hypercube at which method is evaluated. The second (or last) dimension should be the same as the dimension of the random variable, e.g. 2 for bivariate copula. args : tuple Arguments for copula parameters. The number of arguments depends on the copula. Returns ------- cdf : ndarray, (nobs, k_dim) Copula log-pdf evaluated at points ``u``. """ return np.log(self.pdf(u, *args))
Log of copula pdf, loglikelihood. Parameters ---------- u : array_like, 2-D Points of random variables in unit hypercube at which method is evaluated. The second (or last) dimension should be the same as the dimension of the random variable, e.g. 2 for bivariate copula. args : tuple Arguments for copula parameters. The number of arguments depends on the copula. Returns ------- cdf : ndarray, (nobs, k_dim) Copula log-pdf evaluated at points ``u``.
logpdf
python
statsmodels/statsmodels
statsmodels/distributions/copula/copulas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py
BSD-3-Clause
def cdf(self, u, args=()): """Cumulative distribution function evaluated at points u. Parameters ---------- u : array_like, 2-D Points of random variables in unit hypercube at which method is evaluated. The second (or last) dimension should be the same as the dimension of the random variable, e.g. 2 for bivariate copula. args : tuple Arguments for copula parameters. The number of arguments depends on the copula. Returns ------- cdf : ndarray, (nobs, k_dim) Copula cdf evaluated at points ``u``. """
Cumulative distribution function evaluated at points u. Parameters ---------- u : array_like, 2-D Points of random variables in unit hypercube at which method is evaluated. The second (or last) dimension should be the same as the dimension of the random variable, e.g. 2 for bivariate copula. args : tuple Arguments for copula parameters. The number of arguments depends on the copula. Returns ------- cdf : ndarray, (nobs, k_dim) Copula cdf evaluated at points ``u``.
cdf
python
statsmodels/statsmodels
statsmodels/distributions/copula/copulas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py
BSD-3-Clause
def plot_scatter(self, sample=None, nobs=500, random_state=None, ax=None): """Sample the copula and plot. Parameters ---------- sample : array-like, optional The sample to plot. If not provided (the default), a sample is generated. nobs : int, optional Number of samples to generate from the copula. random_state : {None, int, numpy.random.Generator}, optional If `seed` is None then the legacy singleton NumPy generator. This will change after 0.13 to use a fresh NumPy ``Generator``, so you should explicitly pass a seeded ``Generator`` if you need reproducible results. If `seed` is an int, a new ``Generator`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` instance then that instance is used. ax : AxesSubplot, optional If given, this subplot is used to plot in instead of a new figure being created. Returns ------- fig : Figure If `ax` is None, the created figure. Otherwise the figure to which `ax` is connected. sample : array_like (n, d) Sample from the copula. See Also -------- statsmodels.tools.rng_qrng.check_random_state """ if self.k_dim != 2: raise ValueError("Can only plot 2-dimensional Copula.") if sample is None: sample = self.rvs(nobs=nobs, random_state=random_state) fig, ax = utils.create_mpl_ax(ax) ax.scatter(sample[:, 0], sample[:, 1]) ax.set_xlabel('u') ax.set_ylabel('v') return fig, sample
Sample the copula and plot. Parameters ---------- sample : array-like, optional The sample to plot. If not provided (the default), a sample is generated. nobs : int, optional Number of samples to generate from the copula. random_state : {None, int, numpy.random.Generator}, optional If `seed` is None then the legacy singleton NumPy generator. This will change after 0.13 to use a fresh NumPy ``Generator``, so you should explicitly pass a seeded ``Generator`` if you need reproducible results. If `seed` is an int, a new ``Generator`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` instance then that instance is used. ax : AxesSubplot, optional If given, this subplot is used to plot in instead of a new figure being created. Returns ------- fig : Figure If `ax` is None, the created figure. Otherwise the figure to which `ax` is connected. sample : array_like (n, d) Sample from the copula. See Also -------- statsmodels.tools.rng_qrng.check_random_state
plot_scatter
python
statsmodels/statsmodels
statsmodels/distributions/copula/copulas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py
BSD-3-Clause
def plot_pdf(self, ticks_nbr=10, ax=None): """Plot the PDF. Parameters ---------- ticks_nbr : int, optional Number of color isolines for the PDF. Default is 10. ax : AxesSubplot, optional If given, this subplot is used to plot in instead of a new figure being created. Returns ------- fig : Figure If `ax` is None, the created figure. Otherwise the figure to which `ax` is connected. """ from matplotlib import pyplot as plt if self.k_dim != 2: import warnings warnings.warn("Plotting 2-dimensional Copula.") n_samples = 100 eps = 1e-4 uu, vv = np.meshgrid(np.linspace(eps, 1 - eps, n_samples), np.linspace(eps, 1 - eps, n_samples)) points = np.vstack([uu.ravel(), vv.ravel()]).T data = self.pdf(points).T.reshape(uu.shape) min_ = np.nanpercentile(data, 5) max_ = np.nanpercentile(data, 95) fig, ax = utils.create_mpl_ax(ax) vticks = np.linspace(min_, max_, num=ticks_nbr) range_cbar = [min_, max_] cs = ax.contourf(uu, vv, data, vticks, antialiased=True, vmin=range_cbar[0], vmax=range_cbar[1]) ax.set_xlabel("u") ax.set_ylabel("v") ax.set_xlim(0, 1) ax.set_ylim(0, 1) ax.set_aspect('equal') cbar = plt.colorbar(cs, ticks=vticks) cbar.set_label('p') fig.tight_layout() return fig
Plot the PDF. Parameters ---------- ticks_nbr : int, optional Number of color isolines for the PDF. Default is 10. ax : AxesSubplot, optional If given, this subplot is used to plot in instead of a new figure being created. Returns ------- fig : Figure If `ax` is None, the created figure. Otherwise the figure to which `ax` is connected.
plot_pdf
python
statsmodels/statsmodels
statsmodels/distributions/copula/copulas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py
BSD-3-Clause
def tau_simulated(self, nobs=1024, random_state=None): """Kendall's tau based on simulated samples. Returns ------- tau : float Kendall's tau. """ x = self.rvs(nobs, random_state=random_state) return stats.kendalltau(x[:, 0], x[:, 1])[0]
Kendall's tau based on simulated samples. Returns ------- tau : float Kendall's tau.
tau_simulated
python
statsmodels/statsmodels
statsmodels/distributions/copula/copulas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py
BSD-3-Clause
def fit_corr_param(self, data): """Copula correlation parameter using Kendall's tau of sample data. Parameters ---------- data : array_like Sample data used to fit `theta` using Kendall's tau. Returns ------- corr_param : float Correlation parameter of the copula, ``theta`` in Archimedean and pearson correlation in elliptical. If k_dim > 2, then average tau is used. """ x = np.asarray(data) if x.shape[1] == 2: tau = stats.kendalltau(x[:, 0], x[:, 1])[0] else: k = self.k_dim taus = [stats.kendalltau(x[..., i], x[..., j])[0] for i in range(k) for j in range(i+1, k)] tau = np.mean(taus) return self._arg_from_tau(tau)
Copula correlation parameter using Kendall's tau of sample data. Parameters ---------- data : array_like Sample data used to fit `theta` using Kendall's tau. Returns ------- corr_param : float Correlation parameter of the copula, ``theta`` in Archimedean and pearson correlation in elliptical. If k_dim > 2, then average tau is used.
fit_corr_param
python
statsmodels/statsmodels
statsmodels/distributions/copula/copulas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py
BSD-3-Clause
def _arg_from_tau(self, tau): """Compute correlation parameter from tau. Parameters ---------- tau : float Kendall's tau. Returns ------- corr_param : float Correlation parameter of the copula, ``theta`` in Archimedean and pearson correlation in elliptical. """ raise NotImplementedError
Compute correlation parameter from tau. Parameters ---------- tau : float Kendall's tau. Returns ------- corr_param : float Correlation parameter of the copula, ``theta`` in Archimedean and pearson correlation in elliptical.
_arg_from_tau
python
statsmodels/statsmodels
statsmodels/distributions/copula/copulas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/copulas.py
BSD-3-Clause
def tau(self, corr=None): """Bivariate kendall's tau based on correlation coefficient. Parameters ---------- corr : None or float Pearson correlation. If corr is None, then the correlation will be taken from the copula attribute. Returns ------- Kendall's tau that corresponds to pearson correlation in the elliptical copula. """ if corr is None: corr = self.corr if corr.shape == (2, 2): corr = corr[0, 1] rho = 2 * np.arcsin(corr) / np.pi return rho
Bivariate kendall's tau based on correlation coefficient. Parameters ---------- corr : None or float Pearson correlation. If corr is None, then the correlation will be taken from the copula attribute. Returns ------- Kendall's tau that corresponds to pearson correlation in the elliptical copula.
tau
python
statsmodels/statsmodels
statsmodels/distributions/copula/elliptical.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/elliptical.py
BSD-3-Clause
def corr_from_tau(self, tau): """Pearson correlation from kendall's tau. Parameters ---------- tau : array_like Kendall's tau correlation coefficient. Returns ------- Pearson correlation coefficient for given tau in elliptical copula. This can be used as parameter for an elliptical copula. """ corr = np.sin(tau * np.pi / 2) return corr
Pearson correlation from kendall's tau. Parameters ---------- tau : array_like Kendall's tau correlation coefficient. Returns ------- Pearson correlation coefficient for given tau in elliptical copula. This can be used as parameter for an elliptical copula.
corr_from_tau
python
statsmodels/statsmodels
statsmodels/distributions/copula/elliptical.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/elliptical.py
BSD-3-Clause
def fit_corr_param(self, data): """Copula correlation parameter using Kendall's tau of sample data. Parameters ---------- data : array_like Sample data used to fit `theta` using Kendall's tau. Returns ------- corr_param : float Correlation parameter of the copula, ``theta`` in Archimedean and pearson correlation in elliptical. If k_dim > 2, then average tau is used. """ x = np.asarray(data) if x.shape[1] == 2: tau = stats.kendalltau(x[:, 0], x[:, 1])[0] else: k = self.k_dim tau = np.eye(k) for i in range(k): for j in range(i+1, k): tau_ij = stats.kendalltau(x[..., i], x[..., j])[0] tau[i, j] = tau[j, i] = tau_ij return self._arg_from_tau(tau)
Copula correlation parameter using Kendall's tau of sample data. Parameters ---------- data : array_like Sample data used to fit `theta` using Kendall's tau. Returns ------- corr_param : float Correlation parameter of the copula, ``theta`` in Archimedean and pearson correlation in elliptical. If k_dim > 2, then average tau is used.
fit_corr_param
python
statsmodels/statsmodels
statsmodels/distributions/copula/elliptical.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/elliptical.py
BSD-3-Clause
def dependence_tail(self, corr=None): """ Bivariate tail dependence parameter. Joe (2014) p. 182 Parameters ---------- corr : any Tail dependence for Gaussian copulas is always zero. Argument will be ignored Returns ------- Lower and upper tail dependence coefficients of the copula with given Pearson correlation coefficient. """ return 0, 0
Bivariate tail dependence parameter. Joe (2014) p. 182 Parameters ---------- corr : any Tail dependence for Gaussian copulas is always zero. Argument will be ignored Returns ------- Lower and upper tail dependence coefficients of the copula with given Pearson correlation coefficient.
dependence_tail
python
statsmodels/statsmodels
statsmodels/distributions/copula/elliptical.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/elliptical.py
BSD-3-Clause
def spearmans_rho(self, corr=None): """ Bivariate Spearman's rho based on correlation coefficient. Joe (2014) p. 182 Parameters ---------- corr : None or float Pearson correlation. If corr is None, then the correlation will be taken from the copula attribute. Returns ------- Spearman's rho that corresponds to pearson correlation in the elliptical copula. """ if corr is None: corr = self.corr if corr.shape == (2, 2): corr = corr[0, 1] tau = 6 * np.arcsin(corr / 2) / np.pi return tau
Bivariate Spearman's rho based on correlation coefficient. Joe (2014) p. 182 Parameters ---------- corr : None or float Pearson correlation. If corr is None, then the correlation will be taken from the copula attribute. Returns ------- Spearman's rho that corresponds to pearson correlation in the elliptical copula.
spearmans_rho
python
statsmodels/statsmodels
statsmodels/distributions/copula/elliptical.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/elliptical.py
BSD-3-Clause
def dependence_tail(self, corr=None): """ Bivariate tail dependence parameter. Joe (2014) p. 182 Parameters ---------- corr : None or float Pearson correlation. If corr is None, then the correlation will be taken from the copula attribute. Returns ------- Lower and upper tail dependence coefficients of the copula with given Pearson correlation coefficient. """ if corr is None: corr = self.corr if corr.shape == (2, 2): corr = corr[0, 1] df = self.df t = - np.sqrt((df + 1) * (1 - corr) / 1 + corr) # Note self.distr_uv is frozen, df cannot change, use stats.t instead lam = 2 * stats.t.cdf(t, df + 1) return lam, lam
Bivariate tail dependence parameter. Joe (2014) p. 182 Parameters ---------- corr : None or float Pearson correlation. If corr is None, then the correlation will be taken from the copula attribute. Returns ------- Lower and upper tail dependence coefficients of the copula with given Pearson correlation coefficient.
dependence_tail
python
statsmodels/statsmodels
statsmodels/distributions/copula/elliptical.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/elliptical.py
BSD-3-Clause
def _debyem1_expansion(x): """Debye function minus 1, Taylor series approximation around zero function is not used """ x = np.asarray(x) # Expansion derived using Wolfram alpha dm1 = (-x/4 + x**2/36 - x**4/3600 + x**6/211680 - x**8/10886400 + x**10/526901760 - x**12 * 691/16999766784000) return dm1
Debye function minus 1, Taylor series approximation around zero function is not used
_debyem1_expansion
python
statsmodels/statsmodels
statsmodels/distributions/copula/archimedean.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/archimedean.py
BSD-3-Clause
def tau_frank(theta): """Kendall's tau for Frank Copula This uses Taylor series expansion for theta <= 1. Parameters ---------- theta : float Parameter of the Frank copula. (not vectorized) Returns ------- tau : float, tau for given theta """ if theta <= 1: tau = _tau_frank_expansion(theta) else: debye_value = _debye(theta) tau = 1 + 4 * (debye_value - 1) / theta return tau
Kendall's tau for Frank Copula This uses Taylor series expansion for theta <= 1. Parameters ---------- theta : float Parameter of the Frank copula. (not vectorized) Returns ------- tau : float, tau for given theta
tau_frank
python
statsmodels/statsmodels
statsmodels/distributions/copula/archimedean.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/archimedean.py
BSD-3-Clause
def cdf(self, u, args=()): """Evaluate cdf of Archimedean copula.""" args = self._handle_args(args) u = self._handle_u(u) axis = -1 phi = self.transform.evaluate phi_inv = self.transform.inverse cdfv = phi_inv(phi(u, *args).sum(axis), *args) # clip numerical noise out = cdfv if isinstance(cdfv, np.ndarray) else None cdfv = np.clip(cdfv, 0., 1., out=out) # inplace if possible return cdfv
Evaluate cdf of Archimedean copula.
cdf
python
statsmodels/statsmodels
statsmodels/distributions/copula/archimedean.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/archimedean.py
BSD-3-Clause
def pdf(self, u, args=()): """Evaluate pdf of Archimedean copula.""" u = self._handle_u(u) args = self._handle_args(args) axis = -1 phi_d1 = self.transform.deriv if u.shape[-1] == 2: psi_d = self.transform.deriv2_inverse elif u.shape[-1] == 3: psi_d = self.transform.deriv3_inverse elif u.shape[-1] == 4: psi_d = self.transform.deriv4_inverse else: # will raise NotImplementedError if not available k = u.shape[-1] def psi_d(*args): return self.transform.derivk_inverse(k, *args) psi = self.transform.evaluate(u, *args).sum(axis) pdfv = np.prod(phi_d1(u, *args), axis) pdfv *= (psi_d(psi, *args)) # use abs, I'm not sure yet about where to add signs return np.abs(pdfv)
Evaluate pdf of Archimedean copula.
pdf
python
statsmodels/statsmodels
statsmodels/distributions/copula/archimedean.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/archimedean.py
BSD-3-Clause
def logpdf(self, u, args=()): """Evaluate log pdf of multivariate Archimedean copula.""" u = self._handle_u(u) args = self._handle_args(args) axis = -1 phi_d1 = self.transform.deriv if u.shape[-1] == 2: psi_d = self.transform.deriv2_inverse elif u.shape[-1] == 3: psi_d = self.transform.deriv3_inverse elif u.shape[-1] == 4: psi_d = self.transform.deriv4_inverse else: # will raise NotImplementedError if not available k = u.shape[-1] def psi_d(*args): return self.transform.derivk_inverse(k, *args) psi = self.transform.evaluate(u, *args).sum(axis) # I need np.abs because derivatives are negative, # is this correct for mv? logpdfv = np.sum(np.log(np.abs(phi_d1(u, *args))), axis) logpdfv += np.log(np.abs(psi_d(psi, *args))) return logpdfv
Evaluate log pdf of multivariate Archimedean copula.
logpdf
python
statsmodels/statsmodels
statsmodels/distributions/copula/archimedean.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/archimedean.py
BSD-3-Clause
def cdfcond_2g1(self, u, args=()): """Conditional cdf of second component given the value of first. """ u = self._handle_u(u) th, = self._handle_args(args) if u.shape[-1] == 2: # bivariate case u1, u2 = u[..., 0], u[..., 1] cdfc = np.exp(- th * u1) cdfc /= np.expm1(-th) / np.expm1(- th * u2) + np.expm1(- th * u1) return cdfc else: raise NotImplementedError("u needs to be bivariate (2 columns)")
Conditional cdf of second component given the value of first.
cdfcond_2g1
python
statsmodels/statsmodels
statsmodels/distributions/copula/archimedean.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/archimedean.py
BSD-3-Clause
def ppfcond_2g1(self, q, u1, args=()): """Conditional pdf of second component given the value of first. """ u1 = np.asarray(u1) th, = self._handle_args(args) if u1.shape[-1] == 1: # bivariate case, conditional on value of first variable ppfc = - np.log(1 + np.expm1(- th) / ((1 / q - 1) * np.exp(-th * u1) + 1)) / th return ppfc else: raise NotImplementedError("u needs to be bivariate (2 columns)")
Conditional pdf of second component given the value of first.
ppfcond_2g1
python
statsmodels/statsmodels
statsmodels/distributions/copula/archimedean.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/archimedean.py
BSD-3-Clause
def copula_bv_ev(u, transform, args=()): '''generic bivariate extreme value copula ''' u, v = u return np.exp(np.log(u * v) * (transform(np.log(u)/np.log(u*v), *args)))
generic bivariate extreme value copula
copula_bv_ev
python
statsmodels/statsmodels
statsmodels/distributions/copula/extreme_value.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/extreme_value.py
BSD-3-Clause
def cdf(self, u, args=()): """Evaluate cdf of bivariate extreme value copula. Parameters ---------- u : array_like Values of random bivariate random variable, each defined on [0, 1], for which cdf is computed. Can be two dimensional with multivariate components in columns and observation in rows. args : tuple Required parameters for the copula. The meaning and number of parameters in the tuple depends on the specific copula. Returns ------- CDF values at evaluation points. """ # currently only Bivariate u, v = np.asarray(u).T args = self._handle_args(args) cdfv = np.exp(np.log(u * v) * self.transform(np.log(u)/np.log(u*v), *args)) return cdfv
Evaluate cdf of bivariate extreme value copula. Parameters ---------- u : array_like Values of random bivariate random variable, each defined on [0, 1], for which cdf is computed. Can be two dimensional with multivariate components in columns and observation in rows. args : tuple Required parameters for the copula. The meaning and number of parameters in the tuple depends on the specific copula. Returns ------- CDF values at evaluation points.
cdf
python
statsmodels/statsmodels
statsmodels/distributions/copula/extreme_value.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/extreme_value.py
BSD-3-Clause
def pdf(self, u, args=()): """Evaluate pdf of bivariate extreme value copula. Parameters ---------- u : array_like Values of random bivariate random variable, each defined on [0, 1], for which cdf is computed. Can be two dimensional with multivariate components in columns and observation in rows. args : tuple Required parameters for the copula. The meaning and number of parameters in the tuple depends on the specific copula. Returns ------- PDF values at evaluation points. """ tr = self.transform u1, u2 = np.asarray(u).T args = self._handle_args(args) log_u12 = np.log(u1 * u2) t = np.log(u1) / log_u12 cdf = self.cdf(u, args) dep = tr(t, *args) d1 = tr.deriv(t, *args) d2 = tr.deriv2(t, *args) pdf_ = cdf / (u1 * u2) * ((dep + (1 - t) * d1) * (dep - t * d1) - d2 * (1 - t) * t / log_u12) return pdf_
Evaluate pdf of bivariate extreme value copula. Parameters ---------- u : array_like Values of random bivariate random variable, each defined on [0, 1], for which cdf is computed. Can be two dimensional with multivariate components in columns and observation in rows. args : tuple Required parameters for the copula. The meaning and number of parameters in the tuple depends on the specific copula. Returns ------- PDF values at evaluation points.
pdf
python
statsmodels/statsmodels
statsmodels/distributions/copula/extreme_value.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/extreme_value.py
BSD-3-Clause
def logpdf(self, u, args=()): """Evaluate log-pdf of bivariate extreme value copula. Parameters ---------- u : array_like Values of random bivariate random variable, each defined on [0, 1], for which cdf is computed. Can be two dimensional with multivariate components in columns and observation in rows. args : tuple Required parameters for the copula. The meaning and number of parameters in the tuple depends on the specific copula. Returns ------- Log-pdf values at evaluation points. """ return np.log(self.pdf(u, args=args))
Evaluate log-pdf of bivariate extreme value copula. Parameters ---------- u : array_like Values of random bivariate random variable, each defined on [0, 1], for which cdf is computed. Can be two dimensional with multivariate components in columns and observation in rows. args : tuple Required parameters for the copula. The meaning and number of parameters in the tuple depends on the specific copula. Returns ------- Log-pdf values at evaluation points.
logpdf
python
statsmodels/statsmodels
statsmodels/distributions/copula/extreme_value.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/extreme_value.py
BSD-3-Clause
def conditional_2g1(self, u, args=()): """conditional distribution not yet implemented C2|1(u2|u1) := ∂C(u1, u2) / ∂u1 = C(u1, u2) / u1 * (A(t) − t A'(t)) where t = np.log(v)/np.log(u*v) """ raise NotImplementedError
conditional distribution not yet implemented C2|1(u2|u1) := ∂C(u1, u2) / ∂u1 = C(u1, u2) / u1 * (A(t) − t A'(t)) where t = np.log(v)/np.log(u*v)
conditional_2g1
python
statsmodels/statsmodels
statsmodels/distributions/copula/extreme_value.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/extreme_value.py
BSD-3-Clause
def rvs_kernel(sample, size, bw=1, k_func=None, return_extras=False): """Random sampling from empirical copula using Beta distribution Parameters ---------- sample : ndarray Sample of multivariate observations in (o, 1) interval. size : int Number of observations to simulate. bw : float Bandwidth for Beta sampling. The beta copula corresponds to a kernel estimate of the distribution. bw=1 corresponds to the empirical beta copula. A small bandwidth like bw=0.001 corresponds to small noise added to the empirical distribution. Larger bw, e.g. bw=10 corresponds to kernel estimate with more smoothing. k_func : None or callable The default kernel function is currently a beta function with 1 added to the first beta parameter. return_extras : bool If this is False, then only the random sample will be returned. If true, then extra information is returned that is mainly of interest for verification. Returns ------- rvs : ndarray Multivariate sample with ``size`` observations drawn from the Beta Copula. Notes ----- Status: experimental, API will change. """ # vectorized for observations n = sample.shape[0] if k_func is None: kfunc = _kernel_rvs_beta1 idx = np.random.randint(0, n, size=size) xi = sample[idx] krvs = np.column_stack([kfunc(xii, bw) for xii in xi.T]) if return_extras: return krvs, idx, xi else: return krvs
Random sampling from empirical copula using Beta distribution Parameters ---------- sample : ndarray Sample of multivariate observations in (o, 1) interval. size : int Number of observations to simulate. bw : float Bandwidth for Beta sampling. The beta copula corresponds to a kernel estimate of the distribution. bw=1 corresponds to the empirical beta copula. A small bandwidth like bw=0.001 corresponds to small noise added to the empirical distribution. Larger bw, e.g. bw=10 corresponds to kernel estimate with more smoothing. k_func : None or callable The default kernel function is currently a beta function with 1 added to the first beta parameter. return_extras : bool If this is False, then only the random sample will be returned. If true, then extra information is returned that is mainly of interest for verification. Returns ------- rvs : ndarray Multivariate sample with ``size`` observations drawn from the Beta Copula. Notes ----- Status: experimental, API will change.
rvs_kernel
python
statsmodels/statsmodels
statsmodels/distributions/copula/other_copulas.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/other_copulas.py
BSD-3-Clause
def clear_cache(self): """clear cache of Sterling numbers """ self._cache = {}
clear cache of Sterling numbers
clear_cache
python
statsmodels/statsmodels
statsmodels/distributions/copula/_special.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/_special.py
BSD-3-Clause
def clear_cache(self): """clear cache of Sterling numbers """ self._cache = {}
clear cache of Sterling numbers
clear_cache
python
statsmodels/statsmodels
statsmodels/distributions/copula/_special.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/_special.py
BSD-3-Clause
def li3(z): """Polylogarithm for negative integer order -3 Li(-3, z) """ return z * (1 + 4 * z + z**2) / (1 - z)**4
Polylogarithm for negative integer order -3 Li(-3, z)
li3
python
statsmodels/statsmodels
statsmodels/distributions/copula/_special.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/_special.py
BSD-3-Clause
def li4(z): """Polylogarithm for negative integer order -4 Li(-4, z) """ return z * (1 + z) * (1 + 10 * z + z**2) / (1 - z)**5
Polylogarithm for negative integer order -4 Li(-4, z)
li4
python
statsmodels/statsmodels
statsmodels/distributions/copula/_special.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/_special.py
BSD-3-Clause
def lin(n, z): """Polylogarithm for negative integer order -n Li(-n, z) https://en.wikipedia.org/wiki/Polylogarithm#Particular_values """ if np.size(z) > 1: z = np.array(z)[..., None] k = np.arange(n+1) st2 = np.array([sterling2(n + 1, ki + 1) for ki in k]) res = (-1)**(n+1) * np.sum(factorial(k) * st2 * (-1 / (1 - z))**(k+1), axis=-1) return res
Polylogarithm for negative integer order -n Li(-n, z) https://en.wikipedia.org/wiki/Polylogarithm#Particular_values
lin
python
statsmodels/statsmodels
statsmodels/distributions/copula/_special.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/distributions/copula/_special.py
BSD-3-Clause
def _next_regular(target): """ Find the next regular number greater than or equal to target. Regular numbers are composites of the prime factors 2, 3, and 5. Also known as 5-smooth numbers or Hamming numbers, these are the optimal size for inputs to FFTPACK. Target must be a positive integer. """ if target <= 6: return target # Quickly check if it's already a power of 2 if not (target & (target - 1)): return target match = float("inf") # Anything found will be smaller p5 = 1 while p5 < target: p35 = p5 while p35 < target: # Ceiling integer division, avoiding conversion to float # (quotient = ceil(target / p35)) quotient = -(-target // p35) # Quickly find next power of 2 >= quotient p2 = 2 ** ((quotient - 1).bit_length()) N = p2 * p35 if N == target: return N elif N < match: match = N p35 *= 3 if p35 == target: return p35 if p35 < match: match = p35 p5 *= 5 if p5 == target: return p5 if p5 < match: match = p5 return match
Find the next regular number greater than or equal to target. Regular numbers are composites of the prime factors 2, 3, and 5. Also known as 5-smooth numbers or Hamming numbers, these are the optimal size for inputs to FFTPACK. Target must be a positive integer.
_next_regular
python
statsmodels/statsmodels
statsmodels/compat/scipy.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/scipy.py
BSD-3-Clause
def _valarray(shape, value=np.nan, typecode=None): """Return an array of all value.""" out = np.ones(shape, dtype=bool) * value if typecode is not None: out = out.astype(typecode) if not isinstance(out, np.ndarray): out = np.asarray(out) return out
Return an array of all value.
_valarray
python
statsmodels/statsmodels
statsmodels/compat/scipy.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/scipy.py
BSD-3-Clause
def pytest_warns( warning: type[Warning] | tuple[type[Warning], ...] | None ) -> WarningsChecker | NoWarningsChecker: """ Parameters ---------- warning : {None, Warning, Tuple[Warning]} None if no warning is produced, or a single or multiple Warnings Returns ------- cm """ if warning is None: return NoWarningsChecker() else: assert warning is not None return warns(warning)
Parameters ---------- warning : {None, Warning, Tuple[Warning]} None if no warning is produced, or a single or multiple Warnings Returns ------- cm
pytest_warns
python
statsmodels/statsmodels
statsmodels/compat/pytest.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/pytest.py
BSD-3-Clause
def _squeeze_output(out): """ Remove single-dimensional entries from array and convert to scalar, if necessary. """ out = out.squeeze() if out.ndim == 0: out = out[()] return out
Remove single-dimensional entries from array and convert to scalar, if necessary.
_squeeze_output
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None): """ Determine which eigenvalues are "small" given the spectrum. This is for compatibility across various linear algebra functions that should agree about whether or not a Hermitian matrix is numerically singular and what is its numerical matrix rank. This is designed to be compatible with scipy.linalg.pinvh. Parameters ---------- spectrum : 1d ndarray Array of eigenvalues of a Hermitian matrix. cond, rcond : float, optional Cutoff for small eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. Returns ------- eps : float Magnitude cutoff for numerical negligibility. """ if rcond is not None: cond = rcond if cond in [None, -1]: t = spectrum.dtype.char.lower() factor = {'f': 1E3, 'd': 1E6} cond = factor[t] * np.finfo(t).eps eps = cond * np.max(abs(spectrum)) return eps
Determine which eigenvalues are "small" given the spectrum. This is for compatibility across various linear algebra functions that should agree about whether or not a Hermitian matrix is numerically singular and what is its numerical matrix rank. This is designed to be compatible with scipy.linalg.pinvh. Parameters ---------- spectrum : 1d ndarray Array of eigenvalues of a Hermitian matrix. cond, rcond : float, optional Cutoff for small eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. Returns ------- eps : float Magnitude cutoff for numerical negligibility.
_eigvalsh_to_eps
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def _pinv_1d(v, eps=1e-5): """ A helper function for computing the pseudoinverse. Parameters ---------- v : iterable of numbers This may be thought of as a vector of eigenvalues or singular values. eps : float Values with magnitude no greater than eps are considered negligible. Returns ------- v_pinv : 1d float ndarray A vector of pseudo-inverted numbers. """ return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
A helper function for computing the pseudoinverse. Parameters ---------- v : iterable of numbers This may be thought of as a vector of eigenvalues or singular values. eps : float Values with magnitude no greater than eps are considered negligible. Returns ------- v_pinv : 1d float ndarray A vector of pseudo-inverted numbers.
_pinv_1d
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def random_state(self): """ Get or set the RandomState object for generating random variates. This can be either None, int, a RandomState instance, or a np.random.Generator instance. If None (or np.random), use the RandomState singleton used by np.random. If already a RandomState or Generator instance, use it. If an int, use a new RandomState instance seeded with seed. """ return self._random_state
Get or set the RandomState object for generating random variates. This can be either None, int, a RandomState instance, or a np.random.Generator instance. If None (or np.random), use the RandomState singleton used by np.random. If already a RandomState or Generator instance, use it. If an int, use a new RandomState instance seeded with seed.
random_state
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None): """ Create a frozen multivariate normal distribution. See `multivariate_normal_frozen` for more information. """ return multivariate_normal_frozen(mean, cov, allow_singular=allow_singular, seed=seed)
Create a frozen multivariate normal distribution. See `multivariate_normal_frozen` for more information.
__call__
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def _process_parameters(self, dim, mean, cov): """ Infer dimensionality from mean or covariance matrix, ensure that mean and covariance are full vector resp. matrix. """ # Try to infer dimensionality if dim is None: if mean is None: if cov is None: dim = 1 else: cov = np.asarray(cov, dtype=float) if cov.ndim < 2: dim = 1 else: dim = cov.shape[0] else: mean = np.asarray(mean, dtype=float) dim = mean.size else: if not np.isscalar(dim): raise ValueError("Dimension of random variable must be " "a scalar.") # Check input sizes and return full arrays for mean and cov if # necessary if mean is None: mean = np.zeros(dim) mean = np.asarray(mean, dtype=float) if cov is None: cov = 1.0 cov = np.asarray(cov, dtype=float) if dim == 1: mean.shape = (1,) cov.shape = (1, 1) if mean.ndim != 1 or mean.shape[0] != dim: raise ValueError("Array 'mean' must be a vector of length %d." % dim) if cov.ndim == 0: cov = cov * np.eye(dim) elif cov.ndim == 1: cov = np.diag(cov) elif cov.ndim == 2 and cov.shape != (dim, dim): rows, cols = cov.shape if rows != cols: msg = ("Array 'cov' must be square if it is two dimensional," " but cov.shape = %s." % str(cov.shape)) else: msg = ("Dimension mismatch: array 'cov' is of shape %s," " but 'mean' is a vector of length %d.") msg = msg % (str(cov.shape), len(mean)) raise ValueError(msg) elif cov.ndim > 2: raise ValueError("Array 'cov' must be at most two-dimensional," " but cov.ndim = %d" % cov.ndim) return dim, mean, cov
Infer dimensionality from mean or covariance matrix, ensure that mean and covariance are full vector resp. matrix.
_process_parameters
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def _process_quantiles(self, x, dim): """ Adjust quantiles array so that last axis labels the components of each data point. """ x = np.asarray(x, dtype=float) if x.ndim == 0: x = x[np.newaxis] elif x.ndim == 1: if dim == 1: x = x[:, np.newaxis] else: x = x[np.newaxis, :] return x
Adjust quantiles array so that last axis labels the components of each data point.
_process_quantiles
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def _logpdf(self, x, mean, prec_U, log_det_cov, rank): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function mean : ndarray Mean of the distribution prec_U : ndarray A decomposition such that np.dot(prec_U, prec_U.T) is the precision matrix, i.e. inverse of the covariance matrix. log_det_cov : float Logarithm of the determinant of the covariance matrix rank : int Rank of the covariance matrix. Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. """ dev = x - mean maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1) return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function mean : ndarray Mean of the distribution prec_U : ndarray A decomposition such that np.dot(prec_U, prec_U.T) is the precision matrix, i.e. inverse of the covariance matrix. log_det_cov : float Logarithm of the determinant of the covariance matrix rank : int Rank of the covariance matrix. Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead.
_logpdf
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def logpdf(self, x, mean=None, cov=1, allow_singular=False): """ Log of the multivariate normal probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_mvn_doc_default_callparams)s Returns ------- pdf : ndarray or scalar Log of the probability density function evaluated at `x` Notes ----- %(_mvn_doc_callparams_note)s """ dim, mean, cov = self._process_parameters(None, mean, cov) x = self._process_quantiles(x, dim) psd = _PSD(cov, allow_singular=allow_singular) out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank) return _squeeze_output(out)
Log of the multivariate normal probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_mvn_doc_default_callparams)s Returns ------- pdf : ndarray or scalar Log of the probability density function evaluated at `x` Notes ----- %(_mvn_doc_callparams_note)s
logpdf
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def pdf(self, x, mean=None, cov=1, allow_singular=False): """ Multivariate normal probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_mvn_doc_default_callparams)s Returns ------- pdf : ndarray or scalar Probability density function evaluated at `x` Notes ----- %(_mvn_doc_callparams_note)s """ dim, mean, cov = self._process_parameters(None, mean, cov) x = self._process_quantiles(x, dim) psd = _PSD(cov, allow_singular=allow_singular) out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)) return _squeeze_output(out)
Multivariate normal probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_mvn_doc_default_callparams)s Returns ------- pdf : ndarray or scalar Probability density function evaluated at `x` Notes ----- %(_mvn_doc_callparams_note)s
pdf
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def _cdf(self, x, mean, cov, maxpts, abseps, releps): """ Parameters ---------- x : ndarray Points at which to evaluate the cumulative distribution function. mean : ndarray Mean of the distribution cov : array_like Covariance matrix of the distribution maxpts: integer The maximum number of points to use for integration abseps: float Absolute error tolerance releps: float Relative error tolerance Notes ----- As this function does no argument checking, it should not be called directly; use 'cdf' instead. .. versionadded:: 1.0.0 """ lower = np.full(mean.shape, -np.inf) # mvnun expects 1-d arguments, so process points sequentially def func1d(x_slice): return mvn.mvnun(lower, x_slice, mean, cov, maxpts, abseps, releps)[0] out = np.apply_along_axis(func1d, -1, x) return _squeeze_output(out)
Parameters ---------- x : ndarray Points at which to evaluate the cumulative distribution function. mean : ndarray Mean of the distribution cov : array_like Covariance matrix of the distribution maxpts: integer The maximum number of points to use for integration abseps: float Absolute error tolerance releps: float Relative error tolerance Notes ----- As this function does no argument checking, it should not be called directly; use 'cdf' instead. .. versionadded:: 1.0.0
_cdf
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None, abseps=1e-5, releps=1e-5): """ Log of the multivariate normal cumulative distribution function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_mvn_doc_default_callparams)s maxpts: integer, optional The maximum number of points to use for integration (default `1000000*dim`) abseps: float, optional Absolute error tolerance (default 1e-5) releps: float, optional Relative error tolerance (default 1e-5) Returns ------- cdf : ndarray or scalar Log of the cumulative distribution function evaluated at `x` Notes ----- %(_mvn_doc_callparams_note)s .. versionadded:: 1.0.0 """ dim, mean, cov = self._process_parameters(None, mean, cov) x = self._process_quantiles(x, dim) # Use _PSD to check covariance matrix _PSD(cov, allow_singular=allow_singular) if not maxpts: maxpts = 1000000 * dim out = np.log(self._cdf(x, mean, cov, maxpts, abseps, releps)) return out
Log of the multivariate normal cumulative distribution function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_mvn_doc_default_callparams)s maxpts: integer, optional The maximum number of points to use for integration (default `1000000*dim`) abseps: float, optional Absolute error tolerance (default 1e-5) releps: float, optional Relative error tolerance (default 1e-5) Returns ------- cdf : ndarray or scalar Log of the cumulative distribution function evaluated at `x` Notes ----- %(_mvn_doc_callparams_note)s .. versionadded:: 1.0.0
logcdf
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None, abseps=1e-5, releps=1e-5): """ Multivariate normal cumulative distribution function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_mvn_doc_default_callparams)s maxpts: integer, optional The maximum number of points to use for integration (default `1000000*dim`) abseps: float, optional Absolute error tolerance (default 1e-5) releps: float, optional Relative error tolerance (default 1e-5) Returns ------- cdf : ndarray or scalar Cumulative distribution function evaluated at `x` Notes ----- %(_mvn_doc_callparams_note)s .. versionadded:: 1.0.0 """ dim, mean, cov = self._process_parameters(None, mean, cov) x = self._process_quantiles(x, dim) # Use _PSD to check covariance matrix _PSD(cov, allow_singular=allow_singular) if not maxpts: maxpts = 1000000 * dim out = self._cdf(x, mean, cov, maxpts, abseps, releps) return out
Multivariate normal cumulative distribution function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_mvn_doc_default_callparams)s maxpts: integer, optional The maximum number of points to use for integration (default `1000000*dim`) abseps: float, optional Absolute error tolerance (default 1e-5) releps: float, optional Relative error tolerance (default 1e-5) Returns ------- cdf : ndarray or scalar Cumulative distribution function evaluated at `x` Notes ----- %(_mvn_doc_callparams_note)s .. versionadded:: 1.0.0
cdf
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def rvs(self, mean=None, cov=1, size=1, random_state=None): """ Draw random samples from a multivariate normal distribution. Parameters ---------- %(_mvn_doc_default_callparams)s size : integer, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (`size`, `N`), where `N` is the dimension of the random variable. Notes ----- %(_mvn_doc_callparams_note)s """ dim, mean, cov = self._process_parameters(None, mean, cov) random_state = self._get_random_state(random_state) out = random_state.multivariate_normal(mean, cov, size) return _squeeze_output(out)
Draw random samples from a multivariate normal distribution. Parameters ---------- %(_mvn_doc_default_callparams)s size : integer, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (`size`, `N`), where `N` is the dimension of the random variable. Notes ----- %(_mvn_doc_callparams_note)s
rvs
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def entropy(self, mean=None, cov=1): """ Compute the differential entropy of the multivariate normal. Parameters ---------- %(_mvn_doc_default_callparams)s Returns ------- h : scalar Entropy of the multivariate normal distribution Notes ----- %(_mvn_doc_callparams_note)s """ dim, mean, cov = self._process_parameters(None, mean, cov) _, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov) return 0.5 * logdet
Compute the differential entropy of the multivariate normal. Parameters ---------- %(_mvn_doc_default_callparams)s Returns ------- h : scalar Entropy of the multivariate normal distribution Notes ----- %(_mvn_doc_callparams_note)s
entropy
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None, maxpts=None, abseps=1e-5, releps=1e-5): """ Create a frozen multivariate normal distribution. Parameters ---------- mean : array_like, optional Mean of the distribution (default zero) cov : array_like, optional Covariance matrix of the distribution (default one) allow_singular : bool, optional If this flag is True then tolerate a singular covariance matrix (default False). seed : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional This parameter defines the object to use for drawing random variates. If `seed` is `None` the `~np.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with seed. If `seed` is already a ``RandomState`` or ``Generator`` instance, then that object is used. Default is None. maxpts: integer, optional The maximum number of points to use for integration of the cumulative distribution function (default `1000000*dim`) abseps: float, optional Absolute error tolerance for the cumulative distribution function (default 1e-5) releps: float, optional Relative error tolerance for the cumulative distribution function (default 1e-5) Examples -------- When called with the default parameters, this will create a 1D random variable with mean 0 and covariance 1: >>> from scipy.stats import multivariate_normal >>> r = multivariate_normal() >>> r.mean array([ 0.]) >>> r.cov array([[1.]]) """ self._dist = multivariate_normal_gen(seed) self.dim, self.mean, self.cov = self._dist._process_parameters( None, mean, cov) self.cov_info = _PSD(self.cov, allow_singular=allow_singular) if not maxpts: maxpts = 1000000 * self.dim self.maxpts = maxpts self.abseps = abseps self.releps = releps
Create a frozen multivariate normal distribution. Parameters ---------- mean : array_like, optional Mean of the distribution (default zero) cov : array_like, optional Covariance matrix of the distribution (default one) allow_singular : bool, optional If this flag is True then tolerate a singular covariance matrix (default False). seed : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional This parameter defines the object to use for drawing random variates. If `seed` is `None` the `~np.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with seed. If `seed` is already a ``RandomState`` or ``Generator`` instance, then that object is used. Default is None. maxpts: integer, optional The maximum number of points to use for integration of the cumulative distribution function (default `1000000*dim`) abseps: float, optional Absolute error tolerance for the cumulative distribution function (default 1e-5) releps: float, optional Relative error tolerance for the cumulative distribution function (default 1e-5) Examples -------- When called with the default parameters, this will create a 1D random variable with mean 0 and covariance 1: >>> from scipy.stats import multivariate_normal >>> r = multivariate_normal() >>> r.mean array([ 0.]) >>> r.cov array([[1.]])
__init__
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def entropy(self): """ Computes the differential entropy of the multivariate normal. Returns ------- h : scalar Entropy of the multivariate normal distribution """ log_pdet = self.cov_info.log_pdet rank = self.cov_info.rank return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
Computes the differential entropy of the multivariate normal. Returns ------- h : scalar Entropy of the multivariate normal distribution
entropy
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def __init__(self, seed=None): """ Initialize a multivariate t-distributed random variable. Parameters ---------- seed : Random state. """ super().__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, mvt_docdict_params) self._random_state = check_random_state(seed)
Initialize a multivariate t-distributed random variable. Parameters ---------- seed : Random state.
__init__
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def __call__(self, loc=None, shape=1, df=1, allow_singular=False, seed=None): """ Create a frozen multivariate t-distribution. See `multivariate_t_frozen` for parameters. """ if df == np.inf: return multivariate_normal_frozen(mean=loc, cov=shape, allow_singular=allow_singular, seed=seed) return multivariate_t_frozen(loc=loc, shape=shape, df=df, allow_singular=allow_singular, seed=seed)
Create a frozen multivariate t-distribution. See `multivariate_t_frozen` for parameters.
__call__
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def pdf(self, x, loc=None, shape=1, df=1, allow_singular=False): """ Multivariate t-distribution probability density function. Parameters ---------- x : array_like Points at which to evaluate the probability density function. %(_mvt_doc_default_callparams)s Returns ------- pdf : Probability density function evaluated at `x`. Examples -------- >>> from scipy.stats import multivariate_t >>> x = [0.4, 5] >>> loc = [0, 1] >>> shape = [[1, 0.1], [0.1, 1]] >>> df = 7 >>> multivariate_t.pdf(x, loc, shape, df) array([0.00075713]) """ dim, loc, shape, df = self._process_parameters(loc, shape, df) x = self._process_quantiles(x, dim) shape_info = _PSD(shape, allow_singular=allow_singular) logpdf = self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim, shape_info.rank) return np.exp(logpdf)
Multivariate t-distribution probability density function. Parameters ---------- x : array_like Points at which to evaluate the probability density function. %(_mvt_doc_default_callparams)s Returns ------- pdf : Probability density function evaluated at `x`. Examples -------- >>> from scipy.stats import multivariate_t >>> x = [0.4, 5] >>> loc = [0, 1] >>> shape = [[1, 0.1], [0.1, 1]] >>> df = 7 >>> multivariate_t.pdf(x, loc, shape, df) array([0.00075713])
pdf
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def logpdf(self, x, loc=None, shape=1, df=1):
    """
    Log of the multivariate t-distribution probability density function.

    Parameters
    ----------
    x : array_like
        Points at which to evaluate the log of the probability density
        function.
    %(_mvt_doc_default_callparams)s

    Returns
    -------
    logpdf : Log of the probability density function evaluated at `x`.

    Examples
    --------
    >>> from scipy.stats import multivariate_t
    >>> x = [0.4, 5]
    >>> loc = [0, 1]
    >>> shape = [[1, 0.1], [0.1, 1]]
    >>> df = 7
    >>> multivariate_t.logpdf(x, loc, shape, df)
    array([-7.1859802])

    See Also
    --------
    pdf : Probability density function.

    """
    dim, loc, shape, df = self._process_parameters(loc, shape, df)
    x = self._process_quantiles(x, dim)
    # Factor the shape matrix; unlike `pdf`, singular shape matrices
    # are not accepted on this path (matches the original behavior).
    psd = _PSD(shape)
    out = self._logpdf(x, loc, psd.U, psd.log_pdet, df, dim, psd.rank)
    return out
Log of the multivariate t-distribution probability density function. Parameters ---------- x : array_like Points at which to evaluate the log of the probability density function. %(_mvt_doc_default_callparams)s Returns ------- logpdf : Log of the probability density function evaluated at `x`. Examples -------- >>> from scipy.stats import multivariate_t >>> x = [0.4, 5] >>> loc = [0, 1] >>> shape = [[1, 0.1], [0.1, 1]] >>> df = 7 >>> multivariate_t.logpdf(x, loc, shape, df) array([-7.1859802]) See Also -------- pdf : Probability density function.
logpdf
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def _logpdf(self, x, loc, prec_U, log_pdet, df, dim, rank):
    """Utility method `pdf`, `logpdf` for parameters.

    Parameters
    ----------
    x : ndarray
        Points at which to evaluate the log of the probability density
        function.
    loc : ndarray
        Location of the distribution.
    prec_U : ndarray
        A decomposition such that `np.dot(prec_U, prec_U.T)` is the inverse
        of the shape matrix.
    log_pdet : float
        Logarithm of the determinant of the shape matrix.
    df : float
        Degrees of freedom of the distribution.
    dim : int
        Dimension of the quantiles x.
    rank : int
        Rank of the shape matrix.

    Notes
    -----
    As this function does no argument checking, it should not be called
    directly; use 'logpdf' instead.

    """
    # Infinite df is the Gaussian limit: defer to the normal kernel.
    if df == np.inf:
        return multivariate_normal._logpdf(x, loc, prec_U, log_pdet, rank)

    # Squared Mahalanobis distance of each point from `loc`, computed
    # by whitening the deviations with prec_U.
    whitened = np.dot(x - loc, prec_U)
    maha = np.square(whitened).sum(axis=-1)

    # Log of the normalizing constant of the t-density, kept in the
    # same evaluation order as the reference implementation so the
    # floating-point result is bit-identical.
    half_sum = 0.5 * (df + dim)
    log_norm = (gammaln(half_sum)
                - gammaln(0.5 * df)
                - dim/2. * np.log(df * np.pi)
                - 0.5 * log_pdet)
    # Data-dependent kernel term.
    log_kernel = -half_sum * np.log(1 + (1./df) * maha)
    return _squeeze_output(log_norm + log_kernel)
Utility method `pdf`, `logpdf` for parameters. Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function. loc : ndarray Location of the distribution. prec_U : ndarray A decomposition such that `np.dot(prec_U, prec_U.T)` is the inverse of the shape matrix. log_pdet : float Logarithm of the determinant of the shape matrix. df : float Degrees of freedom of the distribution. dim : int Dimension of the quantiles x. rank : int Rank of the shape matrix. Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead.
_logpdf
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause
def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None):
    """
    Draw random samples from a multivariate t-distribution.

    Parameters
    ----------
    %(_mvt_doc_default_callparams)s
    size : integer, optional
        Number of samples to draw (default 1).
    %(_doc_random_state)s

    Returns
    -------
    rvs : ndarray or scalar
        Random variates of size (`size`, `P`), where `P` is the
        dimension of the random variable.

    Examples
    --------
    >>> from scipy.stats import multivariate_t
    >>> x = [0.4, 5]
    >>> loc = [0, 1]
    >>> shape = [[1, 0.1], [0.1, 1]]
    >>> df = 7
    >>> multivariate_t.rvs(loc, shape, df)
    array([[0.93477495, 3.00408716]])

    """
    # Sampling follows equation (3) of:
    #   Hofert, "On Sampling from the Multivariate t Distribution", 2013
    #   http://rjournal.github.io/archive/2013-2/hofert.pdf
    # i.e. a multivariate normal scaled by the square root of an
    # independent chi-square mixing variable.
    dim, loc, shape, df = self._process_parameters(loc, shape, df)

    if random_state is None:
        rng = self._random_state
    else:
        rng = check_random_state(random_state)

    # Mixing variable: chi2(df)/df, degenerating to 1 in the
    # Gaussian (df = inf) limit.
    if np.isinf(df):
        w = np.ones(size)
    else:
        w = rng.chisquare(df, size=size) / df

    z = rng.multivariate_normal(np.zeros(dim), shape, size=size)
    samples = loc + z / np.sqrt(w)[:, None]
    return _squeeze_output(samples)
Draw random samples from a multivariate t-distribution. Parameters ---------- %(_mvt_doc_default_callparams)s size : integer, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (`size`, `P`), where `P` is the dimension of the random variable. Examples -------- >>> from scipy.stats import multivariate_t >>> x = [0.4, 5] >>> loc = [0, 1] >>> shape = [[1, 0.1], [0.1, 1]] >>> df = 7 >>> multivariate_t.rvs(loc, shape, df) array([[0.93477495, 3.00408716]])
rvs
python
statsmodels/statsmodels
statsmodels/compat/_scipy_multivariate_t.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/compat/_scipy_multivariate_t.py
BSD-3-Clause