Create cubic_cpp.cpp
cubic_cpp.cpp  ADDED  (+579 -0)
@@ -0,0 +1,579 @@
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/complex.h>
#include <pybind11/eigen.h>
#include <Eigen/Dense>
#include <vector>
#include <complex>
#include <cmath>
#include <random>
#include <algorithm>
#include <limits>
#include <tuple>

namespace py = pybind11;

// Helper function to apply the y condition: use y itself if y > 1, otherwise 1/y
double apply_y_condition(double y) {
    return y > 1.0 ? y : 1.0 / y;
}

// Discriminant calculation
double discriminant_func(double z, double beta, double z_a, double y) {
    double y_effective = apply_y_condition(y);

    // Coefficients of the cubic a*s^3 + b*s^2 + c*s + d
    double a = z * z_a;
    double b = z * z_a + z + z_a - z_a * y_effective;
    double c = z + z_a + 1.0 - y_effective * (beta * z_a + 1.0 - beta);
    double d = 1.0;

    // Discriminant formula
    return std::pow((b * c) / (6.0 * a * a) - std::pow(b, 3) / (27.0 * std::pow(a, 3)) - d / (2.0 * a), 2) +
           std::pow(c / (3.0 * a) - std::pow(b, 2) / (9.0 * std::pow(a, 2)), 3);
}

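For reference, the value returned by discriminant_func is the standard Cardano quantity for the cubic in s with the coefficients defined above (restated here as a sketch of the algebra, not part of the original file):

$$a s^3 + b s^2 + c s + d = 0, \qquad
\Delta = \left(\frac{bc}{6a^2} - \frac{b^3}{27a^3} - \frac{d}{2a}\right)^{2} + \left(\frac{c}{3a} - \frac{b^2}{9a^2}\right)^{3} = \left(\frac{q}{2}\right)^{2} + \left(\frac{p}{3}\right)^{3},$$

where $t^3 + p t + q = 0$ is the depressed cubic obtained by the substitution $s = t - b/(3a)$. A sign change of $\Delta$ along z marks a boundary between the one-real-root and three-real-root regimes, which is what find_z_at_discriminant_zero scans for next.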
// Find zeros of discriminant
std::vector<double> find_z_at_discriminant_zero(double z_a, double y, double beta,
                                                double z_min, double z_max, int steps) {
    std::vector<double> roots_found;
    double y_effective = apply_y_condition(y);

    // Create z grid
    std::vector<double> z_grid(steps);
    double step_size = (z_max - z_min) / (steps - 1);
    for (int i = 0; i < steps; i++) {
        z_grid[i] = z_min + i * step_size;
    }

    // Evaluate discriminant at each grid point
    std::vector<double> disc_vals(steps);
    for (int i = 0; i < steps; i++) {
        disc_vals[i] = discriminant_func(z_grid[i], beta, z_a, y_effective);
    }

    // Find sign changes (zeros)
    for (int i = 0; i < steps - 1; i++) {
        double f1 = disc_vals[i];
        double f2 = disc_vals[i+1];

        // Skip if NaN
        if (std::isnan(f1) || std::isnan(f2)) {
            continue;
        }

        // Check for exact zeros
        if (f1 == 0.0) {
            roots_found.push_back(z_grid[i]);
        } else if (f2 == 0.0) {
            roots_found.push_back(z_grid[i+1]);
        } else if (f1 * f2 < 0) {
            // Sign change - use binary search (bisection) to refine
            double zl = z_grid[i];
            double zr = z_grid[i+1];

            for (int iter = 0; iter < 50; iter++) {
                double mid = 0.5 * (zl + zr);
                double fm = discriminant_func(mid, beta, z_a, y_effective);

                if (fm == 0.0) {
                    zl = zr = mid;
                    break;
                }

                if ((fm < 0 && f1 < 0) || (fm > 0 && f1 > 0)) {
                    zl = mid;
                    f1 = fm;
                } else {
                    zr = mid;
                    f2 = fm;
                }
            }

            roots_found.push_back(0.5 * (zl + zr));
        }
    }

    return roots_found;
}

+
// Sweep beta and find z bounds
|
98 |
+
std::tuple<std::vector<double>, std::vector<double>, std::vector<double>>
|
99 |
+
sweep_beta_and_find_z_bounds(double z_a, double y, double z_min, double z_max,
|
100 |
+
int beta_steps, int z_steps) {
|
101 |
+
std::vector<double> betas(beta_steps);
|
102 |
+
std::vector<double> z_min_values(beta_steps, 0.0);
|
103 |
+
std::vector<double> z_max_values(beta_steps, 0.0);
|
104 |
+
|
105 |
+
double beta_step = 1.0 / (beta_steps - 1);
|
106 |
+
for (int i = 0; i < beta_steps; i++) {
|
107 |
+
betas[i] = i * beta_step;
|
108 |
+
|
109 |
+
std::vector<double> roots = find_z_at_discriminant_zero(z_a, y, betas[i], z_min, z_max, z_steps);
|
110 |
+
|
111 |
+
if (roots.empty()) {
|
112 |
+
z_min_values[i] = std::numeric_limits<double>::quiet_NaN();
|
113 |
+
z_max_values[i] = std::numeric_limits<double>::quiet_NaN();
|
114 |
+
} else {
|
115 |
+
// Find min and max roots
|
116 |
+
double min_root = *std::min_element(roots.begin(), roots.end());
|
117 |
+
double max_root = *std::max_element(roots.begin(), roots.end());
|
118 |
+
|
119 |
+
z_min_values[i] = min_root;
|
120 |
+
z_max_values[i] = max_root;
|
121 |
+
}
|
122 |
+
}
|
123 |
+
|
124 |
+
return std::make_tuple(betas, z_min_values, z_max_values);
|
125 |
+
}
|
126 |
+
|
// Compute cubic roots
std::vector<std::complex<double>> compute_cubic_roots(double z, double beta, double z_a, double y) {
    double y_effective = apply_y_condition(y);

    // Coefficients
    double a = z * z_a;
    double b = z * z_a + z + z_a - z_a * y_effective;
    double c = z + z_a + 1.0 - y_effective * (beta * z_a + 1.0 - beta);
    double d = 1.0;

    std::vector<std::complex<double>> roots(3);

    // Handle special cases
    if (std::abs(a) < 1e-10) {
        if (std::abs(b) < 1e-10) {
            // Linear case: c*x + d = 0
            roots[0] = std::complex<double>(-d/c, 0);
            roots[1] = std::complex<double>(0, 0);
            roots[2] = std::complex<double>(0, 0);
        } else {
            // Quadratic case: b*x^2 + c*x + d = 0
            double discriminant = c*c - 4.0*b*d;
            if (discriminant >= 0) {
                double sqrt_disc = std::sqrt(discriminant);
                roots[0] = std::complex<double>((-c + sqrt_disc) / (2.0 * b), 0);
                roots[1] = std::complex<double>((-c - sqrt_disc) / (2.0 * b), 0);
            } else {
                double sqrt_disc = std::sqrt(-discriminant);
                roots[0] = std::complex<double>(-c / (2.0 * b), sqrt_disc / (2.0 * b));
                roots[1] = std::complex<double>(-c / (2.0 * b), -sqrt_disc / (2.0 * b));
            }
            roots[2] = std::complex<double>(0, 0);
        }
        return roots;
    }

    // Standard cubic formula implementation
    // Normalize to the form x^3 + p*x^2 + q*x + r = 0
    double p = b / a;
    double q = c / a;
    double r = d / a;

    // Depress the cubic: substitute x = t - p/3 to get t^3 + new_p*t + new_q = 0
    double p_over_3 = p / 3.0;
    double new_p = q - p * p / 3.0;
    double new_q = r - p * q / 3.0 + 2.0 * p * p * p / 27.0;

    // Calculate discriminant
    double discriminant = 4.0 * new_p * new_p * new_p / 27.0 + new_q * new_q;

    if (std::abs(discriminant) < 1e-10) {
        // Three real roots, at least two of them equal
        double u;
        if (std::abs(new_q) < 1e-10) {
            u = 0;
        } else {
            u = std::cbrt(-new_q / 2.0);
        }
        roots[0] = std::complex<double>(2.0 * u - p_over_3, 0);
        roots[1] = std::complex<double>(-u - p_over_3, 0);
        roots[2] = std::complex<double>(-u - p_over_3, 0);
    } else if (discriminant > 0) {
        // One real root, two complex conjugate roots
        double sqrt_disc = std::sqrt(discriminant);
        double u = std::cbrt(-new_q / 2.0 + sqrt_disc / 2.0);
        double v = std::cbrt(-new_q / 2.0 - sqrt_disc / 2.0);

        // Real root
        roots[0] = std::complex<double>(u + v - p_over_3, 0);

        // Complex conjugate roots
        const double sqrt3_over_2 = std::sqrt(3.0) / 2.0;
        roots[1] = std::complex<double>(-0.5 * (u + v) - p_over_3, sqrt3_over_2 * (u - v));
        roots[2] = std::complex<double>(-0.5 * (u + v) - p_over_3, -sqrt3_over_2 * (u - v));
    } else {
        // Three distinct real roots (trigonometric method)
        double theta = std::acos(-new_q / (2.0 * std::sqrt(-std::pow(new_p, 3) / 27.0)));
        double sqrt_term = 2.0 * std::sqrt(-new_p / 3.0);

        roots[0] = std::complex<double>(sqrt_term * std::cos(theta / 3.0) - p_over_3, 0);
        roots[1] = std::complex<double>(sqrt_term * std::cos((theta + 2.0 * M_PI) / 3.0) - p_over_3, 0);
        roots[2] = std::complex<double>(sqrt_term * std::cos((theta + 4.0 * M_PI) / 3.0) - p_over_3, 0);
    }

    return roots;
}

+
// Compute eigenvalue support boundaries
|
215 |
+
std::tuple<std::vector<double>, std::vector<double>>
|
216 |
+
compute_eigenvalue_support_boundaries(double z_a, double y, const std::vector<double>& beta_values,
|
217 |
+
int n_samples, int seeds) {
|
218 |
+
double y_effective = apply_y_condition(y);
|
219 |
+
size_t num_betas = beta_values.size();
|
220 |
+
|
221 |
+
std::vector<double> min_eigenvalues(num_betas, 0.0);
|
222 |
+
std::vector<double> max_eigenvalues(num_betas, 0.0);
|
223 |
+
|
224 |
+
for (size_t i = 0; i < num_betas; i++) {
|
225 |
+
double beta = beta_values[i];
|
226 |
+
|
227 |
+
std::vector<double> min_vals;
|
228 |
+
std::vector<double> max_vals;
|
229 |
+
|
230 |
+
// Run multiple trials
|
231 |
+
for (int seed = 0; seed < seeds; seed++) {
|
232 |
+
// Set random seed
|
233 |
+
std::mt19937 gen(seed * 100 + i);
|
234 |
+
std::normal_distribution<double> normal_dist(0.0, 1.0);
|
235 |
+
|
236 |
+
// Compute dimensions
|
237 |
+
int n = n_samples;
|
238 |
+
int p = static_cast<int>(y_effective * n);
|
239 |
+
|
240 |
+
// Construct T_n (Population/Shape Matrix)
|
241 |
+
int k = static_cast<int>(std::floor(beta * p));
|
242 |
+
Eigen::VectorXd diag_entries(p);
|
243 |
+
|
244 |
+
// Fill diagonal entries
|
245 |
+
for (int j = 0; j < k; j++) {
|
246 |
+
diag_entries(j) = z_a;
|
247 |
+
}
|
248 |
+
for (int j = k; j < p; j++) {
|
249 |
+
diag_entries(j) = 1.0;
|
250 |
+
}
|
251 |
+
|
252 |
+
// Shuffle diagonal entries
|
253 |
+
for (int j = p - 1; j > 0; j--) {
|
254 |
+
std::uniform_int_distribution<int> uniform_dist(0, j);
|
255 |
+
int idx = uniform_dist(gen);
|
256 |
+
std::swap(diag_entries(j), diag_entries(idx));
|
257 |
+
}
|
258 |
+
|
259 |
+
Eigen::MatrixXd T_n = diag_entries.asDiagonal();
|
260 |
+
|
261 |
+
// Generate the data matrix X with i.i.d. standard normal entries
|
262 |
+
Eigen::MatrixXd X(p, n);
|
263 |
+
for (int row = 0; row < p; row++) {
|
264 |
+
for (int col = 0; col < n; col++) {
|
265 |
+
X(row, col) = normal_dist(gen);
|
266 |
+
}
|
267 |
+
}
|
268 |
+
|
269 |
+
// Compute the sample covariance matrix S_n = (1/n) * XX^T
|
270 |
+
Eigen::MatrixXd S_n = (1.0 / n) * (X * X.transpose());
|
271 |
+
|
272 |
+
// Compute B_n = S_n T_n
|
273 |
+
Eigen::MatrixXd B_n = S_n * T_n;
|
274 |
+
|
275 |
+
// Compute eigenvalues of B_n
|
276 |
+
Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> solver(B_n);
|
277 |
+
Eigen::VectorXd eigenvalues = solver.eigenvalues();
|
278 |
+
|
279 |
+
// Find minimum and maximum eigenvalues
|
280 |
+
min_vals.push_back(eigenvalues(0));
|
281 |
+
max_vals.push_back(eigenvalues(p-1));
|
282 |
+
}
|
283 |
+
|
284 |
+
// Average over seeds for stability
|
285 |
+
double min_sum = 0.0, max_sum = 0.0;
|
286 |
+
for (double val : min_vals) min_sum += val;
|
287 |
+
for (double val : max_vals) max_sum += val;
|
288 |
+
|
289 |
+
min_eigenvalues[i] = min_sum / seeds;
|
290 |
+
max_eigenvalues[i] = max_sum / seeds;
|
291 |
+
}
|
292 |
+
|
293 |
+
return std::make_tuple(min_eigenvalues, max_eigenvalues);
|
294 |
+
}
|
295 |
+
|
296 |
+
// Compute high y curve
|
297 |
+
std::vector<double> compute_high_y_curve(const std::vector<double>& betas, double z_a, double y) {
|
298 |
+
double y_effective = apply_y_condition(y);
|
299 |
+
size_t n = betas.size();
|
300 |
+
std::vector<double> result(n);
|
301 |
+
|
302 |
+
double a = z_a;
|
303 |
+
double denominator = 1.0 - 2.0 * a;
|
304 |
+
|
305 |
+
if (std::abs(denominator) < 1e-10) {
|
306 |
+
// Handle division by zero
|
307 |
+
std::fill(result.begin(), result.end(), std::numeric_limits<double>::quiet_NaN());
|
308 |
+
return result;
|
309 |
+
}
|
310 |
+
|
311 |
+
for (size_t i = 0; i < n; i++) {
|
312 |
+
double beta = betas[i];
|
313 |
+
double numerator = -4.0 * a * (a - 1.0) * y_effective * beta - 2.0 * a * y_effective - 2.0 * a * (2.0 * a - 1.0);
|
314 |
+
result[i] = numerator / denominator;
|
315 |
+
}
|
316 |
+
|
317 |
+
return result;
|
318 |
+
}
|
319 |
+
|
// Compute alternate low expression
std::vector<double> compute_alternate_low_expr(const std::vector<double>& betas, double z_a, double y) {
    double y_effective = apply_y_condition(y);
    size_t n = betas.size();
    std::vector<double> result(n);

    for (size_t i = 0; i < n; i++) {
        double beta = betas[i];
        result[i] = (z_a * y_effective * beta * (z_a - 1.0) - 2.0 * z_a * (1.0 - y_effective) - 2.0 * z_a * z_a) / (2.0 + 2.0 * z_a);
    }

    return result;
}

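Written out, the two closed-form curves above are (with $a = z_a$ and $y_{\mathrm{eff}}$ the adjusted ratio; a restatement of the code for readability):

$$f_{\text{high}}(\beta) = \frac{-4a(a-1)\,y_{\mathrm{eff}}\,\beta - 2a\,y_{\mathrm{eff}} - 2a(2a-1)}{1 - 2a},
\qquad
f_{\text{low}}(\beta) = \frac{a\,y_{\mathrm{eff}}\,\beta\,(a-1) - 2a(1 - y_{\mathrm{eff}}) - 2a^{2}}{2 + 2a}.$$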
// Compute max k expression
std::vector<double> compute_max_k_expression(const std::vector<double>& betas, double z_a, double y, int k_samples=1000) {
    double y_effective = apply_y_condition(y);
    size_t n = betas.size();
    std::vector<double> result(n);

    // Sample k values on a logarithmic scale
    std::vector<double> k_values(k_samples);
    double log_min = std::log(0.001);
    double log_max = std::log(1000.0);
    double log_step = (log_max - log_min) / (k_samples - 1);

    for (int i = 0; i < k_samples; i++) {
        k_values[i] = std::exp(log_min + i * log_step);
    }

    for (size_t i = 0; i < n; i++) {
        double beta = betas[i];
        std::vector<double> values(k_samples);

        for (int j = 0; j < k_samples; j++) {
            double k = k_values[j];
            double numerator = y_effective * beta * (z_a - 1.0) * k + (z_a * k + 1.0) * ((y_effective - 1.0) * k - 1.0);
            double denominator = (z_a * k + 1.0) * (k * k + k);

            if (std::abs(denominator) < 1e-10) {
                values[j] = std::numeric_limits<double>::quiet_NaN();
            } else {
                values[j] = numerator / denominator;
            }
        }

        // Find maximum value, ignoring NaNs
        double max_val = -std::numeric_limits<double>::infinity();
        bool found_valid = false;

        for (double val : values) {
            if (!std::isnan(val) && val > max_val) {
                max_val = val;
                found_valid = true;
            }
        }

        result[i] = found_valid ? max_val : std::numeric_limits<double>::quiet_NaN();
    }

    return result;
}

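Both this function and compute_min_t_expression below evaluate the same rational function on a grid and take an extremum; written out for readability (a restatement of the code, not an addition to it):

$$f(t) = \frac{y_{\mathrm{eff}}\,\beta\,(z_a - 1)\,t + (z_a t + 1)\big((y_{\mathrm{eff}} - 1)t - 1\big)}{(z_a t + 1)(t^{2} + t)},$$

maximized over log-spaced samples $t = k \in [10^{-3}, 10^{3}]$ here, and minimized over $t \in (-1/z_a,\, 0)$ in the next function.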
// Compute min t expression
std::vector<double> compute_min_t_expression(const std::vector<double>& betas, double z_a, double y, int t_samples=1000) {
    double y_effective = apply_y_condition(y);
    size_t n = betas.size();
    std::vector<double> result(n);

    if (z_a <= 0) {
        std::fill(result.begin(), result.end(), std::numeric_limits<double>::quiet_NaN());
        return result;
    }

    // Sample t values in (-1/z_a, 0)
    double lower_bound = -1.0 / z_a + 1e-10; // Avoid division by zero at t = -1/z_a
    std::vector<double> t_values(t_samples);
    double t_step = (0.0 - lower_bound) / (t_samples - 1);

    for (int i = 0; i < t_samples; i++) {
        t_values[i] = lower_bound + i * t_step * (1.0 - 1e-10); // Avoid exactly 0
    }

    for (size_t i = 0; i < n; i++) {
        double beta = betas[i];
        std::vector<double> values(t_samples);

        for (int j = 0; j < t_samples; j++) {
            double t = t_values[j];
            double numerator = y_effective * beta * (z_a - 1.0) * t + (z_a * t + 1.0) * ((y_effective - 1.0) * t - 1.0);
            double denominator = (z_a * t + 1.0) * (t * t + t);

            if (std::abs(denominator) < 1e-10) {
                values[j] = std::numeric_limits<double>::quiet_NaN();
            } else {
                values[j] = numerator / denominator;
            }
        }

        // Find minimum value, ignoring NaNs
        double min_val = std::numeric_limits<double>::infinity();
        bool found_valid = false;

        for (double val : values) {
            if (!std::isnan(val) && val < min_val) {
                min_val = val;
                found_valid = true;
            }
        }

        result[i] = found_valid ? min_val : std::numeric_limits<double>::quiet_NaN();
    }

    return result;
}

// Compute derivatives
std::tuple<std::vector<double>, std::vector<double>>
compute_derivatives(const std::vector<double>& curve, const std::vector<double>& betas) {
    size_t n = betas.size();
    std::vector<double> d1(n, 0.0);
    std::vector<double> d2(n, 0.0);

    // First derivative using central differences
    for (size_t i = 1; i < n - 1; i++) {
        double h = betas[i+1] - betas[i-1];
        d1[i] = (curve[i+1] - curve[i-1]) / h;
    }

    // Handle endpoints with forward/backward differences
    if (n > 1) {
        d1[0] = (curve[1] - curve[0]) / (betas[1] - betas[0]);
        d1[n-1] = (curve[n-1] - curve[n-2]) / (betas[n-1] - betas[n-2]);
    }

    // Second derivative using central differences
    // (h is the average local spacing; standard stencil for an approximately uniform grid)
    for (size_t i = 1; i < n - 1; i++) {
        double h = 0.5 * (betas[i+1] - betas[i-1]);
        d2[i] = (curve[i+1] - 2.0 * curve[i] + curve[i-1]) / (h * h);
    }

    // Handle endpoints by copying the neighbouring values
    if (n > 2) {
        d2[0] = d2[1];
        d2[n-1] = d2[n-2];
    }

    return std::make_tuple(d1, d2);
}

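On an approximately uniform β grid with spacing h, the interior stencils used above correspond to the standard central differences (stated as a sketch; the endpoints fall back to one-sided differences and copies, as in the code):

$$f'(\beta_i) \approx \frac{f_{i+1} - f_{i-1}}{2h}, \qquad
f''(\beta_i) \approx \frac{f_{i+1} - 2 f_i + f_{i-1}}{h^{2}}.$$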
// Generate eigenvalue distribution
std::vector<double> generate_eigenvalue_distribution(double beta, double y, double z_a, int n, int seed) {
    double y_effective = apply_y_condition(y);

    // Set random seed
    std::mt19937 gen(seed);
    std::normal_distribution<double> normal_dist(0.0, 1.0);

    // Compute dimension p based on aspect ratio y
    int p = static_cast<int>(y_effective * n);

    // Construct T_n (population/shape matrix)
    int k = static_cast<int>(std::floor(beta * p));
    Eigen::VectorXd diag_entries(p);

    // Fill diagonal entries
    for (int j = 0; j < k; j++) {
        diag_entries(j) = z_a;
    }
    for (int j = k; j < p; j++) {
        diag_entries(j) = 1.0;
    }

    // Shuffle diagonal entries
    for (int j = p - 1; j > 0; j--) {
        std::uniform_int_distribution<int> uniform_dist(0, j);
        int idx = uniform_dist(gen);
        std::swap(diag_entries(j), diag_entries(idx));
    }

    Eigen::MatrixXd T_n = diag_entries.asDiagonal();

    // Generate the data matrix X with i.i.d. standard normal entries
    Eigen::MatrixXd X(p, n);
    for (int row = 0; row < p; row++) {
        for (int col = 0; col < n; col++) {
            X(row, col) = normal_dist(gen);
        }
    }

    // Compute the sample covariance matrix S_n = (1/n) * X * X^T
    Eigen::MatrixXd S_n = (1.0 / n) * (X * X.transpose());

    // Compute B_n = S_n * T_n
    Eigen::MatrixXd B_n = S_n * T_n;

    // Compute eigenvalues of B_n
    Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> solver(B_n);
    Eigen::VectorXd eigenvalues = solver.eigenvalues();

    // Convert to std::vector (eigenvalues in increasing order)
    std::vector<double> result(p);
    for (int i = 0; i < p; i++) {
        result[i] = eigenvalues(i);
    }

    return result;
}

// Python module definition
PYBIND11_MODULE(cubic_cpp, m) {
    m.doc() = "C++ accelerated functions for cubic root analysis";

    m.def("discriminant_func", &discriminant_func,
          "Calculate cubic discriminant",
          py::arg("z"), py::arg("beta"), py::arg("z_a"), py::arg("y"));

    m.def("find_z_at_discriminant_zero", &find_z_at_discriminant_zero,
          "Find zeros of discriminant",
          py::arg("z_a"), py::arg("y"), py::arg("beta"), py::arg("z_min"),
          py::arg("z_max"), py::arg("steps"));

    m.def("sweep_beta_and_find_z_bounds", &sweep_beta_and_find_z_bounds,
          "Compute support boundaries by sweeping beta",
          py::arg("z_a"), py::arg("y"), py::arg("z_min"), py::arg("z_max"),
          py::arg("beta_steps"), py::arg("z_steps"));

    m.def("compute_cubic_roots", &compute_cubic_roots,
          "Compute roots of cubic equation",
          py::arg("z"), py::arg("beta"), py::arg("z_a"), py::arg("y"));

    m.def("compute_eigenvalue_support_boundaries", &compute_eigenvalue_support_boundaries,
          "Compute eigenvalue support boundaries using random matrices",
          py::arg("z_a"), py::arg("y"), py::arg("beta_values"),
          py::arg("n_samples"), py::arg("seeds"));

    m.def("compute_high_y_curve", &compute_high_y_curve,
          "Compute high y expression curve",
          py::arg("betas"), py::arg("z_a"), py::arg("y"));

    m.def("compute_alternate_low_expr", &compute_alternate_low_expr,
          "Compute alternate low expression curve",
          py::arg("betas"), py::arg("z_a"), py::arg("y"));

    m.def("compute_max_k_expression", &compute_max_k_expression,
          "Compute max k expression",
          py::arg("betas"), py::arg("z_a"), py::arg("y"), py::arg("k_samples") = 1000);

    m.def("compute_min_t_expression", &compute_min_t_expression,
          "Compute min t expression",
          py::arg("betas"), py::arg("z_a"), py::arg("y"), py::arg("t_samples") = 1000);

    m.def("compute_derivatives", &compute_derivatives,
          "Compute first and second derivatives",
          py::arg("curve"), py::arg("betas"));

    m.def("generate_eigenvalue_distribution", &generate_eigenvalue_distribution,
          "Generate eigenvalue distribution simulation",
          py::arg("beta"), py::arg("y"), py::arg("z_a"), py::arg("n"), py::arg("seed"));
}