Spaces: Running on Zero

adds all available datapoints for extension windows and optional smoothing
app.py CHANGED

@@ -371,7 +371,7 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
                     use_ensemble: bool = True, use_regime_detection: bool = True, use_stress_testing: bool = True,
                     risk_free_rate: float = 0.02, ensemble_weights: Dict = None,
                     market_index: str = "^GSPC",
-                    random_real_points: int = 4) -> Tuple[Dict, go.Figure]:
+                    random_real_points: int = 4, use_smoothing: bool = True) -> Tuple[Dict, go.Figure]:
     """
     Make prediction using selected strategy with advanced features.

@@ -387,6 +387,7 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
         ensemble_weights (Dict): Weights for ensemble models
         market_index (str): Market index for correlation analysis
         random_real_points (int): Number of random real points to include in long-horizon context
+        use_smoothing (bool): Whether to apply smoothing to predictions

     Returns:
         Tuple[Dict, go.Figure]: Trading signals and visualization plot

@@ -646,15 +647,15 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
             print(f"Warning: Discontinuity detected between last actual ({last_actual}) and first prediction ({first_pred})")
             # Apply continuity correction to first prediction
             mean_pred[0] = last_actual
-            # Adjust subsequent predictions to maintain trend with smoothing
+            # Adjust subsequent predictions to maintain trend with optional smoothing
             if len(mean_pred) > 1:
                 # Calculate the trend from the original prediction
                 original_trend = mean_pred[1] - first_pred
                 # Apply the same trend but starting from the last actual value
                 for i in range(1, len(mean_pred)):
                     mean_pred[i] = last_actual + original_trend * i
-                    # Add smoothing to prevent drift
-                    if i > 1:
+                    # Add smoothing to prevent drift if enabled
+                    if use_smoothing and i > 1:
                         smoothing_factor = 0.95
                         mean_pred[i] = smoothing_factor * mean_pred[i] + (1 - smoothing_factor) * mean_pred[i-1]

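The same continuity-plus-trend correction, gated by the new flag, is repeated below for volume, RSI, and MACD. A minimal self-contained sketch of the idea, assuming the same 0.95 blend factor (`extend_with_trend` is a hypothetical helper, not a function in app.py):

```python
import numpy as np

def extend_with_trend(last_actual: float, pred: np.ndarray,
                      use_smoothing: bool = True, smoothing_factor: float = 0.95) -> np.ndarray:
    """Anchor a forecast to the last observed value and optionally damp drift."""
    out = pred.astype(float).copy()
    first_pred = out[0]
    out[0] = last_actual                         # continuity correction
    if len(out) > 1:
        trend = out[1] - first_pred              # per-step trend of the original forecast
        for i in range(1, len(out)):
            out[i] = last_actual + trend * i     # re-anchor the trend at last_actual
            if use_smoothing and i > 1:          # blend with the previous step to damp drift
                out[i] = smoothing_factor * out[i] + (1 - smoothing_factor) * out[i - 1]
    return out

# extend_with_trend(100.0, np.array([103.0, 104.0, 105.0])) starts at 100.0 and keeps the
# original +1.0/step trend, slightly damped from the second step on when use_smoothing is True.
```
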
@@ -668,8 +669,16 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
             steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
             for step in range(steps_needed):

+                # Use all available datapoints for context, prioritizing actual data over predictions
+                all_available_data = np.concatenate([prices, extended_mean_pred])
+
+                # If we have more data than window_size, use the most recent window_size points
+                # Otherwise, use all available data (this allows for longer context when available)
+                if len(all_available_data) > window_size:
+                    context_window = all_available_data[-window_size:]
+                else:
+                    context_window = all_available_data
+
                 scaler = MinMaxScaler(feature_range=(-1, 1))

                 # Convert to tensor and ensure proper shape

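Each extension step now builds its context from the full price history plus everything predicted so far, rather than mixing a few random real points into the tail of the forecast. A small sketch of just that windowing logic (`build_context` and the placeholder model call are illustrative; the real loop normalizes the window and runs the Chronos pipeline on it):

```python
import numpy as np

def build_context(actual: np.ndarray, predicted_so_far: np.ndarray, window_size: int) -> np.ndarray:
    """Concatenate real data and predictions, keeping only the most recent window_size points."""
    all_available = np.concatenate([actual, predicted_so_far])
    return all_available[-window_size:] if len(all_available) > window_size else all_available

# Rolling extension: each step feeds the growing forecast back into the next context.
history = np.linspace(100.0, 110.0, 64)               # stand-in for the real price series
extended = np.array([110.2, 110.5])                   # forecast produced so far
for _ in range(3):                                    # pretend three more extension steps are needed
    context = build_context(history, extended, window_size=64)
    next_chunk = context[-1] + np.arange(1, 4) * 0.1  # placeholder for a model call
    extended = np.concatenate([extended, next_chunk])
```
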
@@ -760,15 +769,15 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
             print(f"Warning: Discontinuity detected between last actual volume ({last_actual}) and first prediction ({first_pred})")
             # Apply continuity correction
             volume_pred[0] = last_actual
-            # Adjust subsequent predictions to maintain trend with smoothing
+            # Adjust subsequent predictions to maintain trend with optional smoothing
             if len(volume_pred) > 1:
                 # Calculate the trend from the original prediction
                 original_trend = volume_pred[1] - first_pred
                 # Apply the same trend but starting from the last actual value
                 for i in range(1, len(volume_pred)):
                     volume_pred[i] = last_actual + original_trend * i
-                    # Add smoothing to prevent drift
-                    if i > 1:
+                    # Add smoothing to prevent drift if enabled
+                    if use_smoothing and i > 1:
                         smoothing_factor = 0.95
                         volume_pred[i] = smoothing_factor * volume_pred[i] + (1 - smoothing_factor) * volume_pred[i-1]
             # Extend volume predictions if needed

@@ -778,22 +787,16 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
             remaining_steps = trim_length - actual_prediction_length
             steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
             for step in range(steps_needed):
+                # Use all available datapoints for context, prioritizing actual data over predictions
+                all_available_data = np.concatenate([volume_data, extended_mean_pred])
+
+                # If we have more data than window_size, use the most recent window_size points
+                # Otherwise, use all available data (this allows for longer context when available)
+                if len(all_available_data) > window_size:
+                    context_window = all_available_data[-window_size:]
                 else:
-                    random_real = np.random.choice(volume_data, size=n_random_real, replace=False)
-                    context_window = np.concatenate([
-                        extended_mean_pred[-(window_size - n_random_real):],
-                        random_real
-                    ])
+                    context_window = all_available_data
+
                 volume_scaler = MinMaxScaler(feature_range=(-1, 1))
                 normalized_context = volume_scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
                 context = torch.tensor(normalized_context, dtype=dtype, device=device)

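As in the price branch, every extension step rescales its context into the [-1, 1] range before handing it to the model; the forecasts presumably come back through the same scaler's inverse transform, although that part is outside this hunk. A self-contained sketch of the normalization round trip with made-up volume numbers:

```python
import numpy as np
from sklearn.preprocessing import MinMaxScaler

context_window = np.array([1.2e6, 0.9e6, 1.5e6, 1.1e6, 1.8e6])  # e.g. recent volume values

volume_scaler = MinMaxScaler(feature_range=(-1, 1))
normalized_context = volume_scaler.fit_transform(context_window.reshape(-1, 1)).flatten()

# ... the model forecasts in the normalized space ...
normalized_pred = np.array([0.10, 0.25, 0.30])

# Map forecasts back to the original scale with the same fitted scaler.
volume_pred = volume_scaler.inverse_transform(normalized_pred.reshape(-1, 1)).flatten()
```
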
@@ -880,21 +883,16 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
             remaining_steps = trim_length - actual_prediction_length
             steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
             for step in range(steps_needed):
+                # Use all available datapoints for context, prioritizing actual data over predictions
+                all_available_data = np.concatenate([rsi_data, extended_mean_pred])
+
+                # If we have more data than window_size, use the most recent window_size points
+                # Otherwise, use all available data (this allows for longer context when available)
+                if len(all_available_data) > window_size:
+                    context_window = all_available_data[-window_size:]
                 else:
-                    random_real = np.random.choice(rsi_data, size=n_random_real, replace=False)
-                    context_window = np.concatenate([
-                        extended_mean_pred[-(window_size - n_random_real):],
-                        random_real
-                    ])
+                    context_window = all_available_data
+
                 rsi_scaler = MinMaxScaler(feature_range=(-1, 1))
                 normalized_context = rsi_scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
                 context = torch.tensor(normalized_context, dtype=dtype, device=device)

@@ -971,15 +969,15 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
             print(f"Warning: Discontinuity detected between last actual MACD ({last_actual}) and first prediction ({first_pred})")
             # Apply continuity correction
             macd_pred[0] = last_actual
-            # Adjust subsequent predictions to maintain trend with smoothing
+            # Adjust subsequent predictions to maintain trend with optional smoothing
             if len(macd_pred) > 1:
                 # Calculate the trend from the original prediction
                 original_trend = macd_pred[1] - first_pred
                 # Apply the same trend but starting from the last actual value
                 for i in range(1, len(macd_pred)):
                     macd_pred[i] = last_actual + original_trend * i
-                    # Add smoothing to prevent drift
-                    if i > 1:
+                    # Add smoothing to prevent drift if enabled
+                    if use_smoothing and i > 1:
                         smoothing_factor = 0.95
                         macd_pred[i] = smoothing_factor * macd_pred[i] + (1 - smoothing_factor) * macd_pred[i-1]
             if actual_prediction_length < trim_length:

@@ -988,21 +986,16 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
             remaining_steps = trim_length - actual_prediction_length
             steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
             for step in range(steps_needed):
+                # Use all available datapoints for context, prioritizing actual data over predictions
+                all_available_data = np.concatenate([macd_data, extended_mean_pred])
+
+                # If we have more data than window_size, use the most recent window_size points
+                # Otherwise, use all available data (this allows for longer context when available)
+                if len(all_available_data) > window_size:
+                    context_window = all_available_data[-window_size:]
                 else:
-                    random_real = np.random.choice(macd_data, size=n_random_real, replace=False)
-                    context_window = np.concatenate([
-                        extended_mean_pred[-(window_size - n_random_real):],
-                        random_real
-                    ])
+                    context_window = all_available_data
+
                 macd_scaler = MinMaxScaler(feature_range=(-1, 1))
                 normalized_context = macd_scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
                 context = torch.tensor(normalized_context, dtype=dtype, device=device)

@@ -1941,6 +1934,7 @@ def create_interface():
                 use_ensemble = gr.Checkbox(label="Use Ensemble Methods", value=True)
                 use_regime_detection = gr.Checkbox(label="Use Regime Detection", value=True)
                 use_stress_testing = gr.Checkbox(label="Use Stress Testing", value=True)
+                use_smoothing = gr.Checkbox(label="Use Smoothing", value=True)
                 risk_free_rate = gr.Slider(
                     minimum=0.0,
                     maximum=0.1,

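The new checkbox only reaches make_prediction because it is also appended to each predict button's inputs list further down. A stripped-down sketch of that Gradio wiring pattern (component names and the run handler are illustrative, not the app's layout):

```python
import gradio as gr

def run(symbol: str, use_smoothing: bool) -> str:
    return f"{symbol}: smoothing {'enabled' if use_smoothing else 'disabled'}"

with gr.Blocks() as demo:
    symbol = gr.Textbox(label="Symbol", value="AAPL")
    use_smoothing = gr.Checkbox(label="Use Smoothing", value=True)
    result = gr.Textbox(label="Result")
    predict_btn = gr.Button("Analyze")
    # The checkbox value only reaches the handler if it is listed in inputs=.
    predict_btn.click(fn=run, inputs=[symbol, use_smoothing], outputs=[result])

# demo.launch()
```
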
@@ -2174,7 +2168,7 @@ def create_interface():
         def analyze_stock(symbol, timeframe, prediction_days, lookback_days, strategy,
                           use_ensemble, use_regime_detection, use_stress_testing,
                           risk_free_rate, market_index, chronos_weight, technical_weight, statistical_weight,
-                          random_real_points):
+                          random_real_points, use_smoothing):
             try:
                 # Create ensemble weights
                 ensemble_weights = {

@@ -2199,7 +2193,8 @@ def create_interface():
                     risk_free_rate=risk_free_rate,
                     ensemble_weights=ensemble_weights,
                     market_index=market_index,
-                    random_real_points=random_real_points
+                    random_real_points=random_real_points,
+                    use_smoothing=use_smoothing
                 )

                 # Get historical data for additional metrics

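analyze_stock assembles the three slider values into the ensemble_weights dict created just above (only its opening brace is visible in this diff) and forwards everything to make_prediction. A plausible sketch of that packing, with the key names assumed rather than taken from app.py and the weights normalized to sum to 1.0 as the docstrings recommend:

```python
def make_ensemble_weights(chronos_weight: float, technical_weight: float,
                          statistical_weight: float) -> dict:
    """Normalize the three model weights so they sum to 1.0 and pack them into a dict."""
    total = chronos_weight + technical_weight + statistical_weight
    if total <= 0:  # degenerate sliders: fall back to the Chronos model only
        return {"chronos": 1.0, "technical": 0.0, "statistical": 0.0}
    return {
        "chronos": chronos_weight / total,
        "technical": technical_weight / total,
        "statistical": statistical_weight / total,
    }

# make_ensemble_weights(0.6, 0.2, 0.2) -> {'chronos': 0.6, 'technical': 0.2, 'statistical': 0.2}
```
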
@@ -2282,7 +2277,7 @@ def create_interface():
         # Daily analysis button click
         def daily_analysis(s: str, pd: int, ld: int, st: str, ue: bool, urd: bool, ust: bool,
                            rfr: float, mi: str, cw: float, tw: float, sw: float,
-                           rrp: int) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
+                           rrp: int, usm: bool) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
             """
             Process daily timeframe stock analysis with advanced features.

@@ -2317,6 +2312,8 @@ def create_interface():
                 sw (float): Statistical weight in ensemble (0.0-1.0)
                     Weight given to statistical model predictions in ensemble methods
                 rrp (int): Number of random real points to include in long-horizon context
+                usm (bool): Use smoothing
+                    When True, applies smoothing to predictions to reduce noise and improve continuity

             Returns:
                 Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]: Analysis results containing:

@@ -2336,7 +2333,7 @@ def create_interface():

             Example:
                 >>> signals, plot, metrics, risk, sector, regime, stress, ensemble, advanced = daily_analysis(
-                ...     "AAPL", 30, 365, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2, 4
+                ...     "AAPL", 30, 365, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2, 4, True
                 ... )

             Notes:

@@ -2345,15 +2342,16 @@ def create_interface():
                 - Historical data can go back up to 10 years (3650 days)
                 - Ensemble weights should sum to 1.0 for optimal results
                 - Risk-free rate is typically between 0.02-0.05 (2-5% annually)
+                - Smoothing helps reduce prediction noise but may reduce responsiveness to sudden changes
             """
-            return analyze_stock(s, "1d", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw, rrp)
+            return analyze_stock(s, "1d", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw, rrp, usm)

         daily_predict_btn.click(
             fn=daily_analysis,
             inputs=[daily_symbol, daily_prediction_days, daily_lookback_days, daily_strategy,
                     use_ensemble, use_regime_detection, use_stress_testing, risk_free_rate, market_index,
                     chronos_weight, technical_weight, statistical_weight,
-                    random_real_points],
+                    random_real_points, use_smoothing],
             outputs=[daily_signals, daily_plot, daily_metrics, daily_risk_metrics, daily_sector_metrics,
                      daily_regime_metrics, daily_stress_results, daily_ensemble_metrics, daily_signals_advanced]
         )

@@ -2361,7 +2359,7 @@ def create_interface():
         # Hourly analysis button click
         def hourly_analysis(s: str, pd: int, ld: int, st: str, ue: bool, urd: bool, ust: bool,
                             rfr: float, mi: str, cw: float, tw: float, sw: float,
-                            rrp: int) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
+                            rrp: int, usm: bool) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
             """
             Process hourly timeframe stock analysis with advanced features.

@@ -2396,6 +2394,8 @@ def create_interface():
                 sw (float): Statistical weight in ensemble (0.0-1.0)
                     Weight for statistical models in ensemble predictions
                 rrp (int): Number of random real points to include in long-horizon context
+                usm (bool): Use smoothing
+                    When True, applies smoothing to predictions to reduce noise and improve continuity

             Returns:
                 Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]: Analysis results containing:

@@ -2415,7 +2415,7 @@ def create_interface():

             Example:
                 >>> signals, plot, metrics, risk, sector, regime, stress, ensemble, advanced = hourly_analysis(
-                ...     "AAPL", 3, 14, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2, 4
+                ...     "AAPL", 3, 14, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2, 4, True
                 ... )

             Notes:

@@ -2425,15 +2425,16 @@ def create_interface():
                 - Includes pre/post market data for extended hours analysis
                 - Optimized for day trading and swing trading strategies
                 - Requires high-liquidity stocks for reliable hourly analysis
+                - Smoothing helps reduce prediction noise but may reduce responsiveness to sudden changes
             """
-            return analyze_stock(s, "1h", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw, rrp)
+            return analyze_stock(s, "1h", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw, rrp, usm)

         hourly_predict_btn.click(
             fn=hourly_analysis,
             inputs=[hourly_symbol, hourly_prediction_days, hourly_lookback_days, hourly_strategy,
                     use_ensemble, use_regime_detection, use_stress_testing, risk_free_rate, market_index,
                     chronos_weight, technical_weight, statistical_weight,
-                    random_real_points],
+                    random_real_points, use_smoothing],
             outputs=[hourly_signals, hourly_plot, hourly_metrics, hourly_risk_metrics, hourly_sector_metrics,
                      hourly_regime_metrics, hourly_stress_results, hourly_ensemble_metrics, hourly_signals_advanced]
         )

@@ -2441,7 +2442,7 @@ def create_interface():
         # 15-minute analysis button click
         def min15_analysis(s: str, pd: int, ld: int, st: str, ue: bool, urd: bool, ust: bool,
                            rfr: float, mi: str, cw: float, tw: float, sw: float,
-                           rrp: int) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
+                           rrp: int, usm: bool) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
             """
             Process 15-minute timeframe stock analysis with advanced features.

@@ -2476,6 +2477,8 @@ def create_interface():
                 sw (float): Statistical weight in ensemble (0.0-1.0)
                     Weight for statistical models in ensemble predictions
                 rrp (int): Number of random real points to include in long-horizon context
+                usm (bool): Use smoothing
+                    When True, applies smoothing to predictions to reduce noise and improve continuity

             Returns:
                 Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]: Analysis results containing:

@@ -2495,7 +2498,7 @@ def create_interface():

             Example:
                 >>> signals, plot, metrics, risk, sector, regime, stress, ensemble, advanced = min15_analysis(
-                ...     "AAPL", 1, 3, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2, 4
+                ...     "AAPL", 1, 3, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2, 4, True
                 ... )

             Notes:

@@ -2507,15 +2510,16 @@ def create_interface():
                 - Includes specialized indicators for intraday momentum and volume analysis
                 - Higher transaction costs and slippage considerations for 15-minute strategies
                 - Best suited for highly liquid large-cap stocks with tight bid-ask spreads
+                - Smoothing helps reduce prediction noise but may reduce responsiveness to sudden changes
             """
-            return analyze_stock(s, "15m", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw, rrp)
+            return analyze_stock(s, "15m", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw, rrp, usm)

         min15_predict_btn.click(
             fn=min15_analysis,
             inputs=[min15_symbol, min15_prediction_days, min15_lookback_days, min15_strategy,
                     use_ensemble, use_regime_detection, use_stress_testing, risk_free_rate, market_index,
                     chronos_weight, technical_weight, statistical_weight,
-                    random_real_points],
+                    random_real_points, use_smoothing],
             outputs=[min15_signals, min15_plot, min15_metrics, min15_risk_metrics, min15_sector_metrics,
                      min15_regime_metrics, min15_stress_results, min15_ensemble_metrics, min15_signals_advanced]
         )