Tonic committed · verified
Commit 9dc46d3 · Parent(s): 09ddef3

improve smoothing and normalization, fix discontinuities, improve denormalization and recursive forecasting

Files changed (1): app.py (+293 -135)
app.py CHANGED
@@ -370,7 +370,8 @@ def calculate_bollinger_bands(prices: pd.Series, period: int = 20, std_dev: int
 def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5, strategy: str = "chronos",
                     use_ensemble: bool = True, use_regime_detection: bool = True, use_stress_testing: bool = True,
                     risk_free_rate: float = 0.02, ensemble_weights: Dict = None,
-                    market_index: str = "^GSPC") -> Tuple[Dict, go.Figure]:
+                    market_index: str = "^GSPC",
+                    random_real_points: int = 4) -> Tuple[Dict, go.Figure]:
     """
     Make prediction using selected strategy with advanced features.
 
@@ -385,6 +386,7 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
         risk_free_rate (float): Risk-free rate for calculations
         ensemble_weights (Dict): Weights for ensemble models
         market_index (str): Market index for correlation analysis
+        random_real_points (int): Number of random real points to include in long-horizon context
 
     Returns:
         Tuple[Dict, go.Figure]: Trading signals and visualization plot
@@ -397,10 +399,13 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
     try:
         # Prepare data for Chronos
         prices = df['Close'].values
-        normalized_prices = scaler.fit_transform(prices.reshape(-1, 1)).flatten()
+        window_size = 64  # Chronos context window size
+        context_window = prices[-window_size:]
+        scaler = MinMaxScaler(feature_range=(-1, 1))
+        normalized_prices = scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
 
         # Ensure we have enough data points
-        min_data_points = 64
+        min_data_points = window_size
         if len(normalized_prices) < min_data_points:
             padding = np.full(min_data_points - len(normalized_prices), normalized_prices[-1])
             normalized_prices = np.concatenate([padding, normalized_prices])
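
Note: the change above fits the scaler on the last 64 closes only, and the same fitted instance is reused later to invert the model output. A minimal standalone sketch of that roundtrip, with synthetic data (the forecast stand-in is illustrative, not the app's model):

import numpy as np
from sklearn.preprocessing import MinMaxScaler

prices = np.cumsum(np.random.randn(500)) + 100.0  # synthetic price series
window_size = 64

# Fit the scaler on the context window only, not the full history,
# so the (-1, 1) range reflects the regime the model actually sees.
context_window = prices[-window_size:]
scaler = MinMaxScaler(feature_range=(-1, 1))
normalized = scaler.fit_transform(context_window.reshape(-1, 1)).flatten()

# Any forecast produced from this context must be inverted with the
# SAME scaler instance, otherwise the output lands on the wrong scale.
fake_forecast = normalized[-5:]  # stand-in for model output
denormalized = scaler.inverse_transform(fake_forecast.reshape(-1, 1)).flatten()
assert np.allclose(denormalized, context_window[-5:])
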
@@ -421,23 +426,17 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
 
         # Adjust prediction length based on timeframe
         if timeframe == "1d":
-            max_prediction_length = 64  # Chronos maximum
-            window_size = 64  # Use full context window
-        elif timeframe == "1h":
-            max_prediction_length = 64  # Chronos maximum
-            window_size = 64  # Use full context window
-        else:  # 15m
-            max_prediction_length = 64  # Chronos maximum
-            window_size = 64  # Use full context window
-
-        # Calculate actual prediction length based on timeframe
-        if timeframe == "1d":
+            max_prediction_length = window_size  # 64 days
             actual_prediction_length = min(prediction_days, max_prediction_length)
+            trim_length = prediction_days
         elif timeframe == "1h":
+            max_prediction_length = window_size  # 64 hours
             actual_prediction_length = min(prediction_days * 24, max_prediction_length)
+            trim_length = prediction_days * 24
         else:  # 15m
+            max_prediction_length = window_size  # 64 intervals
             actual_prediction_length = min(prediction_days * 96, max_prediction_length)
+            trim_length = prediction_days * 96
-
         actual_prediction_length = max(1, actual_prediction_length)
 
         # Use predict_quantiles with proper formatting
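
Note: since a single Chronos call is capped at 64 steps, the horizon requested in days is first converted into model steps per timeframe, and trim_length records the exact step count to cut back to after recursive extension. A sketch of that bookkeeping as a hypothetical helper (not in the diff):

def horizon_steps(prediction_days: int, timeframe: str, window_size: int = 64):
    """Return (steps for the first model call, total steps to keep)."""
    steps_per_day = {"1d": 1, "1h": 24, "15m": 96}[timeframe]
    trim_length = prediction_days * steps_per_day       # exact horizon in steps
    first_call = max(1, min(trim_length, window_size))  # capped at the model max
    return first_call, trim_length

assert horizon_steps(5, "1d") == (5, 5)
assert horizon_steps(5, "1h") == (64, 120)  # 120 hours wanted, 64 per call
assert horizon_steps(1, "15m") == (64, 96)
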
@@ -628,7 +627,7 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
         quantiles = quantiles.detach().cpu().numpy()
         mean = mean.detach().cpu().numpy()
 
-        # Denormalize predictions
+        # Denormalize predictions using the same scaler as context
         mean_pred = scaler.inverse_transform(mean.reshape(-1, 1)).flatten()
         lower_bound = scaler.inverse_transform(quantiles[0, :, 0].reshape(-1, 1)).flatten()
         upper_bound = scaler.inverse_transform(quantiles[0, :, 2].reshape(-1, 1)).flatten()
@@ -636,37 +635,39 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
         # Calculate standard deviation from quantiles
         std_pred = (upper_bound - lower_bound) / (2 * 1.645)
 
-        # If we had to limit the prediction length, extend the prediction
-        if actual_prediction_length < prediction_days:
-            # Initialize arrays for extended predictions
+        # Check for discontinuity between last actual and first prediction
+        last_actual = prices[-1]
+        first_pred = mean_pred[0]
+        if abs(first_pred - last_actual) > max(1e-6, 0.05 * abs(last_actual)):
+            print(f"Warning: Discontinuity detected between last actual ({last_actual}) and first prediction ({first_pred})")
+
+        # If we had to limit the prediction length, extend the prediction recursively
+        if actual_prediction_length < trim_length:
             extended_mean_pred = mean_pred.copy()
             extended_std_pred = std_pred.copy()
 
             # Calculate the number of extension steps needed
-            remaining_days = prediction_days - actual_prediction_length
-            steps_needed = (remaining_days + actual_prediction_length - 1) // actual_prediction_length
-
+            remaining_steps = trim_length - actual_prediction_length
+            steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
             for step in range(steps_needed):
-                # Use the last window_size points as context for next prediction
-                context_window = extended_mean_pred[-window_size:]
-
-                # Normalize the context window
-                normalized_context = scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
+                # Use last window_size points as context for next prediction
+                context_window = np.concatenate([prices, extended_mean_pred])[-window_size:]
+                scaler = MinMaxScaler(feature_range=(-1, 1))
 
                 # Convert to tensor and ensure proper shape
+                normalized_context = scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
                 context = torch.tensor(normalized_context, dtype=dtype, device=device)
                 if len(context.shape) == 1:
                     context = context.unsqueeze(0)
 
                 # Calculate next prediction length based on timeframe
                 if timeframe == "1d":
-                    next_length = min(max_prediction_length, remaining_days)
+                    next_length = min(max_prediction_length, remaining_steps)
                 elif timeframe == "1h":
-                    next_length = min(max_prediction_length, remaining_days * 24)
-                else:  # 15m
-                    next_length = min(max_prediction_length, remaining_days * 96)
-
-                # Make prediction for next window
+                    next_length = min(max_prediction_length, remaining_steps)
+                else:
+                    next_length = min(max_prediction_length, remaining_steps)
                 with torch.amp.autocast('cuda'):
                     next_quantiles, next_mean = pipe.predict_quantiles(
                         context=context,
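
Note: the uncertainty band is recovered from the 10%/90% quantiles under a normal approximation. One subtlety worth knowing when reading this hunk: for a normal distribution the 10-90% interquantile range is 2 x 1.2816 sigma (the z-value at the 90th percentile), while 1.645 is the z-value for the 95th percentile, so the divisor used in the commit slightly overstates the spread divisor and understates sigma. A sketch of both variants, assuming normality (example quantile values are made up):

import numpy as np
from scipy.stats import norm

q10, q90 = 98.0, 102.0  # example 10% / 90% quantiles of one forecast step

z90 = norm.ppf(0.90)                     # ~1.2816, matches the 0.1/0.9 levels used
std_exact = (q90 - q10) / (2 * z90)      # ~1.56
std_commit = (q90 - q10) / (2 * 1.645)   # ~1.22, the divisor used in the commit
print(std_exact, std_commit)
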
@@ -685,38 +686,19 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
 
                 # Calculate standard deviation
                 next_std_pred = (next_upper - next_lower) / (2 * 1.645)
-
-                # Apply exponential smoothing to reduce prediction drift
-                if step > 0:
-                    alpha = 0.3  # Smoothing factor
-                    next_mean_pred = alpha * next_mean_pred + (1 - alpha) * extended_mean_pred[-len(next_mean_pred):]
-                    next_std_pred = alpha * next_std_pred + (1 - alpha) * extended_std_pred[-len(next_std_pred):]
-
+                if abs(next_mean_pred[0] - extended_mean_pred[-1]) > max(1e-6, 0.05 * abs(extended_mean_pred[-1])):
+                    print(f"Warning: Discontinuity detected between last prediction ({extended_mean_pred[-1]}) and next prediction ({next_mean_pred[0]})")
+
                 # Append predictions
                 extended_mean_pred = np.concatenate([extended_mean_pred, next_mean_pred])
                 extended_std_pred = np.concatenate([extended_std_pred, next_std_pred])
-
-                # Update remaining days
-                if timeframe == "1d":
-                    remaining_days -= len(next_mean_pred)
-                elif timeframe == "1h":
-                    remaining_days -= len(next_mean_pred) / 24
-                else:  # 15m
-                    remaining_days -= len(next_mean_pred) / 96
-
-                if remaining_days <= 0:
+                remaining_steps -= len(next_mean_pred)
+                if remaining_steps <= 0:
                     break
 
             # Trim to exact prediction length if needed
-            if timeframe == "1d":
-                mean_pred = extended_mean_pred[:prediction_days]
-                std_pred = extended_std_pred[:prediction_days]
-            elif timeframe == "1h":
-                mean_pred = extended_mean_pred[:prediction_days * 24]
-                std_pred = extended_std_pred[:prediction_days * 24]
-            else:  # 15m
-                mean_pred = extended_mean_pred[:prediction_days * 96]
-                std_pred = extended_std_pred[:prediction_days * 96]
+            mean_pred = extended_mean_pred[:trim_length]
+            std_pred = extended_std_pred[:trim_length]
 
         # Extend Chronos forecasting to volume and technical indicators
         volume_pred = None
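
Note: the extension loop above is recursive (autoregressive) forecasting: each pass feeds the freshest real-plus-predicted values back in as context until the requested horizon is filled, then trims to the exact step count. A stripped-down sketch of the pattern with a stubbed one-step model (all names here are illustrative, not the app's):

import numpy as np

def fake_model(context: np.ndarray, length: int) -> np.ndarray:
    # Stand-in for pipe.predict_quantiles: drift the last value slightly.
    return context[-1] + 0.01 * np.arange(1, length + 1)

def recursive_forecast(history: np.ndarray, horizon: int,
                       window_size: int = 64, max_len: int = 64) -> np.ndarray:
    preds = np.empty(0)
    while len(preds) < horizon:
        # Context is always the freshest window of real + predicted points.
        context = np.concatenate([history, preds])[-window_size:]
        step = fake_model(context, min(max_len, horizon - len(preds)))
        preds = np.concatenate([preds, step])
    return preds[:horizon]  # trim to the exact requested horizon

out = recursive_forecast(np.linspace(100, 110, 200), horizon=150)
assert len(out) == 150
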
@@ -728,118 +710,276 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
             volume_data = df['Volume'].values
             if len(volume_data) >= 64:
                 # Normalize volume data
+                window_size = 64
+                context_window = volume_data[-window_size:]
                 volume_scaler = MinMaxScaler(feature_range=(-1, 1))
-                normalized_volume = volume_scaler.fit_transform(volume_data.reshape(-1, 1)).flatten()
-
-                # Use last 64 points for volume prediction
-                volume_context = normalized_volume[-64:]
-                volume_context_tensor = torch.tensor(volume_context, dtype=dtype, device=device)
-                if len(volume_context_tensor.shape) == 1:
-                    volume_context_tensor = volume_context_tensor.unsqueeze(0)
-
-                # Predict volume
+                normalized_volume = volume_scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
+                if len(normalized_volume) < window_size:
+                    padding = np.full(window_size - len(normalized_volume), normalized_volume[-1])
+                    normalized_volume = np.concatenate([padding, normalized_volume])
+                elif len(normalized_volume) > window_size:
+                    normalized_volume = normalized_volume[-window_size:]
+                volume_context = torch.tensor(normalized_volume, dtype=dtype, device=device)
+                if len(volume_context.shape) == 1:
+                    volume_context = volume_context.unsqueeze(0)
                 with torch.amp.autocast('cuda'):
                     volume_quantiles, volume_mean = pipe.predict_quantiles(
-                        context=volume_context_tensor,
-                        prediction_length=min(actual_prediction_length, 64),
+                        context=volume_context,
+                        prediction_length=actual_prediction_length,
                         quantile_levels=[0.1, 0.5, 0.9]
                     )
-
-                # Convert and denormalize volume predictions
+                volume_quantiles = volume_quantiles.detach().cpu().numpy()
                 volume_mean = volume_mean.detach().cpu().numpy()
                 volume_pred = volume_scaler.inverse_transform(volume_mean.reshape(-1, 1)).flatten()
+                lower_bound = volume_scaler.inverse_transform(volume_quantiles[0, :, 0].reshape(-1, 1)).flatten()
+                upper_bound = volume_scaler.inverse_transform(volume_quantiles[0, :, 2].reshape(-1, 1)).flatten()
+                std_pred_vol = (upper_bound - lower_bound) / (2 * 1.645)
+                last_actual = volume_data[-1]
+                first_pred = volume_pred[0]
+                if abs(first_pred - last_actual) > max(1e-6, 0.05 * abs(last_actual)):
+                    print(f"Warning: Discontinuity detected between last actual volume ({last_actual}) and first prediction ({first_pred})")
+
                 # Extend volume predictions if needed
-                if len(volume_pred) < len(mean_pred):
-                    last_volume = volume_pred[-1]
-                    extension_length = len(mean_pred) - len(volume_pred)
-                    volume_extension = np.full(extension_length, last_volume)
-                    volume_pred = np.concatenate([volume_pred, volume_extension])
+                if actual_prediction_length < trim_length:
+                    extended_mean_pred = volume_pred.copy()
+                    extended_std_pred = std_pred_vol.copy()
+                    remaining_steps = trim_length - actual_prediction_length
+                    steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
+                    for step in range(steps_needed):
+                        # Use as much actual data as possible, then fill with predictions
+                        n_actual = max(0, window_size - len(extended_mean_pred))
+                        n_pred = window_size - n_actual
+                        if n_actual > 0:
+                            context_window = np.concatenate([
+                                volume_data[-n_actual:],
+                                extended_mean_pred[-n_pred:] if n_pred > 0 else np.array([])
+                            ])
+                        else:
+                            # All synthetic, but add a few random real points
+                            n_random_real = min(random_real_points, len(volume_data))
+                            random_real = np.random.choice(volume_data, size=n_random_real, replace=False)
+                            context_window = np.concatenate([
+                                extended_mean_pred[-(window_size - n_random_real):],
+                                random_real
+                            ])
+                        volume_scaler = MinMaxScaler(feature_range=(-1, 1))
+                        normalized_context = volume_scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
+                        context = torch.tensor(normalized_context, dtype=dtype, device=device)
+                        if len(context.shape) == 1:
+                            context = context.unsqueeze(0)
+                        next_length = min(window_size, remaining_steps)
+                        with torch.amp.autocast('cuda'):
+                            next_quantiles, next_mean = pipe.predict_quantiles(
+                                context=context,
+                                prediction_length=next_length,
+                                quantile_levels=[0.1, 0.5, 0.9]
+                            )
+                        next_mean = next_mean.detach().cpu().numpy()
+                        next_quantiles = next_quantiles.detach().cpu().numpy()
+                        next_mean_pred = volume_scaler.inverse_transform(next_mean.reshape(-1, 1)).flatten()
+                        next_lower = volume_scaler.inverse_transform(next_quantiles[0, :, 0].reshape(-1, 1)).flatten()
+                        next_upper = volume_scaler.inverse_transform(next_quantiles[0, :, 2].reshape(-1, 1)).flatten()
+                        next_std_pred = (next_upper - next_lower) / (2 * 1.645)
+                        if abs(next_mean_pred[0] - extended_mean_pred[-1]) > max(1e-6, 0.05 * abs(extended_mean_pred[-1])):
+                            print(f"Warning: Discontinuity detected between last volume prediction ({extended_mean_pred[-1]}) and next prediction ({next_mean_pred[0]})")
+                        extended_mean_pred = np.concatenate([extended_mean_pred, next_mean_pred])
+                        extended_std_pred = np.concatenate([extended_std_pred, next_std_pred])
+                        remaining_steps -= len(next_mean_pred)
+                        if remaining_steps <= 0:
+                            break
+                    volume_pred = extended_mean_pred[:trim_length]
+            else:
+                avg_volume = df['Volume'].mean()
+                volume_pred = np.full(trim_length, avg_volume)
         except Exception as e:
             print(f"Volume prediction error: {str(e)}")
             # Fallback: use historical average
             avg_volume = df['Volume'].mean()
-            volume_pred = np.full(len(mean_pred), avg_volume)
-
+            volume_pred = np.full(trim_length, avg_volume)
+
         try:
             # Prepare RSI data for Chronos
             rsi_data = df['RSI'].values
             if len(rsi_data) >= 64 and not np.any(np.isnan(rsi_data)):
                 # RSI is already normalized (0-100), but we'll scale it to (-1, 1)
+                window_size = 64
+                context_window = rsi_data[-window_size:]
                 rsi_scaler = MinMaxScaler(feature_range=(-1, 1))
-                normalized_rsi = rsi_scaler.fit_transform(rsi_data.reshape(-1, 1)).flatten()
-
-                # Use last 64 points for RSI prediction
-                rsi_context = normalized_rsi[-64:]
-                rsi_context_tensor = torch.tensor(rsi_context, dtype=dtype, device=device)
-                if len(rsi_context_tensor.shape) == 1:
-                    rsi_context_tensor = rsi_context_tensor.unsqueeze(0)
-
-                # Predict RSI
+                normalized_rsi = rsi_scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
+                if len(normalized_rsi) < window_size:
+                    padding = np.full(window_size - len(normalized_rsi), normalized_rsi[-1])
+                    normalized_rsi = np.concatenate([padding, normalized_rsi])
+                elif len(normalized_rsi) > window_size:
+                    normalized_rsi = normalized_rsi[-window_size:]
+                rsi_context = torch.tensor(normalized_rsi, dtype=dtype, device=device)
+                if len(rsi_context.shape) == 1:
+                    rsi_context = rsi_context.unsqueeze(0)
                 with torch.amp.autocast('cuda'):
                     rsi_quantiles, rsi_mean = pipe.predict_quantiles(
-                        context=rsi_context_tensor,
-                        prediction_length=min(actual_prediction_length, 64),
+                        context=rsi_context,
+                        prediction_length=actual_prediction_length,
                         quantile_levels=[0.1, 0.5, 0.9]
                     )
-
                 # Convert and denormalize RSI predictions
+                rsi_quantiles = rsi_quantiles.detach().cpu().numpy()
                 rsi_mean = rsi_mean.detach().cpu().numpy()
                 rsi_pred = rsi_scaler.inverse_transform(rsi_mean.reshape(-1, 1)).flatten()
-
                 # Clamp RSI to valid range (0-100)
+                lower_bound = rsi_scaler.inverse_transform(rsi_quantiles[0, :, 0].reshape(-1, 1)).flatten()
+                upper_bound = rsi_scaler.inverse_transform(rsi_quantiles[0, :, 2].reshape(-1, 1)).flatten()
+                std_pred_rsi = (upper_bound - lower_bound) / (2 * 1.645)
                 rsi_pred = np.clip(rsi_pred, 0, 100)
-
+                last_actual = rsi_data[-1]
+                first_pred = rsi_pred[0]
+                if abs(first_pred - last_actual) > max(1e-6, 0.05 * abs(last_actual)):
+                    print(f"Warning: Discontinuity detected between last actual RSI ({last_actual}) and first prediction ({first_pred})")
                 # Extend RSI predictions if needed
-                if len(rsi_pred) < len(mean_pred):
-                    last_rsi = rsi_pred[-1]
-                    extension_length = len(mean_pred) - len(rsi_pred)
-                    rsi_extension = np.full(extension_length, last_rsi)
-                    rsi_pred = np.concatenate([rsi_pred, rsi_extension])
+                if actual_prediction_length < trim_length:
+                    extended_mean_pred = rsi_pred.copy()
+                    extended_std_pred = std_pred_rsi.copy()
+                    remaining_steps = trim_length - actual_prediction_length
+                    steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
+                    for step in range(steps_needed):
+                        n_actual = max(0, window_size - len(extended_mean_pred))
+                        n_pred = window_size - n_actual
+                        if n_actual > 0:
+                            context_window = np.concatenate([
+                                rsi_data[-n_actual:],
+                                extended_mean_pred[-n_pred:] if n_pred > 0 else np.array([])
+                            ])
+                        else:
+                            # All synthetic, but add a few random real points
+                            n_random_real = min(random_real_points, len(rsi_data))
+                            random_real = np.random.choice(rsi_data, size=n_random_real, replace=False)
+                            context_window = np.concatenate([
+                                extended_mean_pred[-(window_size - n_random_real):],
+                                random_real
+                            ])
+                        rsi_scaler = MinMaxScaler(feature_range=(-1, 1))
+                        normalized_context = rsi_scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
+                        context = torch.tensor(normalized_context, dtype=dtype, device=device)
+                        if len(context.shape) == 1:
+                            context = context.unsqueeze(0)
+                        next_length = min(window_size, remaining_steps)
+                        with torch.amp.autocast('cuda'):
+                            next_quantiles, next_mean = pipe.predict_quantiles(
+                                context=context,
+                                prediction_length=next_length,
+                                quantile_levels=[0.1, 0.5, 0.9]
+                            )
+                        next_mean = next_mean.detach().cpu().numpy()
+                        next_quantiles = next_quantiles.detach().cpu().numpy()
+                        next_mean_pred = rsi_scaler.inverse_transform(next_mean.reshape(-1, 1)).flatten()
+                        next_lower = rsi_scaler.inverse_transform(next_quantiles[0, :, 0].reshape(-1, 1)).flatten()
+                        next_upper = rsi_scaler.inverse_transform(next_quantiles[0, :, 2].reshape(-1, 1)).flatten()
+                        next_std_pred = (next_upper - next_lower) / (2 * 1.645)
+                        next_mean_pred = np.clip(next_mean_pred, 0, 100)
+                        if abs(next_mean_pred[0] - extended_mean_pred[-1]) > max(1e-6, 0.05 * abs(extended_mean_pred[-1])):
+                            print(f"Warning: Discontinuity detected between last RSI prediction ({extended_mean_pred[-1]}) and next prediction ({next_mean_pred[0]})")
+                        extended_mean_pred = np.concatenate([extended_mean_pred, next_mean_pred])
+                        extended_std_pred = np.concatenate([extended_std_pred, next_std_pred])
+                        remaining_steps -= len(next_mean_pred)
+                        if remaining_steps <= 0:
+                            break
+                    rsi_pred = extended_mean_pred[:trim_length]
+            else:
+                last_rsi = df['RSI'].iloc[-1]
+                rsi_pred = np.full(trim_length, last_rsi)
         except Exception as e:
             print(f"RSI prediction error: {str(e)}")
             # Fallback: use last known RSI value
             last_rsi = df['RSI'].iloc[-1]
-            rsi_pred = np.full(len(mean_pred), last_rsi)
-
+            rsi_pred = np.full(trim_length, last_rsi)
+
         try:
             # Prepare MACD data for Chronos
             macd_data = df['MACD'].values
             if len(macd_data) >= 64 and not np.any(np.isnan(macd_data)):
                 # Normalize MACD data
+                window_size = 64
+                context_window = macd_data[-window_size:]
                 macd_scaler = MinMaxScaler(feature_range=(-1, 1))
-                normalized_macd = macd_scaler.fit_transform(macd_data.reshape(-1, 1)).flatten()
-
-                # Use last 64 points for MACD prediction
-                macd_context = normalized_macd[-64:]
-                macd_context_tensor = torch.tensor(macd_context, dtype=dtype, device=device)
-                if len(macd_context_tensor.shape) == 1:
-                    macd_context_tensor = macd_context_tensor.unsqueeze(0)
-
-                # Predict MACD
+                normalized_macd = macd_scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
+                if len(normalized_macd) < window_size:
+                    padding = np.full(window_size - len(normalized_macd), normalized_macd[-1])
+                    normalized_macd = np.concatenate([padding, normalized_macd])
+                elif len(normalized_macd) > window_size:
+                    normalized_macd = normalized_macd[-window_size:]
+                macd_context = torch.tensor(normalized_macd, dtype=dtype, device=device)
+                if len(macd_context.shape) == 1:
+                    macd_context = macd_context.unsqueeze(0)
                 with torch.amp.autocast('cuda'):
                     macd_quantiles, macd_mean = pipe.predict_quantiles(
-                        context=macd_context_tensor,
-                        prediction_length=min(actual_prediction_length, 64),
+                        context=macd_context,
+                        prediction_length=actual_prediction_length,
                         quantile_levels=[0.1, 0.5, 0.9]
                     )
-
                 # Convert and denormalize MACD predictions
+                macd_quantiles = macd_quantiles.detach().cpu().numpy()
                 macd_mean = macd_mean.detach().cpu().numpy()
                 macd_pred = macd_scaler.inverse_transform(macd_mean.reshape(-1, 1)).flatten()
-
+                lower_bound = macd_scaler.inverse_transform(macd_quantiles[0, :, 0].reshape(-1, 1)).flatten()
+                upper_bound = macd_scaler.inverse_transform(macd_quantiles[0, :, 2].reshape(-1, 1)).flatten()
+                std_pred_macd = (upper_bound - lower_bound) / (2 * 1.645)
+                last_actual = macd_data[-1]
+                first_pred = macd_pred[0]
+
                 # Extend MACD predictions if needed
-                if len(macd_pred) < len(mean_pred):
-                    last_macd = macd_pred[-1]
-                    extension_length = len(mean_pred) - len(macd_pred)
-                    macd_extension = np.full(extension_length, last_macd)
-                    macd_pred = np.concatenate([macd_pred, macd_extension])
+                if abs(first_pred - last_actual) > max(1e-6, 0.05 * abs(last_actual)):
+                    print(f"Warning: Discontinuity detected between last actual MACD ({last_actual}) and first prediction ({first_pred})")
+                if actual_prediction_length < trim_length:
+                    extended_mean_pred = macd_pred.copy()
+                    extended_std_pred = std_pred_macd.copy()
+                    remaining_steps = trim_length - actual_prediction_length
+                    steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
+                    for step in range(steps_needed):
+                        n_actual = max(0, window_size - len(extended_mean_pred))
+                        n_pred = window_size - n_actual
+                        if n_actual > 0:
+                            context_window = np.concatenate([
+                                macd_data[-n_actual:],
+                                extended_mean_pred[-n_pred:] if n_pred > 0 else np.array([])
+                            ])
+                        else:
+                            # All synthetic, but add a few random real points
+                            n_random_real = min(random_real_points, len(macd_data))
+                            random_real = np.random.choice(macd_data, size=n_random_real, replace=False)
+                            context_window = np.concatenate([
+                                extended_mean_pred[-(window_size - n_random_real):],
+                                random_real
+                            ])
+                        macd_scaler = MinMaxScaler(feature_range=(-1, 1))
+                        normalized_context = macd_scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
+                        context = torch.tensor(normalized_context, dtype=dtype, device=device)
+                        if len(context.shape) == 1:
+                            context = context.unsqueeze(0)
+                        next_length = min(window_size, remaining_steps)
+                        with torch.amp.autocast('cuda'):
+                            next_quantiles, next_mean = pipe.predict_quantiles(
+                                context=context,
+                                prediction_length=next_length,
+                                quantile_levels=[0.1, 0.5, 0.9]
+                            )
+                        next_mean = next_mean.detach().cpu().numpy()
+                        next_quantiles = next_quantiles.detach().cpu().numpy()
+                        next_mean_pred = macd_scaler.inverse_transform(next_mean.reshape(-1, 1)).flatten()
+                        next_lower = macd_scaler.inverse_transform(next_quantiles[0, :, 0].reshape(-1, 1)).flatten()
+                        next_upper = macd_scaler.inverse_transform(next_quantiles[0, :, 2].reshape(-1, 1)).flatten()
+                        next_std_pred = (next_upper - next_lower) / (2 * 1.645)
+                        if abs(next_mean_pred[0] - extended_mean_pred[-1]) > max(1e-6, 0.05 * abs(extended_mean_pred[-1])):
+                            print(f"Warning: Discontinuity detected between last MACD prediction ({extended_mean_pred[-1]}) and next prediction ({next_mean_pred[0]})")
+                        extended_mean_pred = np.concatenate([extended_mean_pred, next_mean_pred])
+                        extended_std_pred = np.concatenate([extended_std_pred, next_std_pred])
+                        remaining_steps -= len(next_mean_pred)
+                        if remaining_steps <= 0:
+                            break
+                    macd_pred = extended_mean_pred[:trim_length]
+            else:
+                last_macd = df['MACD'].iloc[-1]
+                macd_pred = np.full(trim_length, last_macd)
         except Exception as e:
             print(f"MACD prediction error: {str(e)}")
             # Fallback: use last known MACD value
             last_macd = df['MACD'].iloc[-1]
-            macd_pred = np.full(len(mean_pred), last_macd)
-
+            macd_pred = np.full(trim_length, last_macd)
+
     except Exception as e:
         print(f"Chronos prediction error: {str(e)}")
         print(f"Error type: {type(e)}")
@@ -1756,6 +1896,13 @@ def create_interface():
                         label="Market Index for Correlation",
                         value="^GSPC"
                     )
+                    random_real_points = gr.Slider(
+                        minimum=0,
+                        maximum=16,
+                        value=4,
+                        step=1,
+                        label="Random Real Points in Long-Horizon Context"
+                    )
 
                 with gr.Column():
                     gr.Markdown("### Ensemble Weights")
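
Note: the new slider is wired like every other control on this page: it is appended to the inputs list of each *_predict_btn.click(...) call below, and Gradio passes its current value positionally into the handler. A self-contained sketch of the same pattern with a toy handler (not the app's):

import gradio as gr

def handler(symbol: str, random_real_points: int) -> str:
    # Values arrive positionally, in the order of the inputs list.
    return f"{symbol}: using {int(random_real_points)} random real points"

with gr.Blocks() as demo:
    symbol = gr.Textbox(value="AAPL", label="Symbol")
    rrp = gr.Slider(minimum=0, maximum=16, value=4, step=1,
                    label="Random Real Points in Long-Horizon Context")
    out = gr.Textbox(label="Result")
    btn = gr.Button("Run")
    btn.click(fn=handler, inputs=[symbol, rrp], outputs=[out])

# demo.launch()  # uncomment to serve locally
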
@@ -1969,7 +2116,8 @@ def create_interface():
 
     def analyze_stock(symbol, timeframe, prediction_days, lookback_days, strategy,
                       use_ensemble, use_regime_detection, use_stress_testing,
-                      risk_free_rate, market_index, chronos_weight, technical_weight, statistical_weight):
+                      risk_free_rate, market_index, chronos_weight, technical_weight, statistical_weight,
+                      random_real_points):
         try:
             # Create ensemble weights
             ensemble_weights = {
@@ -1993,7 +2141,8 @@ def create_interface():
                 use_stress_testing=use_stress_testing,
                 risk_free_rate=risk_free_rate,
                 ensemble_weights=ensemble_weights,
-                market_index=market_index
+                market_index=market_index,
+                random_real_points=random_real_points
             )
 
             # Get historical data for additional metrics
@@ -2075,7 +2224,8 @@ def create_interface():
 
     # Daily analysis button click
     def daily_analysis(s: str, pd: int, ld: int, st: str, ue: bool, urd: bool, ust: bool,
-                       rfr: float, mi: str, cw: float, tw: float, sw: float) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
+                       rfr: float, mi: str, cw: float, tw: float, sw: float,
+                       rrp: int) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
         """
         Process daily timeframe stock analysis with advanced features.
 
@@ -2109,6 +2259,7 @@ def create_interface():
                 Weight given to technical analysis predictions in ensemble methods
             sw (float): Statistical weight in ensemble (0.0-1.0)
                 Weight given to statistical model predictions in ensemble methods
+            rrp (int): Number of random real points to include in long-horizon context
 
         Returns:
             Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]: Analysis results containing:
@@ -2128,7 +2279,7 @@ def create_interface():
 
         Example:
             >>> signals, plot, metrics, risk, sector, regime, stress, ensemble, advanced = daily_analysis(
-            ...     "AAPL", 30, 365, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2
+            ...     "AAPL", 30, 365, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2, 4
             ... )
 
         Notes:
@@ -2138,20 +2289,22 @@ def create_interface():
             - Ensemble weights should sum to 1.0 for optimal results
             - Risk-free rate is typically between 0.02-0.05 (2-5% annually)
         """
-        return analyze_stock(s, "1d", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw)
+        return analyze_stock(s, "1d", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw, rrp)
 
     daily_predict_btn.click(
         fn=daily_analysis,
         inputs=[daily_symbol, daily_prediction_days, daily_lookback_days, daily_strategy,
                 use_ensemble, use_regime_detection, use_stress_testing, risk_free_rate, market_index,
-                chronos_weight, technical_weight, statistical_weight],
+                chronos_weight, technical_weight, statistical_weight,
+                random_real_points],
         outputs=[daily_signals, daily_plot, daily_metrics, daily_risk_metrics, daily_sector_metrics,
                  daily_regime_metrics, daily_stress_results, daily_ensemble_metrics, daily_signals_advanced]
     )
 
     # Hourly analysis button click
     def hourly_analysis(s: str, pd: int, ld: int, st: str, ue: bool, urd: bool, ust: bool,
-                        rfr: float, mi: str, cw: float, tw: float, sw: float) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
+                        rfr: float, mi: str, cw: float, tw: float, sw: float,
+                        rrp: int) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
         """
         Process hourly timeframe stock analysis with advanced features.
 
@@ -2185,6 +2338,7 @@ def create_interface():
                 Weight for technical analysis in ensemble predictions
             sw (float): Statistical weight in ensemble (0.0-1.0)
                 Weight for statistical models in ensemble predictions
+            rrp (int): Number of random real points to include in long-horizon context
 
         Returns:
             Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]: Analysis results containing:
@@ -2204,7 +2358,7 @@ def create_interface():
 
         Example:
             >>> signals, plot, metrics, risk, sector, regime, stress, ensemble, advanced = hourly_analysis(
-            ...     "AAPL", 3, 14, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2
+            ...     "AAPL", 3, 14, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2, 4
             ... )
 
         Notes:
@@ -2215,20 +2369,22 @@ def create_interface():
             - Optimized for day trading and swing trading strategies
             - Requires high-liquidity stocks for reliable hourly analysis
         """
-        return analyze_stock(s, "1h", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw)
+        return analyze_stock(s, "1h", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw, rrp)
 
     hourly_predict_btn.click(
         fn=hourly_analysis,
         inputs=[hourly_symbol, hourly_prediction_days, hourly_lookback_days, hourly_strategy,
                 use_ensemble, use_regime_detection, use_stress_testing, risk_free_rate, market_index,
-                chronos_weight, technical_weight, statistical_weight],
+                chronos_weight, technical_weight, statistical_weight,
+                random_real_points],
         outputs=[hourly_signals, hourly_plot, hourly_metrics, hourly_risk_metrics, hourly_sector_metrics,
                  hourly_regime_metrics, hourly_stress_results, hourly_ensemble_metrics, hourly_signals_advanced]
     )
 
     # 15-minute analysis button click
     def min15_analysis(s: str, pd: int, ld: int, st: str, ue: bool, urd: bool, ust: bool,
-                       rfr: float, mi: str, cw: float, tw: float, sw: float) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
+                       rfr: float, mi: str, cw: float, tw: float, sw: float,
+                       rrp: int) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
         """
         Process 15-minute timeframe stock analysis with advanced features.
 
@@ -2262,6 +2418,7 @@ def create_interface():
                 Weight for technical analysis in ensemble predictions
             sw (float): Statistical weight in ensemble (0.0-1.0)
                 Weight for statistical models in ensemble predictions
+            rrp (int): Number of random real points to include in long-horizon context
 
         Returns:
             Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]: Analysis results containing:
@@ -2281,7 +2438,7 @@ def create_interface():
 
         Example:
             >>> signals, plot, metrics, risk, sector, regime, stress, ensemble, advanced = min15_analysis(
-            ...     "AAPL", 1, 3, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2
+            ...     "AAPL", 1, 3, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2, 4
             ... )
 
         Notes:
@@ -2294,13 +2451,14 @@ def create_interface():
             - Higher transaction costs and slippage considerations for 15-minute strategies
             - Best suited for highly liquid large-cap stocks with tight bid-ask spreads
         """
-        return analyze_stock(s, "15m", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw)
+        return analyze_stock(s, "15m", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw, rrp)
 
     min15_predict_btn.click(
         fn=min15_analysis,
         inputs=[min15_symbol, min15_prediction_days, min15_lookback_days, min15_strategy,
                 use_ensemble, use_regime_detection, use_stress_testing, risk_free_rate, market_index,
-                chronos_weight, technical_weight, statistical_weight],
+                chronos_weight, technical_weight, statistical_weight,
+                random_real_points],
         outputs=[min15_signals, min15_plot, min15_metrics, min15_risk_metrics, min15_sector_metrics,
                  min15_regime_metrics, min15_stress_results, min15_ensemble_metrics, min15_signals_advanced]
     )
 