Tonic committed
Commit ef5069d · unverified · Parent: 73f8121

improve extended window, smoothing and context management
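For orientation, a minimal sketch (not part of the commit) of how the updated make_prediction entry point could be called with the smoothing controls introduced here; the ticker and parameter values are illustrative placeholders, and in the app the Gradio controls supply them:

    # Hypothetical standalone call, assuming make_prediction is importable from app.py.
    signals, fig = make_prediction(
        "AAPL",                         # illustrative symbol
        timeframe="1d",
        prediction_days=30,
        use_smoothing=True,
        smoothing_type="exponential",   # or "moving_average", "kalman", "savitzky_golay", ...
        smoothing_window=5,
        smoothing_alpha=0.3,
    )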

Files changed (1)
  1. app.py +455 -123
app.py CHANGED
@@ -371,7 +371,9 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
371
  use_ensemble: bool = True, use_regime_detection: bool = True, use_stress_testing: bool = True,
372
  risk_free_rate: float = 0.02, ensemble_weights: Dict = None,
373
  market_index: str = "^GSPC",
374
- random_real_points: int = 4, use_smoothing: bool = True) -> Tuple[Dict, go.Figure]:
 
 
375
  """
376
  Make prediction using selected strategy with advanced features.
377
 
@@ -388,6 +390,7 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
388
  market_index (str): Market index for correlation analysis
389
  random_real_points (int): Number of random real points to include in long-horizon context
390
  use_smoothing (bool): Whether to apply smoothing to predictions
 
391
 
392
  Returns:
393
  Tuple[Dict, go.Figure]: Trading signals and visualization plot
@@ -400,17 +403,22 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
400
  try:
401
  # Prepare data for Chronos
402
  prices = df['Close'].values
403
- window_size = 64 # Chronos context window size
 
 
404
  # Use a larger range for scaler fitting to get better normalization
405
- scaler_range = min(len(prices), window_size * 2) # Use up to 128 points for scaler
406
- context_window = prices[-window_size:]
407
  scaler = MinMaxScaler(feature_range=(-1, 1))
408
  # Fit scaler on a larger range for better normalization
409
  scaler.fit(prices[-scaler_range:].reshape(-1, 1))
410
  normalized_prices = scaler.transform(context_window.reshape(-1, 1)).flatten()
411
 
412
- # Ensure we have enough data points
413
- min_data_points = window_size
414
  if len(normalized_prices) < min_data_points:
415
  padding = np.full(min_data_points - len(normalized_prices), normalized_prices[-1])
416
  normalized_prices = np.concatenate([padding, normalized_prices])
@@ -431,15 +439,15 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
431
 
432
  # Adjust prediction length based on timeframe
433
  if timeframe == "1d":
434
- max_prediction_length = window_size # 64 days
435
  actual_prediction_length = min(prediction_days, max_prediction_length)
436
  trim_length = prediction_days
437
  elif timeframe == "1h":
438
- max_prediction_length = window_size # 64 hours
439
  actual_prediction_length = min(prediction_days * 24, max_prediction_length)
440
  trim_length = prediction_days * 24
441
  else: # 15m
442
- max_prediction_length = window_size # 64 intervals
443
  actual_prediction_length = min(prediction_days * 96, max_prediction_length)
444
  trim_length = prediction_days * 96
445
  actual_prediction_length = max(1, actual_prediction_length)
@@ -654,35 +662,38 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
654
  # Apply the same trend but starting from the last actual value
655
  for i in range(1, len(mean_pred)):
656
  mean_pred[i] = last_actual + original_trend * i
657
- # Add smoothing to prevent drift if enabled
658
- if use_smoothing and i > 1:
659
- smoothing_factor = 0.95
660
- mean_pred[i] = smoothing_factor * mean_pred[i] + (1 - smoothing_factor) * mean_pred[i-1]
661
 
662
  # If we had to limit the prediction length, extend the prediction recursively
663
  if actual_prediction_length < trim_length:
664
  extended_mean_pred = mean_pred.copy()
665
  extended_std_pred = std_pred.copy()
666
 
 
 
 
667
  # Calculate the number of extension steps needed
668
  remaining_steps = trim_length - actual_prediction_length
669
  steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
 
670
  for step in range(steps_needed):
671
-
672
- # Use all available datapoints for context, prioritizing actual data over predictions
673
  all_available_data = np.concatenate([prices, extended_mean_pred])
674
 
675
- # If we have more data than window_size, use the most recent window_size points
676
  # Otherwise, use all available data (this allows for longer context when available)
677
- if len(all_available_data) > window_size:
678
- context_window = all_available_data[-window_size:]
679
  else:
680
  context_window = all_available_data
681
 
682
- scaler = MinMaxScaler(feature_range=(-1, 1))
683
-
684
- # Convert to tensor and ensure proper shape
685
- normalized_context = scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
686
  context = torch.tensor(normalized_context, dtype=dtype, device=device)
687
  if len(context.shape) == 1:
688
  context = context.unsqueeze(0)
@@ -694,6 +705,7 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
694
  next_length = min(max_prediction_length, remaining_steps)
695
  else:
696
  next_length = min(max_prediction_length, remaining_steps)
 
697
  with torch.amp.autocast('cuda'):
698
  next_quantiles, next_mean = pipe.predict_quantiles(
699
  context=context,
@@ -701,20 +713,33 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
701
  quantile_levels=[0.1, 0.5, 0.9]
702
  )
703
 
704
- # Convert predictions to numpy and denormalize
705
  next_mean = next_mean.detach().cpu().numpy()
706
  next_quantiles = next_quantiles.detach().cpu().numpy()
707
 
708
- # Denormalize predictions
709
- next_mean_pred = scaler.inverse_transform(next_mean.reshape(-1, 1)).flatten()
710
- next_lower = scaler.inverse_transform(next_quantiles[0, :, 0].reshape(-1, 1)).flatten()
711
- next_upper = scaler.inverse_transform(next_quantiles[0, :, 2].reshape(-1, 1)).flatten()
712
 
713
  # Calculate standard deviation
714
  next_std_pred = (next_upper - next_lower) / (2 * 1.645)
 
 
715
  if abs(next_mean_pred[0] - extended_mean_pred[-1]) > max(1e-6, 0.05 * abs(extended_mean_pred[-1])):
716
  print(f"Warning: Discontinuity detected between last prediction ({extended_mean_pred[-1]}) and next prediction ({next_mean_pred[0]})")
717
-
718
  # Append predictions
719
  extended_mean_pred = np.concatenate([extended_mean_pred, next_mean_pred])
720
  extended_std_pred = np.concatenate([extended_std_pred, next_std_pred])
@@ -734,20 +759,19 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
734
  try:
735
  # Prepare volume data for Chronos
736
  volume_data = df['Volume'].values
737
- if len(volume_data) >= 64:
738
  # Normalize volume data
739
- window_size = 64
740
- scaler_range = min(len(volume_data), window_size * 2)
741
- context_window = volume_data[-window_size:]
742
  volume_scaler = MinMaxScaler(feature_range=(-1, 1))
743
  # Fit scaler on a larger range for better normalization
744
  volume_scaler.fit(volume_data[-scaler_range:].reshape(-1, 1))
745
  normalized_volume = volume_scaler.transform(context_window.reshape(-1, 1)).flatten()
746
- if len(normalized_volume) < window_size:
747
- padding = np.full(window_size - len(normalized_volume), normalized_volume[-1])
748
  normalized_volume = np.concatenate([padding, normalized_volume])
749
- elif len(normalized_volume) > window_size:
750
- normalized_volume = normalized_volume[-window_size:]
751
  volume_context = torch.tensor(normalized_volume, dtype=dtype, device=device)
752
  if len(volume_context.shape) == 1:
753
  volume_context = volume_context.unsqueeze(0)
@@ -765,7 +789,7 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
765
  std_pred_vol = (upper_bound - lower_bound) / (2 * 1.645)
766
  last_actual = volume_data[-1]
767
  first_pred = volume_pred[0]
768
- if abs(first_pred - last_actual) > max(1e-6, 0.005 * abs(last_actual)): # Further reduced threshold
769
  print(f"Warning: Discontinuity detected between last actual volume ({last_actual}) and first prediction ({first_pred})")
770
  # Apply continuity correction
771
  volume_pred[0] = last_actual
@@ -776,33 +800,38 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
776
  # Apply the same trend but starting from the last actual value
777
  for i in range(1, len(volume_pred)):
778
  volume_pred[i] = last_actual + original_trend * i
779
- # Add smoothing to prevent drift if enabled
780
- if use_smoothing and i > 1:
781
- smoothing_factor = 0.95
782
- volume_pred[i] = smoothing_factor * volume_pred[i] + (1 - smoothing_factor) * volume_pred[i-1]
 
783
  # Extend volume predictions if needed
784
  if actual_prediction_length < trim_length:
785
- extended_mean_pred = volume_pred.copy()
786
- extended_std_pred = std_pred_vol.copy()
787
  remaining_steps = trim_length - actual_prediction_length
788
  steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
 
789
  for step in range(steps_needed):
790
- # Use all available datapoints for context, prioritizing actual data over predictions
791
- all_available_data = np.concatenate([volume_data, extended_mean_pred])
 
792
 
793
- # If we have more data than window_size, use the most recent window_size points
794
  # Otherwise, use all available data (this allows for longer context when available)
795
- if len(all_available_data) > window_size:
796
- context_window = all_available_data[-window_size:]
797
  else:
798
  context_window = all_available_data
799
 
800
- volume_scaler = MinMaxScaler(feature_range=(-1, 1))
801
- normalized_context = volume_scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
 
802
  context = torch.tensor(normalized_context, dtype=dtype, device=device)
803
  if len(context.shape) == 1:
804
  context = context.unsqueeze(0)
805
- next_length = min(window_size, remaining_steps)
 
806
  with torch.amp.autocast('cuda'):
807
  next_quantiles, next_mean = pipe.predict_quantiles(
808
  context=context,
@@ -815,14 +844,26 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
815
  next_lower = volume_scaler.inverse_transform(next_quantiles[0, :, 0].reshape(-1, 1)).flatten()
816
  next_upper = volume_scaler.inverse_transform(next_quantiles[0, :, 2].reshape(-1, 1)).flatten()
817
  next_std_pred = (next_upper - next_lower) / (2 * 1.645)
818
- if abs(next_mean_pred[0] - extended_mean_pred[-1]) > max(1e-6, 0.05 * abs(extended_mean_pred[-1])):
819
- print(f"Warning: Discontinuity detected between last volume prediction ({extended_mean_pred[-1]}) and next prediction ({next_mean_pred[0]})")
820
- extended_mean_pred = np.concatenate([extended_mean_pred, next_mean_pred])
821
- extended_std_pred = np.concatenate([extended_std_pred, next_std_pred])
822
  remaining_steps -= len(next_mean_pred)
823
  if remaining_steps <= 0:
824
  break
825
- volume_pred = extended_mean_pred[:trim_length]
826
  else:
827
  avg_volume = df['Volume'].mean()
828
  volume_pred = np.full(trim_length, avg_volume)
@@ -831,23 +872,23 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
831
  # Fallback: use historical average
832
  avg_volume = df['Volume'].mean()
833
  volume_pred = np.full(trim_length, avg_volume)
 
834
  try:
835
  # Prepare RSI data for Chronos
836
  rsi_data = df['RSI'].values
837
- if len(rsi_data) >= 64 and not np.any(np.isnan(rsi_data)):
838
  # RSI is already normalized (0-100), but we'll scale it to (-1, 1)
839
- window_size = 64
840
- scaler_range = min(len(rsi_data), window_size * 2)
841
- context_window = rsi_data[-window_size:]
842
  rsi_scaler = MinMaxScaler(feature_range=(-1, 1))
843
  # Fit scaler on a larger range for better normalization
844
  rsi_scaler.fit(rsi_data[-scaler_range:].reshape(-1, 1))
845
  normalized_rsi = rsi_scaler.transform(context_window.reshape(-1, 1)).flatten()
846
- if len(normalized_rsi) < window_size:
847
- padding = np.full(window_size - len(normalized_rsi), normalized_rsi[-1])
848
  normalized_rsi = np.concatenate([padding, normalized_rsi])
849
- elif len(normalized_rsi) > window_size:
850
- normalized_rsi = normalized_rsi[-window_size:]
851
  rsi_context = torch.tensor(normalized_rsi, dtype=dtype, device=device)
852
  if len(rsi_context.shape) == 1:
853
  rsi_context = rsi_context.unsqueeze(0)
@@ -868,7 +909,7 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
868
  rsi_pred = np.clip(rsi_pred, 0, 100)
869
  last_actual = rsi_data[-1]
870
  first_pred = rsi_pred[0]
871
- if abs(first_pred - last_actual) > max(1e-6, 0.005 * abs(last_actual)): # Further reduced threshold
872
  print(f"Warning: Discontinuity detected between last actual RSI ({last_actual}) and first prediction ({first_pred})")
873
  # Apply continuity correction
874
  rsi_pred[0] = last_actual
@@ -876,29 +917,34 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
876
  trend = rsi_pred[1] - first_pred
877
  rsi_pred[1:] = rsi_pred[1:] - first_pred + last_actual
878
  rsi_pred = np.clip(rsi_pred, 0, 100) # Re-clip after adjustment
 
879
  # Extend RSI predictions if needed
880
  if actual_prediction_length < trim_length:
881
- extended_mean_pred = rsi_pred.copy()
882
- extended_std_pred = std_pred_rsi.copy()
883
  remaining_steps = trim_length - actual_prediction_length
884
  steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
 
885
  for step in range(steps_needed):
886
- # Use all available datapoints for context, prioritizing actual data over predictions
887
- all_available_data = np.concatenate([rsi_data, extended_mean_pred])
 
888
 
889
- # If we have more data than window_size, use the most recent window_size points
890
  # Otherwise, use all available data (this allows for longer context when available)
891
- if len(all_available_data) > window_size:
892
- context_window = all_available_data[-window_size:]
893
  else:
894
  context_window = all_available_data
895
 
896
- rsi_scaler = MinMaxScaler(feature_range=(-1, 1))
897
- normalized_context = rsi_scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
 
898
  context = torch.tensor(normalized_context, dtype=dtype, device=device)
899
  if len(context.shape) == 1:
900
  context = context.unsqueeze(0)
901
- next_length = min(window_size, remaining_steps)
 
902
  with torch.amp.autocast('cuda'):
903
  next_quantiles, next_mean = pipe.predict_quantiles(
904
  context=context,
@@ -912,14 +958,28 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
912
  next_upper = rsi_scaler.inverse_transform(next_quantiles[0, :, 2].reshape(-1, 1)).flatten()
913
  next_std_pred = (next_upper - next_lower) / (2 * 1.645)
914
  next_mean_pred = np.clip(next_mean_pred, 0, 100)
915
- if abs(next_mean_pred[0] - extended_mean_pred[-1]) > max(1e-6, 0.005 * abs(extended_mean_pred[-1])):
916
- print(f"Warning: Discontinuity detected between last RSI prediction ({extended_mean_pred[-1]}) and next prediction ({next_mean_pred[0]})")
917
- extended_mean_pred = np.concatenate([extended_mean_pred, next_mean_pred])
918
- extended_std_pred = np.concatenate([extended_std_pred, next_std_pred])
919
  remaining_steps -= len(next_mean_pred)
920
  if remaining_steps <= 0:
921
  break
922
- rsi_pred = extended_mean_pred[:trim_length]
923
  else:
924
  last_rsi = df['RSI'].iloc[-1]
925
  rsi_pred = np.full(trim_length, last_rsi)
@@ -928,23 +988,23 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
928
  # Fallback: use last known RSI value
929
  last_rsi = df['RSI'].iloc[-1]
930
  rsi_pred = np.full(trim_length, last_rsi)
 
931
  try:
932
  # Prepare MACD data for Chronos
933
  macd_data = df['MACD'].values
934
- if len(macd_data) >= 64 and not np.any(np.isnan(macd_data)):
935
  # Normalize MACD data
936
- window_size = 64
937
- scaler_range = min(len(macd_data), window_size * 2)
938
- context_window = macd_data[-window_size:]
939
  macd_scaler = MinMaxScaler(feature_range=(-1, 1))
940
  # Fit scaler on a larger range for better normalization
941
  macd_scaler.fit(macd_data[-scaler_range:].reshape(-1, 1))
942
  normalized_macd = macd_scaler.transform(context_window.reshape(-1, 1)).flatten()
943
- if len(normalized_macd) < window_size:
944
- padding = np.full(window_size - len(normalized_macd), normalized_macd[-1])
945
  normalized_macd = np.concatenate([padding, normalized_macd])
946
- elif len(normalized_macd) > window_size:
947
- normalized_macd = normalized_macd[-window_size:]
948
  macd_context = torch.tensor(normalized_macd, dtype=dtype, device=device)
949
  if len(macd_context.shape) == 1:
950
  macd_context = macd_context.unsqueeze(0)
@@ -964,8 +1024,8 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
964
  last_actual = macd_data[-1]
965
  first_pred = macd_pred[0]
966
 
967
- # Extend MACD predictions if needed
968
- if abs(first_pred - last_actual) > max(1e-6, 0.005 * abs(last_actual)): # Further reduced threshold
969
  print(f"Warning: Discontinuity detected between last actual MACD ({last_actual}) and first prediction ({first_pred})")
970
  # Apply continuity correction
971
  macd_pred[0] = last_actual
@@ -976,32 +1036,38 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
976
  # Apply the same trend but starting from the last actual value
977
  for i in range(1, len(macd_pred)):
978
  macd_pred[i] = last_actual + original_trend * i
979
- # Add smoothing to prevent drift if enabled
980
- if use_smoothing and i > 1:
981
- smoothing_factor = 0.95
982
- macd_pred[i] = smoothing_factor * macd_pred[i] + (1 - smoothing_factor) * macd_pred[i-1]
 
 
983
  if actual_prediction_length < trim_length:
984
- extended_mean_pred = macd_pred.copy()
985
- extended_std_pred = std_pred_macd.copy()
986
  remaining_steps = trim_length - actual_prediction_length
987
  steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
 
988
  for step in range(steps_needed):
989
- # Use all available datapoints for context, prioritizing actual data over predictions
990
- all_available_data = np.concatenate([macd_data, extended_mean_pred])
 
991
 
992
- # If we have more data than window_size, use the most recent window_size points
993
  # Otherwise, use all available data (this allows for longer context when available)
994
- if len(all_available_data) > window_size:
995
- context_window = all_available_data[-window_size:]
996
  else:
997
  context_window = all_available_data
998
 
999
- macd_scaler = MinMaxScaler(feature_range=(-1, 1))
1000
- normalized_context = macd_scaler.fit_transform(context_window.reshape(-1, 1)).flatten()
 
1001
  context = torch.tensor(normalized_context, dtype=dtype, device=device)
1002
  if len(context.shape) == 1:
1003
  context = context.unsqueeze(0)
1004
- next_length = min(window_size, remaining_steps)
 
1005
  with torch.amp.autocast('cuda'):
1006
  next_quantiles, next_mean = pipe.predict_quantiles(
1007
  context=context,
@@ -1014,14 +1080,26 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
1014
  next_lower = macd_scaler.inverse_transform(next_quantiles[0, :, 0].reshape(-1, 1)).flatten()
1015
  next_upper = macd_scaler.inverse_transform(next_quantiles[0, :, 2].reshape(-1, 1)).flatten()
1016
  next_std_pred = (next_upper - next_lower) / (2 * 1.645)
1017
- if abs(next_mean_pred[0] - extended_mean_pred[-1]) > max(1e-6, 0.05 * abs(extended_mean_pred[-1])):
1018
- print(f"Warning: Discontinuity detected between last MACD prediction ({extended_mean_pred[-1]}) and next prediction ({next_mean_pred[0]})")
1019
- extended_mean_pred = np.concatenate([extended_mean_pred, next_mean_pred])
1020
- extended_std_pred = np.concatenate([extended_std_pred, next_std_pred])
1021
  remaining_steps -= len(next_mean_pred)
1022
  if remaining_steps <= 0:
1023
  break
1024
- macd_pred = extended_mean_pred[:trim_length]
1025
  else:
1026
  last_macd = df['MACD'].iloc[-1]
1027
  macd_pred = np.full(trim_length, last_macd)
@@ -1913,6 +1991,198 @@ def advanced_trading_signals(df: pd.DataFrame, regime_info: Dict = None) -> Dict
1913
  print(f"Advanced trading signals error: {str(e)}")
1914
  return {"error": str(e)}
1915
1916
  def create_interface():
1917
  """Create the Gradio interface with separate tabs for different timeframes"""
1918
  with gr.Blocks(title="Advanced Stock Prediction Analysis") as demo:
@@ -1935,6 +2205,37 @@ def create_interface():
1935
  use_regime_detection = gr.Checkbox(label="Use Regime Detection", value=True)
1936
  use_stress_testing = gr.Checkbox(label="Use Stress Testing", value=True)
1937
  use_smoothing = gr.Checkbox(label="Use Smoothing", value=True)
1938
  risk_free_rate = gr.Slider(
1939
  minimum=0.0,
1940
  maximum=0.1,
@@ -2005,6 +2306,22 @@ def create_interface():
2005
  value="chronos"
2006
  )
2007
  daily_predict_btn = gr.Button("Analyze Stock")
2008
 
2009
  with gr.Column():
2010
  daily_plot = gr.Plot(label="Analysis and Prediction")
@@ -2168,7 +2485,7 @@ def create_interface():
2168
  def analyze_stock(symbol, timeframe, prediction_days, lookback_days, strategy,
2169
  use_ensemble, use_regime_detection, use_stress_testing,
2170
  risk_free_rate, market_index, chronos_weight, technical_weight, statistical_weight,
2171
- random_real_points, use_smoothing):
2172
  try:
2173
  # Create ensemble weights
2174
  ensemble_weights = {
@@ -2194,7 +2511,10 @@ def create_interface():
2194
  ensemble_weights=ensemble_weights,
2195
  market_index=market_index,
2196
  random_real_points=random_real_points,
2197
- use_smoothing=use_smoothing
2198
  )
2199
 
2200
  # Get historical data for additional metrics
@@ -2277,7 +2597,7 @@ def create_interface():
2277
  # Daily analysis button click
2278
  def daily_analysis(s: str, pd: int, ld: int, st: str, ue: bool, urd: bool, ust: bool,
2279
  rfr: float, mi: str, cw: float, tw: float, sw: float,
2280
- rrp: int, usm: bool) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
2281
  """
2282
  Process daily timeframe stock analysis with advanced features.
2283
 
@@ -2314,6 +2634,10 @@ def create_interface():
2314
  rrp (int): Number of random real points to include in long-horizon context
2315
  usm (bool): Use smoothing
2316
  When True, applies smoothing to predictions to reduce noise and improve continuity
2317
 
2318
  Returns:
2319
  Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]: Analysis results containing:
@@ -2333,7 +2657,7 @@ def create_interface():
2333
 
2334
  Example:
2335
  >>> signals, plot, metrics, risk, sector, regime, stress, ensemble, advanced = daily_analysis(
2336
- ... "AAPL", 30, 365, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2, 4, True
2337
  ... )
2338
 
2339
  Notes:
@@ -2344,14 +2668,14 @@ def create_interface():
2344
  - Risk-free rate is typically between 0.02-0.05 (2-5% annually)
2345
  - Smoothing helps reduce prediction noise but may reduce responsiveness to sudden changes
2346
  """
2347
- return analyze_stock(s, "1d", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw, rrp, usm)
2348
 
2349
  daily_predict_btn.click(
2350
  fn=daily_analysis,
2351
  inputs=[daily_symbol, daily_prediction_days, daily_lookback_days, daily_strategy,
2352
  use_ensemble, use_regime_detection, use_stress_testing, risk_free_rate, market_index,
2353
  chronos_weight, technical_weight, statistical_weight,
2354
- random_real_points, use_smoothing],
2355
  outputs=[daily_signals, daily_plot, daily_metrics, daily_risk_metrics, daily_sector_metrics,
2356
  daily_regime_metrics, daily_stress_results, daily_ensemble_metrics, daily_signals_advanced]
2357
  )
@@ -2359,7 +2683,7 @@ def create_interface():
2359
  # Hourly analysis button click
2360
  def hourly_analysis(s: str, pd: int, ld: int, st: str, ue: bool, urd: bool, ust: bool,
2361
  rfr: float, mi: str, cw: float, tw: float, sw: float,
2362
- rrp: int, usm: bool) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
2363
  """
2364
  Process hourly timeframe stock analysis with advanced features.
2365
 
@@ -2396,6 +2720,10 @@ def create_interface():
2396
  rrp (int): Number of random real points to include in long-horizon context
2397
  usm (bool): Use smoothing
2398
  When True, applies smoothing to predictions to reduce noise and improve continuity
2399
 
2400
  Returns:
2401
  Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]: Analysis results containing:
@@ -2415,7 +2743,7 @@ def create_interface():
2415
 
2416
  Example:
2417
  >>> signals, plot, metrics, risk, sector, regime, stress, ensemble, advanced = hourly_analysis(
2418
- ... "AAPL", 3, 14, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2, 4, True
2419
  ... )
2420
 
2421
  Notes:
@@ -2427,14 +2755,14 @@ def create_interface():
2427
  - Requires high-liquidity stocks for reliable hourly analysis
2428
  - Smoothing helps reduce prediction noise but may reduce responsiveness to sudden changes
2429
  """
2430
- return analyze_stock(s, "1h", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw, rrp, usm)
2431
 
2432
  hourly_predict_btn.click(
2433
  fn=hourly_analysis,
2434
  inputs=[hourly_symbol, hourly_prediction_days, hourly_lookback_days, hourly_strategy,
2435
  use_ensemble, use_regime_detection, use_stress_testing, risk_free_rate, market_index,
2436
  chronos_weight, technical_weight, statistical_weight,
2437
- random_real_points, use_smoothing],
2438
  outputs=[hourly_signals, hourly_plot, hourly_metrics, hourly_risk_metrics, hourly_sector_metrics,
2439
  hourly_regime_metrics, hourly_stress_results, hourly_ensemble_metrics, hourly_signals_advanced]
2440
  )
@@ -2442,7 +2770,7 @@ def create_interface():
2442
  # 15-minute analysis button click
2443
  def min15_analysis(s: str, pd: int, ld: int, st: str, ue: bool, urd: bool, ust: bool,
2444
  rfr: float, mi: str, cw: float, tw: float, sw: float,
2445
- rrp: int, usm: bool) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
2446
  """
2447
  Process 15-minute timeframe stock analysis with advanced features.
2448
 
@@ -2479,6 +2807,10 @@ def create_interface():
2479
  rrp (int): Number of random real points to include in long-horizon context
2480
  usm (bool): Use smoothing
2481
  When True, applies smoothing to predictions to reduce noise and improve continuity
2482
 
2483
  Returns:
2484
  Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]: Analysis results containing:
@@ -2498,7 +2830,7 @@ def create_interface():
2498
 
2499
  Example:
2500
  >>> signals, plot, metrics, risk, sector, regime, stress, ensemble, advanced = min15_analysis(
2501
- ... "AAPL", 1, 3, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2, 4, True
2502
  ... )
2503
 
2504
  Notes:
@@ -2512,14 +2844,14 @@ def create_interface():
2512
  - Best suited for highly liquid large-cap stocks with tight bid-ask spreads
2513
  - Smoothing helps reduce prediction noise but may reduce responsiveness to sudden changes
2514
  """
2515
- return analyze_stock(s, "15m", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw, rrp, usm)
2516
 
2517
  min15_predict_btn.click(
2518
  fn=min15_analysis,
2519
  inputs=[min15_symbol, min15_prediction_days, min15_lookback_days, min15_strategy,
2520
  use_ensemble, use_regime_detection, use_stress_testing, risk_free_rate, market_index,
2521
  chronos_weight, technical_weight, statistical_weight,
2522
- random_real_points, use_smoothing],
2523
  outputs=[min15_signals, min15_plot, min15_metrics, min15_risk_metrics, min15_sector_metrics,
2524
  min15_regime_metrics, min15_stress_results, min15_ensemble_metrics, min15_signals_advanced]
2525
  )
 
371
  use_ensemble: bool = True, use_regime_detection: bool = True, use_stress_testing: bool = True,
372
  risk_free_rate: float = 0.02, ensemble_weights: Dict = None,
373
  market_index: str = "^GSPC",
374
+ random_real_points: int = 4, use_smoothing: bool = True,
375
+ smoothing_type: str = "exponential", smoothing_window: int = 5,
376
+ smoothing_alpha: float = 0.3) -> Tuple[Dict, go.Figure]:
377
  """
378
  Make prediction using selected strategy with advanced features.
379
 
 
390
  market_index (str): Market index for correlation analysis
391
  random_real_points (int): Number of random real points to include in long-horizon context
392
  use_smoothing (bool): Whether to apply smoothing to predictions
393
+ smoothing_type (str): Type of smoothing to apply ('exponential', 'moving_average', 'kalman', 'savitzky_golay', 'double_exponential', 'triple_exponential', 'adaptive', 'none')
+ smoothing_window (int): Window size for moving-average and Savitzky-Golay smoothing
+ smoothing_alpha (float): Smoothing factor for exponential smoothing methods (0-1)
394
 
395
  Returns:
396
  Tuple[Dict, go.Figure]: Trading signals and visualization plot
 
403
  try:
404
  # Prepare data for Chronos
405
  prices = df['Close'].values
406
+ chronos_context_size = 64 # Chronos model's context window size (fixed at 64)
407
+ input_context_size = len(prices) # Available input data can be much larger
408
+
409
  # Use a larger range for scaler fitting to get better normalization
410
+ scaler_range = min(input_context_size, chronos_context_size * 2) # Use up to 128 points for scaler
411
+
412
+ # Select the most recent chronos_context_size points for the model input
413
+ context_window = prices[-chronos_context_size:]
414
+
415
  scaler = MinMaxScaler(feature_range=(-1, 1))
416
  # Fit scaler on a larger range for better normalization
417
  scaler.fit(prices[-scaler_range:].reshape(-1, 1))
418
  normalized_prices = scaler.transform(context_window.reshape(-1, 1)).flatten()
419
 
420
+ # Ensure we have enough data points for Chronos
421
+ min_data_points = chronos_context_size
422
  if len(normalized_prices) < min_data_points:
423
  padding = np.full(min_data_points - len(normalized_prices), normalized_prices[-1])
424
  normalized_prices = np.concatenate([padding, normalized_prices])
 
439
 
440
  # Adjust prediction length based on timeframe
441
  if timeframe == "1d":
442
+ max_prediction_length = chronos_context_size # 64 days
443
  actual_prediction_length = min(prediction_days, max_prediction_length)
444
  trim_length = prediction_days
445
  elif timeframe == "1h":
446
+ max_prediction_length = chronos_context_size # 64 hours
447
  actual_prediction_length = min(prediction_days * 24, max_prediction_length)
448
  trim_length = prediction_days * 24
449
  else: # 15m
450
+ max_prediction_length = chronos_context_size # 64 intervals
451
  actual_prediction_length = min(prediction_days * 96, max_prediction_length)
452
  trim_length = prediction_days * 96
453
  actual_prediction_length = max(1, actual_prediction_length)
 
662
  # Apply the same trend but starting from the last actual value
663
  for i in range(1, len(mean_pred)):
664
  mean_pred[i] = last_actual + original_trend * i
665
+
666
+ # Apply financial smoothing if enabled
667
+ if use_smoothing:
668
+ mean_pred = apply_financial_smoothing(mean_pred, smoothing_type, smoothing_window, smoothing_alpha, 3, use_smoothing)
669
 
670
  # If we had to limit the prediction length, extend the prediction recursively
671
  if actual_prediction_length < trim_length:
672
  extended_mean_pred = mean_pred.copy()
673
  extended_std_pred = std_pred.copy()
674
 
675
+ # Store the original scaler for consistency
676
+ original_scaler = scaler
677
+
678
  # Calculate the number of extension steps needed
679
  remaining_steps = trim_length - actual_prediction_length
680
  steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
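  # For example, a 96-step target (15m timeframe, 1 day) with the 64-step Chronos horizon leaves remaining_steps = 32, so steps_needed = 1 (ceiling division).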
681
+
682
  for step in range(steps_needed):
683
+ # Use all available datapoints for context, including predictions
684
+ # This allows the model to build upon its own predictions for better long-horizon forecasting
685
  all_available_data = np.concatenate([prices, extended_mean_pred])
686
 
687
+ # If we have more data than chronos_context_size, use the most recent chronos_context_size points
688
  # Otherwise, use all available data (this allows for longer context when available)
689
+ if len(all_available_data) > chronos_context_size:
690
+ context_window = all_available_data[-chronos_context_size:]
691
  else:
692
  context_window = all_available_data
693
 
694
+ # Use the original scaler to maintain consistency - fit on historical data only
695
+ # but transform the combined context window
696
+ normalized_context = original_scaler.transform(context_window.reshape(-1, 1)).flatten()
 
697
  context = torch.tensor(normalized_context, dtype=dtype, device=device)
698
  if len(context.shape) == 1:
699
  context = context.unsqueeze(0)
 
705
  next_length = min(max_prediction_length, remaining_steps)
706
  else:
707
  next_length = min(max_prediction_length, remaining_steps)
708
+
709
  with torch.amp.autocast('cuda'):
710
  next_quantiles, next_mean = pipe.predict_quantiles(
711
  context=context,
 
713
  quantile_levels=[0.1, 0.5, 0.9]
714
  )
715
 
716
+ # Convert predictions to numpy and denormalize using original scaler
717
  next_mean = next_mean.detach().cpu().numpy()
718
  next_quantiles = next_quantiles.detach().cpu().numpy()
719
 
720
+ # Denormalize predictions using the original scaler
721
+ next_mean_pred = original_scaler.inverse_transform(next_mean.reshape(-1, 1)).flatten()
722
+ next_lower = original_scaler.inverse_transform(next_quantiles[0, :, 0].reshape(-1, 1)).flatten()
723
+ next_upper = original_scaler.inverse_transform(next_quantiles[0, :, 2].reshape(-1, 1)).flatten()
724
 
725
  # Calculate standard deviation
726
  next_std_pred = (next_upper - next_lower) / (2 * 1.645)
727
+
728
+ # Check for discontinuity and apply continuity correction
729
  if abs(next_mean_pred[0] - extended_mean_pred[-1]) > max(1e-6, 0.05 * abs(extended_mean_pred[-1])):
730
  print(f"Warning: Discontinuity detected between last prediction ({extended_mean_pred[-1]}) and next prediction ({next_mean_pred[0]})")
731
+ # Apply continuity correction to first prediction
732
+ next_mean_pred[0] = extended_mean_pred[-1]
733
+ # Adjust subsequent predictions to maintain trend
734
+ if len(next_mean_pred) > 1:
735
+ original_trend = next_mean_pred[1] - next_mean_pred[0]
736
+ for i in range(1, len(next_mean_pred)):
737
+ next_mean_pred[i] = extended_mean_pred[-1] + original_trend * i
738
+
739
+ # Apply financial smoothing if enabled
740
+ if use_smoothing and len(next_mean_pred) > 1:
741
+ next_mean_pred = apply_financial_smoothing(next_mean_pred, smoothing_type, smoothing_window, smoothing_alpha, 3, use_smoothing)
742
+
743
  # Append predictions
744
  extended_mean_pred = np.concatenate([extended_mean_pred, next_mean_pred])
745
  extended_std_pred = np.concatenate([extended_std_pred, next_std_pred])
 
759
  try:
760
  # Prepare volume data for Chronos
761
  volume_data = df['Volume'].values
762
+ if len(volume_data) >= chronos_context_size:
763
  # Normalize volume data
764
+ scaler_range = min(len(volume_data), chronos_context_size * 2)
765
+ context_window = volume_data[-chronos_context_size:]
 
766
  volume_scaler = MinMaxScaler(feature_range=(-1, 1))
767
  # Fit scaler on a larger range for better normalization
768
  volume_scaler.fit(volume_data[-scaler_range:].reshape(-1, 1))
769
  normalized_volume = volume_scaler.transform(context_window.reshape(-1, 1)).flatten()
770
+ if len(normalized_volume) < chronos_context_size:
771
+ padding = np.full(chronos_context_size - len(normalized_volume), normalized_volume[-1])
772
  normalized_volume = np.concatenate([padding, normalized_volume])
773
+ elif len(normalized_volume) > chronos_context_size:
774
+ normalized_volume = normalized_volume[-chronos_context_size:]
775
  volume_context = torch.tensor(normalized_volume, dtype=dtype, device=device)
776
  if len(volume_context.shape) == 1:
777
  volume_context = volume_context.unsqueeze(0)
 
789
  std_pred_vol = (upper_bound - lower_bound) / (2 * 1.645)
790
  last_actual = volume_data[-1]
791
  first_pred = volume_pred[0]
792
+ if abs(first_pred - last_actual) > max(1e-6, 0.005 * abs(last_actual)):
793
  print(f"Warning: Discontinuity detected between last actual volume ({last_actual}) and first prediction ({first_pred})")
794
  # Apply continuity correction
795
  volume_pred[0] = last_actual
 
800
  # Apply the same trend but starting from the last actual value
801
  for i in range(1, len(volume_pred)):
802
  volume_pred[i] = last_actual + original_trend * i
803
+
804
+ # Apply financial smoothing if enabled
805
+ if use_smoothing:
806
+ volume_pred = apply_financial_smoothing(volume_pred, smoothing_type, smoothing_window, smoothing_alpha, 3, use_smoothing)
807
+
808
  # Extend volume predictions if needed
809
  if actual_prediction_length < trim_length:
810
+ extended_volume_pred = volume_pred.copy()
811
+ extended_volume_std = std_pred_vol.copy()
812
  remaining_steps = trim_length - actual_prediction_length
813
  steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
814
+
815
  for step in range(steps_needed):
816
+ # Use all available datapoints for context, including predictions
817
+ # This allows the model to build upon its own predictions for better long-horizon forecasting
818
+ all_available_data = np.concatenate([volume_data, extended_volume_pred])
819
 
820
+ # If we have more data than chronos_context_size, use the most recent chronos_context_size points
821
  # Otherwise, use all available data (this allows for longer context when available)
822
+ if len(all_available_data) > chronos_context_size:
823
+ context_window = all_available_data[-chronos_context_size:]
824
  else:
825
  context_window = all_available_data
826
 
827
+ # Use the original volume scaler to maintain consistency - fit on historical data only
828
+ # but transform the combined context window
829
+ normalized_context = volume_scaler.transform(context_window.reshape(-1, 1)).flatten()
830
  context = torch.tensor(normalized_context, dtype=dtype, device=device)
831
  if len(context.shape) == 1:
832
  context = context.unsqueeze(0)
833
+
834
+ next_length = min(chronos_context_size, remaining_steps)
835
  with torch.amp.autocast('cuda'):
836
  next_quantiles, next_mean = pipe.predict_quantiles(
837
  context=context,
 
844
  next_lower = volume_scaler.inverse_transform(next_quantiles[0, :, 0].reshape(-1, 1)).flatten()
845
  next_upper = volume_scaler.inverse_transform(next_quantiles[0, :, 2].reshape(-1, 1)).flatten()
846
  next_std_pred = (next_upper - next_lower) / (2 * 1.645)
847
+
848
+ # Check for discontinuity and apply continuity correction
849
+ if abs(next_mean_pred[0] - extended_volume_pred[-1]) > max(1e-6, 0.05 * abs(extended_volume_pred[-1])):
850
+ print(f"Warning: Discontinuity detected between last volume prediction ({extended_volume_pred[-1]}) and next prediction ({next_mean_pred[0]})")
851
+ next_mean_pred[0] = extended_volume_pred[-1]
852
+ if len(next_mean_pred) > 1:
853
+ original_trend = next_mean_pred[1] - next_mean_pred[0]
854
+ for i in range(1, len(next_mean_pred)):
855
+ next_mean_pred[i] = extended_volume_pred[-1] + original_trend * i
856
+
857
+ # Apply financial smoothing if enabled
858
+ if use_smoothing and len(next_mean_pred) > 1:
859
+ next_mean_pred = apply_financial_smoothing(next_mean_pred, smoothing_type, smoothing_window, smoothing_alpha, 3, use_smoothing)
860
+
861
+ extended_volume_pred = np.concatenate([extended_volume_pred, next_mean_pred])
862
+ extended_volume_std = np.concatenate([extended_volume_std, next_std_pred])
863
  remaining_steps -= len(next_mean_pred)
864
  if remaining_steps <= 0:
865
  break
866
+ volume_pred = extended_volume_pred[:trim_length]
867
  else:
868
  avg_volume = df['Volume'].mean()
869
  volume_pred = np.full(trim_length, avg_volume)
 
872
  # Fallback: use historical average
873
  avg_volume = df['Volume'].mean()
874
  volume_pred = np.full(trim_length, avg_volume)
875
+
876
  try:
877
  # Prepare RSI data for Chronos
878
  rsi_data = df['RSI'].values
879
+ if len(rsi_data) >= chronos_context_size and not np.any(np.isnan(rsi_data)):
880
  # RSI is already normalized (0-100), but we'll scale it to (-1, 1)
881
+ scaler_range = min(len(rsi_data), chronos_context_size * 2)
882
+ context_window = rsi_data[-chronos_context_size:]
 
883
  rsi_scaler = MinMaxScaler(feature_range=(-1, 1))
884
  # Fit scaler on a larger range for better normalization
885
  rsi_scaler.fit(rsi_data[-scaler_range:].reshape(-1, 1))
886
  normalized_rsi = rsi_scaler.transform(context_window.reshape(-1, 1)).flatten()
887
+ if len(normalized_rsi) < chronos_context_size:
888
+ padding = np.full(chronos_context_size - len(normalized_rsi), normalized_rsi[-1])
889
  normalized_rsi = np.concatenate([padding, normalized_rsi])
890
+ elif len(normalized_rsi) > chronos_context_size:
891
+ normalized_rsi = normalized_rsi[-chronos_context_size:]
892
  rsi_context = torch.tensor(normalized_rsi, dtype=dtype, device=device)
893
  if len(rsi_context.shape) == 1:
894
  rsi_context = rsi_context.unsqueeze(0)
 
909
  rsi_pred = np.clip(rsi_pred, 0, 100)
910
  last_actual = rsi_data[-1]
911
  first_pred = rsi_pred[0]
912
+ if abs(first_pred - last_actual) > max(1e-6, 0.005 * abs(last_actual)):
913
  print(f"Warning: Discontinuity detected between last actual RSI ({last_actual}) and first prediction ({first_pred})")
914
  # Apply continuity correction
915
  rsi_pred[0] = last_actual
 
917
  trend = rsi_pred[1] - first_pred
918
  rsi_pred[1:] = rsi_pred[1:] - first_pred + last_actual
919
  rsi_pred = np.clip(rsi_pred, 0, 100) # Re-clip after adjustment
920
+
921
  # Extend RSI predictions if needed
922
  if actual_prediction_length < trim_length:
923
+ extended_rsi_pred = rsi_pred.copy()
924
+ extended_rsi_std = std_pred_rsi.copy()
925
  remaining_steps = trim_length - actual_prediction_length
926
  steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
927
+
928
  for step in range(steps_needed):
929
+ # Use all available datapoints for context, including predictions
930
+ # This allows the model to build upon its own predictions for better long-horizon forecasting
931
+ all_available_data = np.concatenate([rsi_data, extended_rsi_pred])
932
 
933
+ # If we have more data than chronos_context_size, use the most recent chronos_context_size points
934
  # Otherwise, use all available data (this allows for longer context when available)
935
+ if len(all_available_data) > chronos_context_size:
936
+ context_window = all_available_data[-chronos_context_size:]
937
  else:
938
  context_window = all_available_data
939
 
940
+ # Use the original RSI scaler to maintain consistency - fit on historical data only
941
+ # but transform the combined context window
942
+ normalized_context = rsi_scaler.transform(context_window.reshape(-1, 1)).flatten()
943
  context = torch.tensor(normalized_context, dtype=dtype, device=device)
944
  if len(context.shape) == 1:
945
  context = context.unsqueeze(0)
946
+
947
+ next_length = min(chronos_context_size, remaining_steps)
948
  with torch.amp.autocast('cuda'):
949
  next_quantiles, next_mean = pipe.predict_quantiles(
950
  context=context,
 
958
  next_upper = rsi_scaler.inverse_transform(next_quantiles[0, :, 2].reshape(-1, 1)).flatten()
959
  next_std_pred = (next_upper - next_lower) / (2 * 1.645)
960
  next_mean_pred = np.clip(next_mean_pred, 0, 100)
961
+
962
+ # Check for discontinuity and apply continuity correction
963
+ if abs(next_mean_pred[0] - extended_rsi_pred[-1]) > max(1e-6, 0.005 * abs(extended_rsi_pred[-1])):
964
+ print(f"Warning: Discontinuity detected between last RSI prediction ({extended_rsi_pred[-1]}) and next prediction ({next_mean_pred[0]})")
965
+ next_mean_pred[0] = extended_rsi_pred[-1]
966
+ if len(next_mean_pred) > 1:
967
+ original_trend = next_mean_pred[1] - next_mean_pred[0]
968
+ for i in range(1, len(next_mean_pred)):
969
+ next_mean_pred[i] = extended_rsi_pred[-1] + original_trend * i
970
+ next_mean_pred = np.clip(next_mean_pred, 0, 100)
971
+
972
+ # Apply financial smoothing if enabled
973
+ if use_smoothing and len(next_mean_pred) > 1:
974
+ next_mean_pred = apply_financial_smoothing(next_mean_pred, smoothing_type, smoothing_window, smoothing_alpha, 3, use_smoothing)
975
+ next_mean_pred = np.clip(next_mean_pred, 0, 100)
976
+
977
+ extended_rsi_pred = np.concatenate([extended_rsi_pred, next_mean_pred])
978
+ extended_rsi_std = np.concatenate([extended_rsi_std, next_std_pred])
979
  remaining_steps -= len(next_mean_pred)
980
  if remaining_steps <= 0:
981
  break
982
+ rsi_pred = extended_rsi_pred[:trim_length]
983
  else:
984
  last_rsi = df['RSI'].iloc[-1]
985
  rsi_pred = np.full(trim_length, last_rsi)
 
988
  # Fallback: use last known RSI value
989
  last_rsi = df['RSI'].iloc[-1]
990
  rsi_pred = np.full(trim_length, last_rsi)
991
+
992
  try:
993
  # Prepare MACD data for Chronos
994
  macd_data = df['MACD'].values
995
+ if len(macd_data) >= chronos_context_size and not np.any(np.isnan(macd_data)):
996
  # Normalize MACD data
997
+ scaler_range = min(len(macd_data), chronos_context_size * 2)
998
+ context_window = macd_data[-chronos_context_size:]
 
999
  macd_scaler = MinMaxScaler(feature_range=(-1, 1))
1000
  # Fit scaler on a larger range for better normalization
1001
  macd_scaler.fit(macd_data[-scaler_range:].reshape(-1, 1))
1002
  normalized_macd = macd_scaler.transform(context_window.reshape(-1, 1)).flatten()
1003
+ if len(normalized_macd) < chronos_context_size:
1004
+ padding = np.full(chronos_context_size - len(normalized_macd), normalized_macd[-1])
1005
  normalized_macd = np.concatenate([padding, normalized_macd])
1006
+ elif len(normalized_macd) > chronos_context_size:
1007
+ normalized_macd = normalized_macd[-chronos_context_size:]
1008
  macd_context = torch.tensor(normalized_macd, dtype=dtype, device=device)
1009
  if len(macd_context.shape) == 1:
1010
  macd_context = macd_context.unsqueeze(0)
 
1024
  last_actual = macd_data[-1]
1025
  first_pred = macd_pred[0]
1026
 
1027
+ # Check for discontinuity and apply continuity correction
1028
+ if abs(first_pred - last_actual) > max(1e-6, 0.005 * abs(last_actual)):
1029
  print(f"Warning: Discontinuity detected between last actual MACD ({last_actual}) and first prediction ({first_pred})")
1030
  # Apply continuity correction
1031
  macd_pred[0] = last_actual
 
1036
  # Apply the same trend but starting from the last actual value
1037
  for i in range(1, len(macd_pred)):
1038
  macd_pred[i] = last_actual + original_trend * i
1039
+
1040
+ # Apply financial smoothing if enabled
1041
+ if use_smoothing:
1042
+ macd_pred = apply_financial_smoothing(macd_pred, smoothing_type, smoothing_window, smoothing_alpha, 3, use_smoothing)
1043
+
1044
+ # Extend MACD predictions if needed
1045
  if actual_prediction_length < trim_length:
1046
+ extended_macd_pred = macd_pred.copy()
1047
+ extended_macd_std = std_pred_macd.copy()
1048
  remaining_steps = trim_length - actual_prediction_length
1049
  steps_needed = (remaining_steps + actual_prediction_length - 1) // actual_prediction_length
1050
+
1051
  for step in range(steps_needed):
1052
+ # Use all available datapoints for context, including predictions
1053
+ # This allows the model to build upon its own predictions for better long-horizon forecasting
1054
+ all_available_data = np.concatenate([macd_data, extended_macd_pred])
1055
 
1056
+ # If we have more data than chronos_context_size, use the most recent chronos_context_size points
1057
  # Otherwise, use all available data (this allows for longer context when available)
1058
+ if len(all_available_data) > chronos_context_size:
1059
+ context_window = all_available_data[-chronos_context_size:]
1060
  else:
1061
  context_window = all_available_data
1062
 
1063
+ # Use the original MACD scaler to maintain consistency - fit on historical data only
1064
+ # but transform the combined context window
1065
+ normalized_context = macd_scaler.transform(context_window.reshape(-1, 1)).flatten()
1066
  context = torch.tensor(normalized_context, dtype=dtype, device=device)
1067
  if len(context.shape) == 1:
1068
  context = context.unsqueeze(0)
1069
+
1070
+ next_length = min(chronos_context_size, remaining_steps)
1071
  with torch.amp.autocast('cuda'):
1072
  next_quantiles, next_mean = pipe.predict_quantiles(
1073
  context=context,
 
1080
  next_lower = macd_scaler.inverse_transform(next_quantiles[0, :, 0].reshape(-1, 1)).flatten()
1081
  next_upper = macd_scaler.inverse_transform(next_quantiles[0, :, 2].reshape(-1, 1)).flatten()
1082
  next_std_pred = (next_upper - next_lower) / (2 * 1.645)
1083
+
1084
+ # Check for discontinuity and apply continuity correction
1085
+ if abs(next_mean_pred[0] - extended_macd_pred[-1]) > max(1e-6, 0.05 * abs(extended_macd_pred[-1])):
1086
+ print(f"Warning: Discontinuity detected between last MACD prediction ({extended_macd_pred[-1]}) and next prediction ({next_mean_pred[0]})")
1087
+ next_mean_pred[0] = extended_macd_pred[-1]
1088
+ if len(next_mean_pred) > 1:
1089
+ original_trend = next_mean_pred[1] - next_mean_pred[0]
1090
+ for i in range(1, len(next_mean_pred)):
1091
+ next_mean_pred[i] = extended_macd_pred[-1] + original_trend * i
1092
+
1093
+ # Apply financial smoothing if enabled
1094
+ if use_smoothing and len(next_mean_pred) > 1:
1095
+ next_mean_pred = apply_financial_smoothing(next_mean_pred, smoothing_type, smoothing_window, smoothing_alpha, 3, use_smoothing)
1096
+
1097
+ extended_macd_pred = np.concatenate([extended_macd_pred, next_mean_pred])
1098
+ extended_macd_std = np.concatenate([extended_macd_std, next_std_pred])
1099
  remaining_steps -= len(next_mean_pred)
1100
  if remaining_steps <= 0:
1101
  break
1102
+ macd_pred = extended_macd_pred[:trim_length]
1103
  else:
1104
  last_macd = df['MACD'].iloc[-1]
1105
  macd_pred = np.full(trim_length, last_macd)
 
1991
  print(f"Advanced trading signals error: {str(e)}")
1992
  return {"error": str(e)}
1993
 
1994
+ def apply_financial_smoothing(data: np.ndarray, smoothing_type: str = "exponential",
1995
+ window_size: int = 5, alpha: float = 0.3,
1996
+ poly_order: int = 3, use_smoothing: bool = True) -> np.ndarray:
1997
+ """
1998
+ Apply financial smoothing algorithms to time series data.
1999
+
2000
+ Args:
2001
+ data (np.ndarray): Input time series data
2002
+ smoothing_type (str): Type of smoothing to apply
2003
+ - 'exponential': Exponential moving average (good for trend following)
2004
+ - 'moving_average': Simple moving average (good for noise reduction)
2005
+ - 'kalman': Kalman filter (good for adaptive smoothing)
2006
+ - 'savitzky_golay': Savitzky-Golay filter (good for preserving peaks/valleys)
2007
+ - 'double_exponential': Double exponential smoothing (good for trend + seasonality)
2008
+ - 'triple_exponential': Triple exponential smoothing (Holt-Winters, good for complex patterns)
2009
+ - 'adaptive': Adaptive smoothing based on volatility
2010
+ - 'none': No smoothing applied
2011
+ window_size (int): Window size for moving average and Savitzky-Golay
2012
+ alpha (float): Smoothing factor for exponential methods (0-1)
2013
+ poly_order (int): Polynomial order for Savitzky-Golay filter
2014
+ use_smoothing (bool): Whether to apply smoothing
2015
+
2016
+ Returns:
2017
+ np.ndarray: Smoothed data
2018
+ """
2019
+ if not use_smoothing or smoothing_type == "none" or len(data) < 3:
2020
+ return data
2021
+
2022
+ try:
2023
+ if smoothing_type == "exponential":
2024
+ # Exponential Moving Average - good for trend following
2025
+ smoothed = np.zeros_like(data)
2026
+ smoothed[0] = data[0]
2027
+ for i in range(1, len(data)):
2028
+ smoothed[i] = alpha * data[i] + (1 - alpha) * smoothed[i-1]
2029
+ return smoothed
2030
+
2031
+ elif smoothing_type == "moving_average":
2032
+ # Simple Moving Average - good for noise reduction
2033
+ if len(data) < window_size:
2034
+ return data
2035
+
2036
+ smoothed = np.zeros_like(data)
2037
+ # Handle the beginning of the series
2038
+ for i in range(min(window_size - 1, len(data))):
2039
+ smoothed[i] = np.mean(data[:i+1])
2040
+
2041
+ # Apply moving average for the rest
2042
+ for i in range(window_size - 1, len(data)):
2043
+ smoothed[i] = np.mean(data[i-window_size+1:i+1])
2044
+ return smoothed
2045
+
2046
+ elif smoothing_type == "kalman":
2047
+ # Kalman Filter - adaptive smoothing
2048
+ if len(data) < 2:
2049
+ return data
2050
+
2051
+ # Initialize Kalman filter parameters
2052
+ Q = 0.01 # Process noise
2053
+ R = 0.1 # Measurement noise
2054
+ P = 1.0 # Initial estimate error
2055
+ x = data[0] # Initial state estimate
2056
+
2057
+ smoothed = np.zeros_like(data)
2058
+ smoothed[0] = x
2059
+
2060
+ for i in range(1, len(data)):
2061
+ # Prediction step
2062
+ x_pred = x
2063
+ P_pred = P + Q
2064
+
2065
+ # Update step
2066
+ K = P_pred / (P_pred + R) # Kalman gain
2067
+ x = x_pred + K * (data[i] - x_pred)
2068
+ P = (1 - K) * P_pred
2069
+
2070
+ smoothed[i] = x
2071
+
2072
+ return smoothed
2073
+
2074
+ elif smoothing_type == "savitzky_golay":
2075
+ # Savitzky-Golay filter - preserves peaks and valleys
2076
+ if len(data) < window_size:
2077
+ return data
2078
+
2079
+ # Ensure window_size is odd
2080
+ if window_size % 2 == 0:
2081
+ window_size += 1
2082
+
2083
+ # Ensure polynomial order is less than window_size
2084
+ if poly_order >= window_size:
2085
+ poly_order = window_size - 1
2086
+
2087
+ try:
2088
+ from scipy.signal import savgol_filter
2089
+ return savgol_filter(data, window_size, poly_order)
2090
+ except ImportError:
2091
+ # Fallback to simple moving average if scipy not available
2092
+ return apply_financial_smoothing(data, "moving_average", window_size)
2093
+
2094
+ elif smoothing_type == "double_exponential":
2095
+ # Double Exponential Smoothing (Holt's method) - trend + level
2096
+ if len(data) < 3:
2097
+ return data
2098
+
2099
+ smoothed = np.zeros_like(data)
2100
+ trend = np.zeros_like(data)
2101
+
2102
+ # Initialize
2103
+ smoothed[0] = data[0]
2104
+ trend[0] = data[1] - data[0] if len(data) > 1 else 0
2105
+
2106
+ # Apply double exponential smoothing
2107
+ for i in range(1, len(data)):
2108
+ prev_smoothed = smoothed[i-1]
2109
+ prev_trend = trend[i-1]
2110
+
2111
+ smoothed[i] = alpha * data[i] + (1 - alpha) * (prev_smoothed + prev_trend)
2112
+ trend[i] = alpha * (smoothed[i] - prev_smoothed) + (1 - alpha) * prev_trend
2113
+
2114
+ return smoothed
2115
+
2116
+ elif smoothing_type == "triple_exponential":
2117
+ # Triple Exponential Smoothing (Holt-Winters) - trend + level + seasonality
2118
+ if len(data) < 6:
2119
+ return apply_financial_smoothing(data, "double_exponential", window_size, alpha)
2120
+
2121
+ # For simplicity, we'll use a seasonal period of 5 (common for financial data)
2122
+ season_period = min(5, len(data) // 2)
2123
+
2124
+ smoothed = np.zeros_like(data)
2125
+ trend = np.zeros_like(data)
2126
+ season = np.zeros_like(data)
2127
+
2128
+ # Initialize
2129
+ smoothed[0] = data[0]
2130
+ trend[0] = (data[season_period] - data[0]) / season_period if len(data) > season_period else 0
2131
+
2132
+ # Initialize seasonal components
2133
+ for i in range(season_period):
2134
+ season[i] = data[i] - smoothed[0]
2135
+
2136
+ # Apply triple exponential smoothing
2137
+ for i in range(1, len(data)):
2138
+ prev_smoothed = smoothed[i-1]
2139
+ prev_trend = trend[i-1]
2140
+ prev_season = season[(i-1) % season_period]
2141
+
2142
+ smoothed[i] = alpha * (data[i] - prev_season) + (1 - alpha) * (prev_smoothed + prev_trend)
2143
+ trend[i] = alpha * (smoothed[i] - prev_smoothed) + (1 - alpha) * prev_trend
2144
+ season[i % season_period] = alpha * (data[i] - smoothed[i]) + (1 - alpha) * prev_season
2145
+
2146
+ return smoothed
2147
+
2148
+ elif smoothing_type == "adaptive":
2149
+ # Adaptive smoothing based on volatility
2150
+ if len(data) < 5:
2151
+ return data
2152
+
2153
+ # Calculate rolling volatility
2154
+ returns = np.diff(data) / data[:-1]
2155
+ volatility = np.zeros_like(data)
2156
+ volatility[0] = np.std(returns) if len(returns) > 0 else 0.01
2157
+
2158
+ for i in range(1, len(data)):
2159
+ if i < 5:
2160
+ volatility[i] = np.std(returns[:i]) if i > 0 else 0.01
2161
+ else:
2162
+ volatility[i] = np.std(returns[i-5:i])
2163
+
2164
+ # Normalize volatility to smoothing factor
2165
+ vol_factor = np.clip(volatility / np.mean(volatility), 0.1, 0.9)
2166
+ adaptive_alpha = 1 - vol_factor # Higher volatility = less smoothing
2167
+
2168
+ # Apply adaptive exponential smoothing
2169
+ smoothed = np.zeros_like(data)
2170
+ smoothed[0] = data[0]
2171
+
2172
+ for i in range(1, len(data)):
2173
+ current_alpha = adaptive_alpha[i]
2174
+ smoothed[i] = current_alpha * data[i] + (1 - current_alpha) * smoothed[i-1]
2175
+
2176
+ return smoothed
2177
+
2178
+ else:
2179
+ # Default to exponential smoothing
2180
+ return apply_financial_smoothing(data, "exponential", window_size, alpha)
2181
+
2182
+ except Exception as e:
2183
+ print(f"Smoothing error: {str(e)}")
2184
+ return data
2185
+
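A rough, illustrative sketch of how the apply_financial_smoothing helper above could be exercised on its own. It assumes the positional signature (data, smoothing_type, window_size, alpha) seen in the helper's internal fallback calls and that it runs in the same module; the price values are invented:

import numpy as np

# Invented predicted closes, purely for illustration
preds = np.array([101.2, 102.8, 101.9, 103.5, 104.1, 103.0, 104.8])

# Holt-style smoothing vs. volatility-adaptive smoothing of the same series
holt = apply_financial_smoothing(preds, "double_exponential", 5, 0.3)
adaptive = apply_financial_smoothing(preds, "adaptive", 5, 0.3)

print(np.round(holt, 2))
print(np.round(adaptive, 2))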
2186
  def create_interface():
2187
  """Create the Gradio interface with separate tabs for different timeframes"""
2188
  with gr.Blocks(title="Advanced Stock Prediction Analysis") as demo:
 
2205
  use_regime_detection = gr.Checkbox(label="Use Regime Detection", value=True)
2206
  use_stress_testing = gr.Checkbox(label="Use Stress Testing", value=True)
2207
  use_smoothing = gr.Checkbox(label="Use Smoothing", value=True)
2208
+ smoothing_type = gr.Dropdown(
2209
+ choices=["exponential", "moving_average", "kalman", "savitzky_golay",
2210
+ "double_exponential", "triple_exponential", "adaptive", "none"],
2211
+ label="Smoothing Type",
2212
+ value="exponential",
2213
+ info="""Smoothing algorithms:
2214
+ • Exponential: Trend following (default)
2215
+ • Moving Average: Noise reduction
2216
+ • Kalman: Adaptive smoothing
2217
+ • Savitzky-Golay: Preserves peaks/valleys
2218
+ • Double Exponential: Trend + level
2219
+ • Triple Exponential: Complex patterns
2220
+ • Adaptive: Volatility-based
2221
+ • None: No smoothing"""
2222
+ )
2223
+ smoothing_window = gr.Slider(
2224
+ minimum=3,
2225
+ maximum=21,
2226
+ value=5,
2227
+ step=1,
2228
+ label="Smoothing Window Size",
2229
+ info="Window size for moving average and Savitzky-Golay filters"
2230
+ )
2231
+ smoothing_alpha = gr.Slider(
2232
+ minimum=0.1,
2233
+ maximum=0.9,
2234
+ value=0.3,
2235
+ step=0.05,
2236
+ label="Smoothing Alpha",
2237
+ info="Smoothing factor for exponential methods (0.1-0.9)"
2238
+ )
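For orientation on the alpha slider above: in the exponential-family smoothers alpha is the usual smoothing factor in smoothed[i] = alpha * data[i] + (1 - alpha) * smoothed[i-1] (the same recurrence the double-exponential branch applies to its level), so a low alpha leans on the previous smoothed value while a high alpha tracks the newest point. A tiny self-contained sketch with invented numbers:

x = [100.0, 104.0, 101.0, 106.0]
for alpha in (0.3, 0.9):
    s = [x[0]]
    for value in x[1:]:
        s.append(alpha * value + (1 - alpha) * s[-1])
    print(alpha, [round(v, 2) for v in s])

With these values the alpha=0.3 series ends near 102.6 while the alpha=0.9 series ends near 105.5, i.e. the heavily smoothed output lags the final jump - the responsiveness trade-off the slider exposes.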
2239
  risk_free_rate = gr.Slider(
2240
  minimum=0.0,
2241
  maximum=0.1,
 
2306
  value="chronos"
2307
  )
2308
  daily_predict_btn = gr.Button("Analyze Stock")
2309
+ gr.Markdown("""
2310
+ **Daily Analysis Features:**
2311
+ - **Extended Data Range**: Up to 10 years of historical data (3650 days)
2312
+ - **24/7 Availability**: Available regardless of market hours
2313
+ - **Auto-Adjusted Data**: Automatically adjusted for splits and dividends
2314
+ - **Comprehensive Financial Ratios**: P/E, PEG, Price-to-Book, Price-to-Sales, and more
2315
+ - **Advanced Risk Metrics**: Sharpe ratio, VaR, drawdown analysis, market correlation
2316
+ - **Market Regime Detection**: Identifies bull/bear/sideways market conditions
2317
+ - **Stress Testing**: Scenario analysis under various market conditions
2318
+ - **Ensemble Methods**: Combines multiple prediction models for improved accuracy
2319
+ - **Maximum prediction period**: 365 days
2320
+ - **Ideal for**: Medium to long-term investment analysis, portfolio management, and strategic planning
2321
+ - **Technical Indicators**: RSI, MACD, Bollinger Bands, moving averages optimized for daily data
2322
+ - **Volume Analysis**: Average daily volume, volume volatility, and liquidity metrics
2323
+ - **Sector Analysis**: Industry classification, market cap ranking, and sector-specific metrics
2324
+ """)
2325
 
2326
  with gr.Column():
2327
  daily_plot = gr.Plot(label="Analysis and Prediction")
 
2485
  def analyze_stock(symbol, timeframe, prediction_days, lookback_days, strategy,
2486
  use_ensemble, use_regime_detection, use_stress_testing,
2487
  risk_free_rate, market_index, chronos_weight, technical_weight, statistical_weight,
2488
+ random_real_points, use_smoothing, smoothing_type, smoothing_window, smoothing_alpha):
2489
  try:
2490
  # Create ensemble weights
2491
  ensemble_weights = {
 
2511
  ensemble_weights=ensemble_weights,
2512
  market_index=market_index,
2513
  random_real_points=random_real_points,
2514
+ use_smoothing=use_smoothing,
2515
+ smoothing_type=smoothing_type,
2516
+ smoothing_window=smoothing_window,
2517
+ smoothing_alpha=smoothing_alpha
2518
  )
2519
 
2520
  # Get historical data for additional metrics
 
2597
  # Daily analysis button click
2598
  def daily_analysis(s: str, pd: int, ld: int, st: str, ue: bool, urd: bool, ust: bool,
2599
  rfr: float, mi: str, cw: float, tw: float, sw: float,
2600
+ rrp: int, usm: bool, smt: str, sww: float, sa: float) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
2601
  """
2602
  Process daily timeframe stock analysis with advanced features.
2603
 
 
2634
  rrp (int): Number of random real points to include in long-horizon context
2635
  usm (bool): Use smoothing
2636
  When True, applies smoothing to predictions to reduce noise and improve continuity
2637
+ smt (str): Smoothing type to use
2638
+ Options: "exponential", "moving_average", "kalman", "savitzky_golay", "double_exponential", "triple_exponential", "adaptive", "none"
2639
+ sww (float): Smoothing window size for moving average and Savitzky-Golay
2640
+ sa (float): Smoothing alpha for exponential methods (0.1-0.9)
2641
 
2642
  Returns:
2643
  Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]: Analysis results containing:
 
2657
 
2658
  Example:
2659
  >>> signals, plot, metrics, risk, sector, regime, stress, ensemble, advanced = daily_analysis(
2660
+ ... "AAPL", 30, 365, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2, 4, True, "exponential", 5, 0.3
2661
  ... )
2662
 
2663
  Notes:
 
2668
  - Risk-free rate is typically between 0.02-0.05 (2-5% annually)
2669
  - Smoothing helps reduce prediction noise but may reduce responsiveness to sudden changes
2670
  """
2671
+ return analyze_stock(s, "1d", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw, rrp, usm, smt, sww, sa)
2672
 
2673
  daily_predict_btn.click(
2674
  fn=daily_analysis,
2675
  inputs=[daily_symbol, daily_prediction_days, daily_lookback_days, daily_strategy,
2676
  use_ensemble, use_regime_detection, use_stress_testing, risk_free_rate, market_index,
2677
  chronos_weight, technical_weight, statistical_weight,
2678
+ random_real_points, use_smoothing, smoothing_type, smoothing_window, smoothing_alpha],
2679
  outputs=[daily_signals, daily_plot, daily_metrics, daily_risk_metrics, daily_sector_metrics,
2680
  daily_regime_metrics, daily_stress_results, daily_ensemble_metrics, daily_signals_advanced]
2681
  )
 
2683
  # Hourly analysis button click
2684
  def hourly_analysis(s: str, pd: int, ld: int, st: str, ue: bool, urd: bool, ust: bool,
2685
  rfr: float, mi: str, cw: float, tw: float, sw: float,
2686
+ rrp: int, usm: bool, smt: str, sww: float, sa: float) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
2687
  """
2688
  Process hourly timeframe stock analysis with advanced features.
2689
 
 
2720
  rrp (int): Number of random real points to include in long-horizon context
2721
  usm (bool): Use smoothing
2722
  When True, applies smoothing to predictions to reduce noise and improve continuity
2723
+ smt (str): Smoothing type to use
2724
+ Options: "exponential", "moving_average", "kalman", "savitzky_golay", "double_exponential", "triple_exponential", "adaptive", "none"
2725
+ sww (float): Smoothing window size for moving average and Savitzky-Golay
2726
+ sa (float): Smoothing alpha for exponential methods (0.1-0.9)
2727
 
2728
  Returns:
2729
  Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]: Analysis results containing:
 
2743
 
2744
  Example:
2745
  >>> signals, plot, metrics, risk, sector, regime, stress, ensemble, advanced = hourly_analysis(
2746
+ ... "AAPL", 3, 14, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2, 4, True, "exponential", 5, 0.3
2747
  ... )
2748
 
2749
  Notes:
 
2755
  - Requires high-liquidity stocks for reliable hourly analysis
2756
  - Smoothing helps reduce prediction noise but may reduce responsiveness to sudden changes
2757
  """
2758
+ return analyze_stock(s, "1h", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw, rrp, usm, smt, sww, sa)
2759
 
2760
  hourly_predict_btn.click(
2761
  fn=hourly_analysis,
2762
  inputs=[hourly_symbol, hourly_prediction_days, hourly_lookback_days, hourly_strategy,
2763
  use_ensemble, use_regime_detection, use_stress_testing, risk_free_rate, market_index,
2764
  chronos_weight, technical_weight, statistical_weight,
2765
+ random_real_points, use_smoothing, smoothing_type, smoothing_window, smoothing_alpha],
2766
  outputs=[hourly_signals, hourly_plot, hourly_metrics, hourly_risk_metrics, hourly_sector_metrics,
2767
  hourly_regime_metrics, hourly_stress_results, hourly_ensemble_metrics, hourly_signals_advanced]
2768
  )
 
2770
  # 15-minute analysis button click
2771
  def min15_analysis(s: str, pd: int, ld: int, st: str, ue: bool, urd: bool, ust: bool,
2772
  rfr: float, mi: str, cw: float, tw: float, sw: float,
2773
+ rrp: int, usm: bool, smt: str, sww: float, sa: float) -> Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]:
2774
  """
2775
  Process 15-minute timeframe stock analysis with advanced features.
2776
 
 
2807
  rrp (int): Number of random real points to include in long-horizon context
2808
  usm (bool): Use smoothing
2809
  When True, applies smoothing to predictions to reduce noise and improve continuity
2810
+ smt (str): Smoothing type to use
2811
+ Options: "exponential", "moving_average", "kalman", "savitzky_golay", "double_exponential", "triple_exponential", "adaptive", "none"
2812
+ sww (float): Smoothing window size for moving average and Savitzky-Golay
2813
+ sa (float): Smoothing alpha for exponential methods (0.1-0.9)
2814
 
2815
  Returns:
2816
  Tuple[Dict, go.Figure, Dict, Dict, Dict, Dict, Dict, Dict, Dict]: Analysis results containing:
 
2830
 
2831
  Example:
2832
  >>> signals, plot, metrics, risk, sector, regime, stress, ensemble, advanced = min15_analysis(
2833
+ ... "AAPL", 1, 3, "chronos", True, True, True, 0.02, "^GSPC", 0.6, 0.2, 0.2, 4, True, "exponential", 5, 0.3
2834
  ... )
2835
 
2836
  Notes:
 
2844
  - Best suited for highly liquid large-cap stocks with tight bid-ask spreads
2845
  - Smoothing helps reduce prediction noise but may reduce responsiveness to sudden changes
2846
  """
2847
+ return analyze_stock(s, "15m", pd, ld, st, ue, urd, ust, rfr, mi, cw, tw, sw, rrp, usm, smt, sww, sa)
2848
 
2849
  min15_predict_btn.click(
2850
  fn=min15_analysis,
2851
  inputs=[min15_symbol, min15_prediction_days, min15_lookback_days, min15_strategy,
2852
  use_ensemble, use_regime_detection, use_stress_testing, risk_free_rate, market_index,
2853
  chronos_weight, technical_weight, statistical_weight,
2854
+ random_real_points, use_smoothing, smoothing_type, smoothing_window, smoothing_alpha],
2855
  outputs=[min15_signals, min15_plot, min15_metrics, min15_risk_metrics, min15_sector_metrics,
2856
  min15_regime_metrics, min15_stress_results, min15_ensemble_metrics, min15_signals_advanced]
2857
  )