Coder committed
Commit 1d3e57b · 1 Parent(s): f5efb0c

initial commit

Files changed (1)
  1. app.py +1 -24
app.py CHANGED
@@ -6,38 +6,32 @@ import yfinance as yf
from sklearn.metrics import mean_absolute_error, mean_squared_error
from prophet.plot import plot_plotly, plot_components_plotly

-# List of ticker symbols
ticker_symbols = st.secrets["TICKER_SYMBOLS"].split(",")

-# Function to fetch stock data from Yahoo Finance
def fetch_stock_data(ticker_symbol, start_date, end_date):
-    ticker_symbol = ticker_symbol +".NS"
+    ticker_symbol = ticker_symbol +st.secrets["TICKER_FLAG"]
    stock_data = yf.download(ticker_symbol, start=start_date, end=end_date)
    df = stock_data[['Adj Close']].reset_index()
    df = df.rename(columns={'Date': 'ds', 'Adj Close': 'y'})
    # df.to_csv(f"{ticker_symbol}.csv")
    return df

-# Function to train the Prophet model
def train_prophet_model(df):
    model = Prophet()
    model.fit(df)
    return model

-# Function to make the forecast
def make_forecast(model, periods):
    future = model.make_future_dataframe(periods=periods)
    forecast = model.predict(future)
    return forecast

-# Function to calculate performance metrics
def calculate_performance_metrics(actual, predicted):
    mae = mean_absolute_error(actual, predicted)
    mse = mean_squared_error(actual, predicted)
    rmse = np.sqrt(mse)
    return {'MAE': mae, 'MSE': mse, 'RMSE': rmse}

-# Function to determine sentiment
def determine_sentiment(actual, predicted):
    if actual > predicted:
        sentiment = 'Negative'
@@ -48,19 +42,15 @@ def determine_sentiment(actual, predicted):
    return sentiment


-# Streamlit app
def main():
    st.title('Stock Prediction on NSE Stocks')

-    # Set up the layout
    st.sidebar.header('User Input Parameters')
    ticker_symbol = st.sidebar.selectbox('Enter Ticker Symbol', options=ticker_symbols, index=0)

-    # Dropdown for training period selection
    training_period = st.sidebar.selectbox('Select Training Period',
                                           options=['1 week', '1 month', '1 year', '10 years'])

-    # Calculate start date and end date based on training period
    if training_period == '1 week':
        start_date = pd.to_datetime('today') - pd.DateOffset(weeks=1)
    elif training_period == '1 month':
@@ -72,15 +62,12 @@ def main():

    end_date = pd.to_datetime('today')

-    # Fetching the data for the selected training period
    df = fetch_stock_data(ticker_symbol, start_date, end_date)

-    # Dropdown for forecast horizon selection
    forecast_horizon = st.sidebar.selectbox('Forecast Horizon',
                                            options=['Next day', 'Next week', 'Next month'],
                                            format_func=lambda x: x.capitalize())

-    # Convert the selected horizon to days
    horizon_mapping = {'Next day': 1, 'Next week': 7, 'Next month': 30}
    forecast_days = horizon_mapping[forecast_horizon]

@@ -97,8 +84,6 @@ def main():
    """)
    st.subheader(f'Forecast Summary for {ticker_symbol}')
    latest_forecast = forecast_reversed.iloc[0]
-
-    # Last Stock Price details with sentiment indicator
    actual_last_price = df["y"].iloc[-1]
    predicted_last_price = latest_forecast['yhat']
    sentiment = determine_sentiment(actual_last_price, predicted_last_price)
@@ -111,7 +96,6 @@ def main():
    else:
        st.info(f'Overall predication indicates neutral sentiment.')

-    # Prediction details
    st.markdown(f"""
    **Prediction for {forecast_horizon.lower()}:**

@@ -128,8 +112,6 @@ def main():
    st.write(forecast_reversed)


-    # Calculate performance metrics
-    # Function to determine if performance metrics are in a good range
    def evaluate_performance_metrics(metrics):
        evaluation = {}
        evaluation['MAE'] = 'Good' if metrics['MAE'] < 0.05 * (df['y'].max() - df['y'].min()) else 'Not Good'
@@ -137,21 +119,17 @@ def main():
        evaluation['RMSE'] = 'Good' if metrics['RMSE'] < 0.1 * (df['y'].max() - df['y'].min()) else 'Not Good'
        return evaluation

-    # Calculate performance metrics
    actual = df['y']
    predicted = forecast['yhat'][:len(df)]
    metrics = calculate_performance_metrics(actual, predicted)

-    # Evaluate performance metrics
    evaluation = evaluate_performance_metrics(metrics)

    metrics = calculate_performance_metrics(actual, predicted)
    MAE =metrics['MAE']
    MSE = metrics['MSE']
    RMSE = metrics['RMSE']
-

-    # Display evaluation
    st.subheader('Performance Evaluation')
    st.write('The metrics below provide a quantitative measure of the model’s accuracy:')
    maecolor = "green" if evaluation["MAE"] == "Good" else "red"
@@ -168,7 +146,6 @@ def main():
    st.markdown("(The square root of MSE, which is more interpretable in the same units as the target variable.)")


-


    # Run the main function
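
With this commit the exchange suffix that was hardcoded as ".NS" is read from Streamlit's secrets store instead, alongside the existing TICKER_SYMBOLS key. A minimal sketch of the expected lookup after this change, assuming the app's secrets define TICKER_SYMBOLS (comma-separated) and TICKER_FLAG; the ticker names below are hypothetical placeholders, not values taken from this repository:

import streamlit as st

# Assumed secrets, e.g. in .streamlit/secrets.toml or the hosting platform's
# secrets settings (symbol values here are hypothetical examples):
#   TICKER_SYMBOLS = "RELIANCE,TCS,INFY"
#   TICKER_FLAG = ".NS"

ticker_symbols = st.secrets["TICKER_SYMBOLS"].split(",")      # e.g. ["RELIANCE", "TCS", "INFY"]
full_symbol = ticker_symbols[0] + st.secrets["TICKER_FLAG"]   # e.g. "RELIANCE.NS", the suffix yfinance expects for NSE listings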