Sanket45 committed on
Commit
90a8b51
1 Parent(s): 01a0fbf

upload page

pages/1_Store Demand Forecasting.py ADDED
@@ -0,0 +1,449 @@
+ import streamlit as st
+ from src.data import StoreDataLoader
+ from src.model import Model_Load
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+ import plotly.graph_objects as go
+ from sklearn.metrics import mean_absolute_error,mean_squared_error
+ import numpy as np
+ import pandas as pd
+ from src.prediction import test_prediction,val_prediction,create_week_date_featues
+ import plotly.express as px
+ #-------------------------------------------------------------
+ ## Load model object
+ model_obj=Model_Load()
+ #--------------------------------------------------------------
+ @st.cache_data
+ def convert_df(df):
+     return df.to_csv(index=False).encode('utf-8')
+ #-----------------------------------------------------------------
+ ## Title of Page
+ st.markdown("""
+ <div style='text-align: center; margin-top:-70px; margin-bottom: -50px;margin-left: -50px;'>
+     <h2 style='font-size: 20px; font-family: Courier New, monospace;
+                letter-spacing: 2px; text-decoration: none;'>
+         <img src="https://acis.affineanalytics.co.in/assets/images/logo_small.png" alt="logo" width="70" height="30">
+         <span style='background: linear-gradient(45deg, #ed4965, #c05aaf);
+                      -webkit-background-clip: text;
+                      -webkit-text-fill-color: transparent;
+                      text-shadow: none;'>
+             Product Demand Forecasting Dashboard
+         </span>
+         <span style='font-size: 40%;'>
+             <sup style='position: relative; top: 5px; color: #ed4965;'>by Affine</sup>
+         </span>
+     </h2>
+ </div>
+ """, unsafe_allow_html=True)
+ #---------------------------------------------------------------------------------------------------------------------
+ # select the model (sidebar)
+ with st.sidebar:
+     st.markdown("""<div style='text-align: left; margin-top:-230px;margin-left:-40px;'>
+                 <img src="https://affine.ai/wp-content/uploads/2023/05/Affine-Logo.svg" alt="logo" width="300" height="60">
+                 </div>""", unsafe_allow_html=True)
+     option=st.selectbox("Select Model",['TFT','Prophet'])
+ #------------------------------------------------------------------------------------------------------------
+ # TFT
+ if option=='TFT':
+     #--------------------------------------------------------------------------------------------------------
+     ## TFT data path and load
+     path='data/train.csv'
+     obj=StoreDataLoader(path)
+     train_dataset,test_dataset,training,validation,earliest_time=obj.tft_data()
+     print(f"TRAINING ::START DATE ::{train_dataset['date'].min()} :: END DATE ::{train_dataset['date'].max()}")
+     print(f"TESTING ::START DATE ::{test_dataset['date'].min()} :: END DATE ::{test_dataset['date'].max()}")
+     list_store=train_dataset['store'].unique()
+     list_items=train_dataset['item'].unique()
+     #---------------------------------------------------------------------------------------------------------
+     try:
+         # load the pre-trained TFT model
+         model=model_obj.store_model_load(option)
+         with st.sidebar:
+             # st.success('Model Loaded successfully', icon="✅")
+             # select the store id
+             store=st.selectbox("Select Store ID",list_store)
+             # select the item id
+             item=st.selectbox("Select Product ID",list_items)
+         #--------------------------------------------------------------------------------------------------------------
+         ## prediction on testing data
+         testing_results=test_prediction(model,train_dataset=train_dataset,test_dataset=test_dataset
+                                         ,earliest_time=earliest_time,store_id=store,item_id=item)
+         # find the KPIs
+         rmse=np.around(np.sqrt(mean_squared_error(testing_results['Lead_1'],testing_results['prediction'])),2)
+         mae=np.around(mean_absolute_error(testing_results['Lead_1'],testing_results['prediction']),2)
+         print(f"TEST DATA = Item ID : {item} :: MAE : {mae} :: RMSE : {rmse}")
+         #--------------------------------------tft future prediction-------------------------------------------
+         final_data=pd.concat([train_dataset,test_dataset])
+         consumer_data=final_data.loc[(final_data['store']==store) & (final_data['item']==item)]
+         consumer_data.fillna(0,inplace=True)
+         date_list=[]
+         demand_prediction=[]
+         for i in range(30):
+             # select the last 150 records as the encoder + decoder data
+             encoder_data = consumer_data[lambda x: x.days_from_start > x.days_from_start.max() - 150]
+             last_data = consumer_data[lambda x: x.days_from_start == x.days_from_start.max()]
+
+             # prediction date and time
+             date_list.append(encoder_data.tail(1).iloc[-1,:]['date'])
+             # predict the forecast horizon for this window
+             test_prediction = model.predict(encoder_data,
+                                             mode="prediction",
+                                             trainer_kwargs=dict(accelerator="cpu"),
+                                             return_x=True)
+             # create the next day record
+             decoder_data = pd.concat(
+                 [last_data.assign(date=lambda x: x.date + pd.offsets.DateOffset(i)) for i in range(1, 2)],
+                 ignore_index=True,
+             )
+             # find the hours_from_start & days_from_start
+             decoder_data["hours_from_start"] = (decoder_data["date"] - earliest_time).dt.seconds / 60 / 60 + (decoder_data["date"] - earliest_time).dt.days * 24
+             decoder_data['hours_from_start'] = decoder_data['hours_from_start'].astype('int')
+             decoder_data["hours_from_start"] += encoder_data["hours_from_start"].max() + 1 - decoder_data["hours_from_start"].min()
+             # add time index consistent with "data"
+             decoder_data["days_from_start"] = (decoder_data["date"] - earliest_time).apply(lambda x:x.days)
+             # adding the datetime features
+             decoder_data=create_week_date_featues(decoder_data,'date')
+             # treat the last predicted timestep as the next day's actual demand (enables multi-day forecasting)
+             decoder_data['sales']=float(test_prediction.output[0][-1])
+             # append this prediction into the list
+             demand_prediction.append(float(test_prediction.output[0][-1]))
+             # update the prediction time_idx
+             decoder_data['time_idx']=int(test_prediction.x['decoder_time_idx'][0][-1])
+             # add the next day record into the original data
+             consumer_data=pd.concat([consumer_data,decoder_data])
+             # recompute the lag features
+             consumer_data['lag_1']=consumer_data['sales'].shift(1)
+             consumer_data['lag_5']=consumer_data['sales'].shift(5)
+             # reset the index
+             consumer_data=consumer_data.reset_index(drop=True)
+         # forecast values for the next 30 days/timesteps
+         d2=pd.DataFrame({"date":date_list,"prediction":demand_prediction})[['date','prediction']]
+         # update the store and item ids
+         d2['store']=store
+         d2['item']=item
+         #----------------------------TFT and Prophet model KPI----------------------------------------
+         with st.sidebar:
+             st.markdown(f"""
+             <style>
+             /* Sidebar header style */
+             .sidebar-header {{
+                 padding: 1px;
+                 background-color: #9966FF;
+                 text-align: center;
+                 font-size: 13px;
+                 font-weight: bold;
+                 color: #FFF ;
+             }}
+             </style>
+
+             <div class="sidebar-header">
+                 Models Evaluation
+             </div>
+             """,unsafe_allow_html=True)
+             st.dataframe(pd.DataFrame({"KPI":['RMSE','MAE'],"TFT":[7.73,6.17],"Prophet":[7.32,6.01]}).set_index('KPI'),width=300)
+             # d2=pd.DataFrame({"KPI":['RMSE','MAE','RMSE','MAE'],"model":['TFT','TFT','Prophet','Prophet'],"Score":[7.73,6.17,7.32,6.01]})
+             # fig = px.bar(d2, x="KPI", y="Score",
+             #              color='model', barmode='group',
+             #              height=200,width=300,text_auto=True,)
+             # st.plotly_chart(fig)
+             #------------------------------------selected item KPI---------------------------------------------------------
+             st.markdown(f"""
+             <style>
+             /* Sidebar header style */
+             .sidebar-header {{
+                 padding: 3px;
+                 background: linear-gradient(45deg, #ed4965, #c05aaf);
+                 text-align: center;
+                 font-size: 13px;
+                 font-weight: bold;
+                 color: #FFF ;
+             }}
+             </style>
+
+             <div class="sidebar-header">
+                 KPI :: {item}
+             </div>
+             """,unsafe_allow_html=True)
+             st.dataframe(pd.DataFrame({"KPI":['RMSE','MAE'],"TFT":[rmse,mae]}).set_index('KPI'),width=300)
+
+         #--------------------------------------------------------------------------------------------------------------
+         # tabs
+         tab1,tab2=st.tabs(['📈Forecast Plot','🗃Forecast Table'])
+         #------------------------------------------------Tab-1-----------------------------------------------------------
+         tab1.markdown("""
+         <div style='text-align: left; margin-top:-10px;margin-bottom:-10px;'>
+             <h2 style='font-size: 30px; font-family: Palatino, serif;
+                        letter-spacing: 2px; text-decoration: none;'>
+                 &#x1F4C8;
+                 <span style='background: linear-gradient(45deg, #ed4965, #c05aaf);
+                              -webkit-background-clip: text;
+                              -webkit-text-fill-color: transparent;
+                              text-shadow: none;'>
+                     Forecast Plot
+                 </span>
+                 <span style='font-size: 40%;'>
+                     <sup style='position: relative; top: 5px; color: #ed4965;'></sup>
+                 </span>
+             </h2>
+         </div>
+         """, unsafe_allow_html=True)
+         # round the prediction column and format the dates
+         testing_results['prediction']=testing_results['prediction'].apply(lambda x:round(x))
+         testing_results['date']=testing_results['date'].dt.date
+         d2['prediction']=d2['prediction'].apply(lambda x:round(x))
+         d2['date']=d2['date'].dt.date
+         # training_data=train_dataset.loc[(train_dataset['store']==store)&(train_dataset['item']==item)][['date','Lead_1']].iloc[-60:,:]
+         #---------------------------------------------forecast plot---------------------------------------------
+         fig = go.Figure([
+             # go.Scatter(x=training_data['date'],y=training_data['Lead_1'],name='Train Observed',line=dict(color='rgba(50, 205, 50, 0.7)')),
+             # go.Scatter(x=y_train_pred['ds'],y=y_train_pred['yhat'],name='Prophet Pred.(10 Item)',line=dict(color='blue', dash='dot')),
+             go.Scatter(x=testing_results['date'], y=testing_results['Lead_1'],name='Observed',line=dict(color='rgba(218, 112, 214, 0.5)')),
+             go.Scatter(x=testing_results['date'],y=testing_results['prediction'],name='Historical Forecast',line=dict(color='#9400D3', dash='dash')),
+             go.Scatter(x=d2['date'],y=d2['prediction'],name='Future Forecast',line=dict(color='darkorange', dash='dot'))])
+         fig.update_layout(
+             xaxis_title='Date',
+             yaxis_title='Order Demand',
+             margin=dict(l=0, r=0, t=50, b=0),
+             xaxis=dict(title_font=dict(size=20)),
+             yaxis=dict(title_font=dict(size=20)))
+         fig.update_layout(width=900,height=400)
+         tab1.plotly_chart(fig)
+         #----------------------------------------------Tab-2------------------------------------------------------------
+         tab2.markdown("""
+         <div style='text-align: left; margin-top:-10px;'>
+             <h2 style='font-size: 30px; font-family: Palatino, serif;
+                        letter-spacing: 2px; text-decoration: none;'>
+                 &#x1F4C3;
+                 <span style='background: linear-gradient(45deg, #ed4965, #c05aaf);
+                              -webkit-background-clip: text;
+                              -webkit-text-fill-color: transparent;
+                              text-shadow: none;'>
+                     Forecast Table
+                 </span>
+                 <span style='font-size: 40%;'>
+                     <sup style='position: relative; top: 5px; color: #ed4965;'></sup>
+                 </span>
+             </h2>
+         </div>
+         """, unsafe_allow_html=True)
+         final_r=pd.concat([d2[['date','store','item','prediction']],testing_results[['date','store','item','prediction']]]).sort_values('date').drop_duplicates().reset_index(drop=True)
+         csv = convert_df(final_r)
+         tab2.dataframe(final_r,width=500)
+         tab2.download_button(
+             "Download",
+             csv,
+             "file.csv",
+             "text/csv",
+             key='download-csv'
+         )
+     except Exception:
+         st.sidebar.error('Model not loaded successfully!',icon="🚨")
+
+ #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ elif option=='Prophet':
+     print("prophet")
+     #---------------------------------------------------Data----------------------------------------------------
+     # Prophet data
+     path='data/train.csv'
+     obj=StoreDataLoader(path)
+     fb_train_data,fb_test_data,item_dummay,store_dummay=obj.fb_data()
+     # st.write(fb_train_data.columns)
+     # st.write(fb_test_data.columns)
+     # print(fb_test_data.columns)
+     print(f"TRAINING ::START DATE ::{fb_train_data['ds'].min()} :: END DATE ::{fb_train_data['ds'].max()}")
+     print(f"TESTING ::START DATE ::{fb_test_data['ds'].min()} :: END DATE ::{fb_test_data['ds'].max()}")
+     train_new=fb_train_data.drop('y',axis=1)
+     test_new=fb_test_data.drop('y',axis=1)
+     #----------------------------------------------model load----------------------------------------------------
+     try:
+         fb_model=model_obj.store_model_load(option)
+         # with st.sidebar:
+         #     st.success('Model Loaded successfully', icon="✅")
+         #-------------------------------------select store & item ---------------------------------------------------
+         list_items=item_dummay.columns
+         list_store=store_dummay.columns
+         with st.sidebar:
+             store=st.selectbox("Select Store",list_store)
+             item=st.selectbox("Select Product",list_items)
+         #------------------------------------------prediction---------------------------------------------------------------
+         test_prediction=fb_model.predict(test_new.loc[test_new[item]==1])
+         train_prediction=fb_model.predict(train_new.loc[train_new[item]==1])
+
+         y_true_test=fb_test_data.loc[fb_test_data[item]==1]
+         y_true_train=fb_train_data.loc[fb_train_data[item]==1]
+
+         y_train_pred=train_prediction[['ds','yhat']].iloc[-60:,:]
+         y_train_true=y_true_train[['ds','y']].iloc[-60:,:]
+
+         y_test_pred=test_prediction[['ds','yhat']]
+         y_test_true=y_true_test[['ds','y']]
+         #----------------------------------------KPI---------------------------------------------------------------
+         rmse=np.sqrt(mean_squared_error(y_test_true['y'],y_test_pred['yhat']))
+         mae=mean_absolute_error(y_test_true['y'],y_test_pred['yhat'])
+         #---------------------------------future prediction---------------------------------------
+         fb_final=pd.concat([fb_train_data,fb_test_data])
+         # extract the data for the selected store and item
+         fb_consumer=fb_final.loc[(fb_final[store]==1) & (fb_final[item]==1)]
+
+         # lists of dates and predictions
+         date_list=[]
+         prediction_list=[]
+
+         # predict the next 30 days of product demand
+         for i in range(30):
+             # take the last available record and drop the target
+             next_prediction=fb_consumer.tail(1).drop('y',axis=1)
+             # predict next timestep demand
+             prediction=fb_model.predict(next_prediction) # pass the other feature values to the model
+
+             # append date and predicted demand
+             date_list.append(prediction['ds'][0]) ## append the datetime of the prediction
+             prediction_list.append(prediction['yhat'][0]) ## append the next timestep prediction
+
+
+             #--------------------------next timestep data simulate-------------------------------------------------------------
+             last_data = fb_consumer[lambda x: x.ds == x.ds.max()] # last date present in the data
+             # next timestep
+             decoder_data = pd.concat(
+                 [last_data.assign(ds=lambda x: x.ds + pd.offsets.DateOffset(i)) for i in range(1, 2)],
+                 ignore_index=True,
+             )
+             # update the next timestep datetime covariates
+             decoder_data=create_week_date_featues(decoder_data,'ds')
+             # treat the latest prediction as the actual demand (used for further future timesteps)
+             decoder_data['sales']=prediction['yhat'][0] # assume the next timestep prediction is the actual
+             # append this next record to the original data
+             fb_consumer=pd.concat([fb_consumer,decoder_data]) # append the next timestep data into the main data
+             # recompute the sales lags and update the dataset
+             fb_consumer['lag_1']=fb_consumer['sales'].shift(1)
+             fb_consumer['lag_5']=fb_consumer['sales'].shift(5)
+             fb_consumer=fb_consumer.reset_index(drop=True) # reset the index
+         future_prediction=pd.DataFrame({"ds":date_list,"yhat":prediction_list})
+         future_prediction['store']=store
+         future_prediction['item']=item
+
+         with st.sidebar:
+             st.markdown(f"""
+             <style>
+             /* Sidebar header style */
+             .sidebar-header {{
+                 padding: 1px;
+                 background-color: #9966FF;
+                 text-align: center;
+                 font-size: 13px;
+                 font-weight: bold;
+                 color: #FFF ;
+             }}
+             </style>
+
+             <div class="sidebar-header">
+                 Models Evaluation
+             </div>
+             """,unsafe_allow_html=True)
+             st.dataframe(pd.DataFrame({"KPI":['RMSE','MAE'],"TFT":[7.73,6.17],"Prophet":[7.32,6.01]}).set_index('KPI'),width=300)
+             st.markdown(f"""
+             <style>
+             /* Sidebar header style */
+             .sidebar-header {{
+                 padding: 3px;
+                 background: linear-gradient(45deg, #ed4965, #c05aaf);
+                 text-align: center;
+                 font-size: 13px;
+                 font-weight: bold;
+                 color: #FFF ;
+             }}
+             </style>
+
+             <div class="sidebar-header">
+                 KPI :: {item}
+             </div>
+             """,unsafe_allow_html=True)
+
+             st.dataframe(pd.DataFrame({"KPI":['RMSE','MAE'],"Prophet":[rmse,mae]}).set_index('KPI'),width=300)
+
+         #---------------------------------------Tabs-----------------------------------------------------------------------
+         tab1,tab2=st.tabs(['📈Forecast Plot','🗃Forecast Table'])
+         #-------------------------------------------Tab-1=Forecast plot---------------------------------------------------
+         tab1.markdown("""
+         <div style='text-align: left; margin-top:-10px;margin-bottom:-10px;'>
+             <h2 style='font-size: 30px; font-family: Palatino, serif;
+                        letter-spacing: 2px; text-decoration: none;'>
+                 &#x1F4C8;
+                 <span style='background: linear-gradient(45deg, #ed4965, #c05aaf);
+                              -webkit-background-clip: text;
+                              -webkit-text-fill-color: transparent;
+                              text-shadow: none;'>
+                     Forecast Plot
+                 </span>
+                 <span style='font-size: 40%;'>
+                     <sup style='position: relative; top: 5px; color: #ed4965;'></sup>
+                 </span>
+             </h2>
+         </div>
+         """, unsafe_allow_html=True)
+
+         ## round the figures
+         y_train_true['y']=y_train_true['y'].astype('int')
+         y_train_pred['yhat']=y_train_pred['yhat'].astype('int')
+         y_test_true['y']=y_test_true['y'].astype('int')
+         y_test_pred['yhat']=y_test_pred['yhat'].astype('int')
+         future_prediction['yhat']=future_prediction['yhat'].astype('int')
+         y_train_true['ds']=y_train_true['ds'].dt.date
+         y_train_pred['ds']=y_train_pred['ds'].dt.date
+         y_test_true['ds']=y_test_true['ds'].dt.date
+         y_test_pred['ds']=y_test_pred['ds'].dt.date
+         future_prediction['ds']=future_prediction['ds'].dt.date
+
+         #-----------------------------plot---------------------------------------------------------------------------------------------
+         fig = go.Figure([
+             # go.Scatter(x=y_train_true['ds'],y=y_train_true['y'],name='Train Observed',line=dict(color='rgba(50, 205, 50, 0.7)' )),
+             # go.Scatter(x=y_train_pred['ds'],y=y_train_pred['yhat'],name='Prophet Pred.(10 Item)',line=dict(color='#32CD32', dash='dot')),
+             go.Scatter(x=y_test_true['ds'], y=y_test_true['y'],name='Observed',line=dict(color='rgba(218, 112, 214, 0.5)')),
+             go.Scatter(x=y_test_pred['ds'],y=y_test_pred['yhat'],name='Historical Forecast',line=dict(color='#9400D3', dash='dash')),
+             go.Scatter(x=future_prediction['ds'],y=future_prediction['yhat'],name='Future Forecast',line=dict(color='darkorange', dash='dot'))])
+         fig.update_layout(
+             xaxis_title='Date',
+             yaxis_title='Order Demand',
+             margin=dict(l=0, r=0, t=50, b=0),
+             xaxis=dict(title_font=dict(size=20)),
+             yaxis=dict(title_font=dict(size=20)))
+         fig.update_layout(width=900,height=400)
+         tab1.plotly_chart(fig)
+         #----------------------------------------Tab-2------------------------------------------------------------
+         results=y_test_pred.reset_index()
+         results['store']='store_1'
+         results['item']=item
+         tab2.markdown("""
+         <div style='text-align: left; margin-top:-10px;'>
+             <h2 style='font-size: 30px; font-family: Palatino, serif;
+                        letter-spacing: 2px; text-decoration: none;'>
+                 &#x1F4C3;
+                 <span style='background: linear-gradient(45deg, #ed4965, #c05aaf);
+                              -webkit-background-clip: text;
+                              -webkit-text-fill-color: transparent;
+                              text-shadow: none;'>
+                     Forecast Table
+                 </span>
+                 <span style='font-size: 40%;'>
+                     <sup style='position: relative; top: 5px; color: #ed4965;'></sup>
+                 </span>
+             </h2>
+         </div>
+         """, unsafe_allow_html=True)
+         final_r=pd.concat([future_prediction[['ds','store','item','yhat']],results[['ds','store','item','yhat']]]).sort_values('ds').drop_duplicates().reset_index(drop=True)
+         csv = convert_df(final_r)
+         tab2.dataframe(final_r,width=500)
+         tab2.download_button(
+             "Download",
+             csv,
+             "file.csv",
+             "text/csv",
+             key='download-csv'
+         )
+     except Exception:
+         st.sidebar.error('Model not loaded successfully!',icon="🚨")
+
+
+
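Both branches of this page produce the future forecast with the same rolling one-step-ahead scheme: predict the next timestep, write that prediction back into the history as if it had been observed, recompute the lag features, and repeat. A minimal sketch of the pattern, assuming a hypothetical predict_one_step() in place of the TFT/Prophet call and simplified column names:

    import pandas as pd

    def roll_forward(history: pd.DataFrame, predict_one_step, steps: int = 30) -> pd.DataFrame:
        # Autoregressive multi-step forecast: each prediction is fed back as the next "actual".
        forecasts = []
        for _ in range(steps):
            yhat = predict_one_step(history)                      # model call (TFT or Prophet above)
            next_row = history.tail(1).copy()
            next_row['date'] = next_row['date'] + pd.offsets.DateOffset(1)
            next_row['sales'] = yhat                              # treat the forecast as the observed value
            history = pd.concat([history, next_row], ignore_index=True)
            history['lag_1'] = history['sales'].shift(1)          # refresh the lag features
            history['lag_5'] = history['sales'].shift(5)
            forecasts.append({'date': next_row['date'].iloc[0], 'prediction': yhat})
        return pd.DataFrame(forecasts)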
pages/2_Energy Demand Forecasting.py ADDED
@@ -0,0 +1,436 @@
+ import streamlit as st
+ from src.data import Energy_DataLoader
+ from src.model import Model_Load
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+ import plotly.graph_objects as go
+ from sklearn.metrics import mean_absolute_error,mean_squared_error
+ import numpy as np
+ import pandas as pd
+ from streamlit.components.v1 import html
+ from src.prediction import test_pred,val_pred
+ ## Load model object
+ model_obj=Model_Load()
+
+
+ path='data/LD2011_2014.txt'
+ obj=Energy_DataLoader(path)
+
+ @st.cache_data
+ def convert_df(df):
+     return df.to_csv(index=False).encode('utf-8')
+
+
+ st.markdown("""
+ <div style='text-align: center; margin-top:-70px; margin-bottom: 5px;margin-left: -50px;'>
+     <h2 style='font-size: 20px; font-family: Courier New, monospace;
+                letter-spacing: 2px; text-decoration: none;'>
+         <img src="https://acis.affineanalytics.co.in/assets/images/logo_small.png" alt="logo" width="70" height="30">
+         <span style='background: linear-gradient(45deg, #ed4965, #c05aaf);
+                      -webkit-background-clip: text;
+                      -webkit-text-fill-color: transparent;
+                      text-shadow: none;'>
+             Energy Demand Forecasting Dashboard
+         </span>
+         <span style='font-size: 40%;'>
+             <sup style='position: relative; top: 5px; color: #ed4965;'>by Affine</sup>
+         </span>
+     </h2>
+ </div>
+ """, unsafe_allow_html=True)
+
+ with st.sidebar:
+     st.markdown("""<div style='text-align: left; margin-top:-230px;margin-left:-40px;'>
+                 <img src="https://affine.ai/wp-content/uploads/2023/05/Affine-Logo.svg" alt="logo" width="300" height="60">
+                 </div>""", unsafe_allow_html=True)
+     # st.markdown(f"""<style>
+     # /* Sidebar header style */
+     # .sidebar-header {{
+     #     padding: 1px;
+     #     background-color: #9966FF;
+     #     text-align: center;
+     #     font-size: 13px;
+     #     font-weight: bold;
+     #     color: #FFF ;
+     # }}
+     # </style>
+
+     # <div class="sidebar-header" >
+     #     Select Model
+     # </div>
+     # """,unsafe_allow_html=True)
+     option=st.selectbox("Select Model",['TFT','Prophet'])
+
+ if option=='TFT':
+     print("TFT")
+     ## TFT data
+     train_dataset,test_dataset,training,validation,earliest_time=obj.tft_data()
+     # st.write(earliest_time)
+     print(f"TRAINING ::START DATE ::{train_dataset['date'].min()} :: END DATE ::{train_dataset['date'].max()}")
+     print(f"TESTING ::START DATE ::{test_dataset['date'].min()} :: END DATE ::{test_dataset['date'].max()}")
+     consumer_list=train_dataset['consumer_id'].unique()
+     model=model_obj.energy_model_load(option)
+     with st.sidebar:
+         # st.success('Model Loaded successfully', icon="✅")
+         # st.markdown(f"""
+         # <style>
+         # /* Sidebar header style */
+         # .sidebar-header {{
+         #     padding: 1px;
+         #     background-color: #9966FF;
+         #     text-align: center;
+         #     font-size: 13px;
+         #     font-weight: bold;
+         #     color: #FFF ;
+         # }}
+         # </style>
+
+         # <div class="sidebar-header">
+         #     Select Consumer ID
+         # </div>
+         # """,unsafe_allow_html=True)
+         consumer=st.selectbox("Select Consumer ID",consumer_list)
+     testing_results=test_pred(model,train_dataset=train_dataset,test_dataset=test_dataset
+                               ,consumer_id=consumer)
+     rmse=np.around(np.sqrt(mean_squared_error(testing_results['Lead_1'],testing_results['prediction'])),2)
+     mae=np.around(mean_absolute_error(testing_results['Lead_1'],testing_results['prediction']),2)
+     #-----------------------------------future prediction-----------------------------------------------
+     final_data=pd.concat([train_dataset,test_dataset])
+     consumer_data=final_data.loc[final_data['consumer_id']==consumer]
+     consumer_data.fillna(0,inplace=True)
+     date_list=[]
+     demand_prediction=[]
+     for i in range(24):
+         encoder_data = consumer_data[lambda x: x.hours_from_start > x.hours_from_start.max() - 192]
+         last_data = consumer_data[lambda x: x.hours_from_start == x.hours_from_start.max()]
+
+         # prediction date and time
+         date_list.append(encoder_data.tail(1).iloc[-1,:]['date'])
+
+         test_prediction = model.predict(encoder_data,
+                                         mode="prediction",
+                                         trainer_kwargs=dict(accelerator="cpu"),
+                                         return_x=True)
+         decoder_data = pd.concat(
+             [last_data.assign(date=lambda x: x.date + pd.offsets.Hour(i)) for i in range(1, 2)],
+             ignore_index=True,
+         )
+         decoder_data['hours_from_start']=decoder_data['hours_from_start'].max()+1
+         decoder_data["days_from_start"] = (decoder_data["date"] - earliest_time).apply(lambda x:x.days)
+         decoder_data['hour'] = decoder_data['date'].dt.hour
+         decoder_data['day'] = decoder_data['date'].dt.day
+         decoder_data['day_of_week'] = decoder_data['date'].dt.dayofweek
+         decoder_data['month'] = decoder_data['date'].dt.month
+         decoder_data['power_usage']=float(test_prediction.output[0][-1])
+         demand_prediction.append(float(test_prediction.output[0][-1]))
+         decoder_data['time_idx']=int(test_prediction.x['decoder_time_idx'][0][-1])
+         consumer_data=pd.concat([consumer_data,decoder_data])
+         consumer_data['lag_1']=consumer_data['power_usage'].shift(1)
+         consumer_data['lag_5']=consumer_data['power_usage'].shift(5)
+         consumer_data=consumer_data.reset_index(drop=True)
+     d2=pd.DataFrame({"date":date_list,"prediction":demand_prediction})[['date','prediction']]
+     d2['consumer_id']=consumer
+     print(f"TEST DATA = Consumer ID : {consumer} :: MAE : {mae} :: RMSE : {rmse}")
+     with st.sidebar:
+         st.markdown(f"""
+         <style>
+         /* Sidebar header style */
+         .sidebar-header {{
+             padding: 1px;
+             background-color: #9966FF;
+             text-align: center;
+             font-size: 13px;
+             font-weight: bold;
+             color: #FFF ;
+         }}
+         </style>
+
+         <div class="sidebar-header">
+             Models Evaluation
+         </div>
+         """,unsafe_allow_html=True)
+         # st.write("Models Evaluation")
+         st.dataframe(pd.DataFrame({"KPI":['RMSE','MAE'],"TFT":[8.67,6.48],"Prophet":[12.82,9.79]}).set_index('KPI'),width=300)
+         st.markdown(f"""
+         <style>
+         /* Sidebar header style */
+         .sidebar-header {{
+             padding: 1px;
+             background: linear-gradient(45deg, #ed4965, #c05aaf);
+             text-align: center;
+             font-size: 13px;
+             font-weight: bold;
+             color: #FFF ;
+         }}
+         </style>
+
+         <div class="sidebar-header">
+             KPI :: {consumer}
+         </div>
+         """,unsafe_allow_html=True)
+         st.dataframe(pd.DataFrame({"KPI":['RMSE','MAE'],"TFT":[rmse,mae]}).set_index('KPI'),width=300)
+     #--------------------------------------------------------------------------------------------------------------
+     # tabs
+     tab1,tab2=st.tabs(['📈Forecast Plot','🗃Forecast Table'])
+     #------------------------------------------------Tab-1-----------------------------------------------------------
+     # tab2.write(testing_results)
+     tab1.markdown("""
+     <div style='text-align: left; margin-top:-10px;margin-bottom:-10px;'>
+         <h2 style='font-size: 30px; font-family: Palatino, serif;
+                    letter-spacing: 2px; text-decoration: none;'>
+             &#x1F4C8;
+             <span style='background: linear-gradient(45deg, #ed4965, #c05aaf);
+                          -webkit-background-clip: text;
+                          -webkit-text-fill-color: transparent;
+                          text-shadow: none;'>
+                 Forecast Plot
+             </span>
+             <span style='font-size: 40%;'>
+                 <sup style='position: relative; top: 5px; color: #ed4965;'></sup>
+             </span>
+         </h2>
+     </div>
+     """, unsafe_allow_html=True)
+     # testing_results['prediction']=testing_results['prediction'].astype('int')
+     training_data=train_dataset.loc[(train_dataset['consumer_id']==consumer)][['date','Lead_1']].iloc[-100:,:]
+     fig = go.Figure([
+         # go.Scatter(x=training_data['date'],y=training_data['Lead_1'],name='Train Observed',line=dict(color='blue')),
+         # go.Scatter(x=y_train_pred['ds'],y=y_train_pred['yhat'],name='Prophet Pred.(10 Item)',line=dict(color='blue', dash='dot')),
+         go.Scatter(x=testing_results['date'], y=testing_results['Lead_1'],name='Observed',line=dict(color='purple')),
+         go.Scatter(x=testing_results['date'],y=testing_results['prediction'],name='Historical Forecast',line=dict(color='purple', dash='dot')),
+         go.Scatter(x=d2['date'],y=d2['prediction'],name='Future Forecast',line=dict(color='darkorange', dash='dot'))])
+     fig.update_layout(
+         xaxis_title='Date',
+         yaxis_title='Energy Demand',
+         margin=dict(l=0, r=0, t=50, b=0),
+         xaxis=dict(title_font=dict(size=20)),
+         yaxis=dict(title_font=dict(size=20)))
+     fig.update_layout(width=900,height=400)
+     tab1.plotly_chart(fig)
+     #----------------------------------------------Tab-2------------------------------------------------------------
+     tab2.markdown("""
+     <div style='text-align: left; margin-top:-10px;'>
+         <h2 style='font-size: 30px; font-family: Palatino, serif;
+                    letter-spacing: 2px; text-decoration: none;'>
+             &#x1F4C3;
+             <span style='background: linear-gradient(45deg, #ed4965, #c05aaf);
+                          -webkit-background-clip: text;
+                          -webkit-text-fill-color: transparent;
+                          text-shadow: none;'>
+                 Forecast Table
+             </span>
+             <span style='font-size: 40%;'>
+                 <sup style='position: relative; top: 5px; color: #ed4965;'></sup>
+             </span>
+         </h2>
+     </div>
+     """, unsafe_allow_html=True)
+     final_r=pd.concat([d2[['date','consumer_id','prediction']],testing_results[['date','consumer_id','prediction']]]).sort_values('date').reset_index(drop=True)
+     csv = convert_df(final_r)
+     tab2.dataframe(final_r,width=500)
+     tab2.download_button(
+         "Download",
+         csv,
+         "file.csv",
+         "text/csv",
+         key='download-csv'
+     )
+     # except:
+     #     st.sidebar.error('Model Not Loaded successfully!',icon="🚨")
+
+
+
+
+
+ elif option=='Prophet':
+     print("prophet")
+     # Prophet data
+     fb_train_data,fb_test_data,consumer_dummay=obj.fb_data()
+     # print('*'*50)
+     # fb_test_data
+     # print('*'*50)
+     print(f"TRAINING ::START DATE ::{fb_train_data['ds'].min()} :: END DATE ::{fb_train_data['ds'].max()}")
+     print(f"TESTING ::START DATE ::{fb_test_data['ds'].min()} :: END DATE ::{fb_test_data['ds'].max()}")
+     train_new=fb_train_data.drop('y',axis=1)
+     test_new=fb_test_data.drop('y',axis=1)
+     try:
+         model=model_obj.energy_model_load(option)
+         # with st.sidebar:
+         #     st.success('Model Loaded successfully.', icon="✅")
+     except Exception:
+         st.error('Model not loaded successfully!',icon="🚨")
+     with st.sidebar:
+         # st.markdown(f"""
+         # <style>
+         # /* Sidebar header style */
+         # .sidebar-header {{
+         #     padding: 2px;
+         #     background-color: #9966FF;
+         #     text-align: center;
+         #     font-size: 8px;
+         #     font-weight: bold;
+         #     color: #FFF ;
+         # }}
+         # </style>
+
+         # <div class="sidebar-header">
+         #     Select Consumer ID
+         # </div>
+         # """,unsafe_allow_html=True)
+         consumer=st.selectbox("Select Consumer ID",consumer_dummay)
+
+     test_prediction=model.predict(test_new.loc[test_new[consumer]==1])
+     # train_prediction=model.predict(train_new.loc[train_new[consumer]==1])
+
+     y_true_test=fb_test_data.loc[fb_test_data[consumer]==1]
+     y_true_train=fb_train_data.loc[fb_train_data[consumer]==1]
+
+     # y_train_pred=train_prediction[['ds','yhat']].iloc[-60:,:]
+     y_train_true=y_true_train[['ds','y']].iloc[-60:,:]
+
+     y_test_pred=test_prediction[['ds','yhat']]
+     y_test_true=y_true_test[['ds','y']]
+
+     fb_final=pd.concat([fb_train_data,fb_test_data])
+     fb_consumer=fb_final.loc[fb_final[consumer]==1]
+     date_list=[]
+     prediction_list=[]
+     for i in range(24):
+         next_prediction=fb_consumer.tail(1).drop('y',axis=1) # drop the target of the last timestep (01/01/2015 00:00:00)
+         # print(next_prediction)
+         prediction=model.predict(next_prediction) # pass the other feature values to the model
+         # print('*'*20)
+         # print("DateTime :: ",prediction['ds'][0])
+         # print("Prediction ::",prediction['yhat'][0])
+         date_list.append(prediction['ds'][0]) ## append the datetime of the prediction
+         prediction_list.append(prediction['yhat'][0]) ## append the next timestep prediction
+
+         last_data = fb_consumer[lambda x: x.ds == x.ds.max()] # last date present in the data
+
+         #--------------------------next timestep data simulate-------------------------------------------------------------
+         decoder_data = pd.concat(
+             [last_data.assign(ds=lambda x: x.ds + pd.offsets.Hour(i)) for i in range(1, 2)],
+             ignore_index=True,
+         )
+         decoder_data['hour'] = decoder_data['ds'].dt.hour
+         decoder_data['day'] = decoder_data['ds'].dt.day
+         decoder_data['day_of_week'] = decoder_data['ds'].dt.dayofweek
+         decoder_data['month'] = decoder_data['ds'].dt.month
+         decoder_data['power_usage']=prediction['yhat'][0] # assume the next timestep prediction is the actual
+         fb_consumer=pd.concat([fb_consumer,decoder_data]) # append the next timestep data into the main data
+         fb_consumer['lag_1']=fb_consumer['power_usage'].shift(1) # recompute the power usage lags and update the dataset
+         fb_consumer['lag_5']=fb_consumer['power_usage'].shift(5)
+         fb_consumer=fb_consumer.reset_index(drop=True)
+     future_prediction=pd.DataFrame({'ds':date_list,"yhat":prediction_list})
+     future_prediction['consumer_id']=consumer
+     tab1,tab2=st.tabs(['📈Forecast Plot','🗃Forecast Table'])
+     tab1.markdown("""
+     <div style='text-align: left; margin-top:-10px;margin-bottom:-10px;'>
+         <h2 style='font-size: 30px; font-family: Palatino, serif;
+                    letter-spacing: 2px; text-decoration: none;'>
+             &#x1F4C8;
+             <span style='background: linear-gradient(45deg, #ed4965, #c05aaf);
+                          -webkit-background-clip: text;
+                          -webkit-text-fill-color: transparent;
+                          text-shadow: none;'>
+                 Forecast Plot
+             </span>
+             <span style='font-size: 40%;'>
+                 <sup style='position: relative; top: 5px; color: #ed4965;'></sup>
+             </span>
+         </h2>
+     </div>
+     """, unsafe_allow_html=True)
+     y_train_true['y']=y_train_true['y'].astype('float')
+     # y_train_pred['yhat']=y_train_pred['yhat'].astype('float')
+     y_test_true['y']=y_test_true['y'].astype('float')
+     y_test_pred['yhat']=y_test_pred['yhat'].astype('float')
+
+     fig = go.Figure([
+         # go.Scatter(x=y_train_true['ds'],y=y_train_true['y'],name='Train Observed',line=dict(color='blue')),
+         # go.Scatter(x=y_train_pred['ds'],y=y_train_pred['yhat'],name='Prophet Pred.(10 Consumer)',line=dict(color='blue', dash='dot')),
+         go.Scatter(x=y_test_true['ds'], y=y_test_true['y'],name='Observed',line=dict(color='purple')),
+         go.Scatter(x=y_test_pred['ds'],y=y_test_pred['yhat'],name='Historical Forecast',line=dict(color='purple', dash='dot')),
+         go.Scatter(x=future_prediction['ds'],y=future_prediction['yhat'],name='Future Forecast',line=dict(color='darkorange', dash='dot'))
+     ])
+     fig.update_layout(
+         xaxis_title='Date',
+         yaxis_title='Energy Demand',
+         margin=dict(l=0, r=0, t=50, b=0),
+         xaxis=dict(title_font=dict(size=20)),
+         yaxis=dict(title_font=dict(size=20)))
+     fig.update_layout(width=900,height=400)
+     tab1.plotly_chart(fig)
+
+     rmse=np.sqrt(mean_squared_error(y_test_true['y'],y_test_pred['yhat']))
+     mae=mean_absolute_error(y_test_true['y'],y_test_pred['yhat'])
+     with st.sidebar:
+         st.markdown(f"""
+         <style>
+         /* Sidebar header style */
+         .sidebar-header {{
+             padding: 1px;
+             background-color: #9966FF;
+             text-align: center;
+             font-size: 13px;
+             font-weight: bold;
+             color: #FFF ;
+         }}
+         </style>
+
+         <div class="sidebar-header">
+             Models Evaluation
+         </div>
+         """,unsafe_allow_html=True)
+         # st.write("Models Evaluation")
+         st.dataframe(pd.DataFrame({"KPI":['RMSE','MAE'],"TFT":[8.67,6.48],"Prophet":[12.82,9.79]}).set_index('KPI'),width=300)
+         st.markdown(f"""
+         <style>
+         /* Sidebar header style */
+         .sidebar-header {{
+             padding: 2px;
+             background: linear-gradient(45deg, #ed4965, #c05aaf);
+             text-align: center;
+             font-size: 13px;
+             font-weight: bold;
+             color: #FFF ;
+         }}
+         </style>
+
+         <div class="sidebar-header">
+             KPI :: {consumer}
+         </div>
+         """,unsafe_allow_html=True)
+         st.dataframe(pd.DataFrame({"KPI":['RMSE','MAE'],"Prophet":[rmse,mae]}), width=300)
+     #----------------------------------------
+     results=y_test_pred.reset_index()
+     # results['y']=y_test_true['y'].reset_index(drop=True)
+     results['consumer_id']=consumer
+     # st.header("Tabular Results")
+     st.divider()
+
+     tab2.markdown("""
+     <div style='text-align: left; margin-top:-10px;'>
+         <h2 style='font-size: 30px; font-family: Palatino, serif;
+                    letter-spacing: 2px; text-decoration: none;'>
+             &#x1F4C3;
+             <span style='background: linear-gradient(45deg, #ed4965, #c05aaf);
+                          -webkit-background-clip: text;
+                          -webkit-text-fill-color: transparent;
+                          text-shadow: none;'>
+                 Forecast Table
+             </span>
+             <span style='font-size: 40%;'>
+                 <sup style='position: relative; top: 5px; color: #ed4965;'></sup>
+             </span>
+         </h2>
+     </div>
+     """, unsafe_allow_html=True)
+     final_results=pd.concat([future_prediction[['ds','consumer_id','yhat']],results[['ds','consumer_id','yhat']]]).sort_values('ds').reset_index(drop=True)
+     csv = convert_df(final_results)
+     tab2.dataframe(final_results,width=500)
+     tab2.download_button("Download",
+                          csv,
+                          "file.csv",
+                          "text/csv",
+                          key='download-csv')
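On both pages convert_df is decorated with st.cache_data, so the CSV bytes are recomputed only when the forecast table actually changes, and st.download_button then serves those cached bytes. A minimal usage sketch of that pairing (the sample dataframe is illustrative):

    import pandas as pd
    import streamlit as st

    @st.cache_data
    def convert_df(df: pd.DataFrame) -> bytes:
        # Serialize once per distinct dataframe; Streamlit reuses the cached bytes on reruns.
        return df.to_csv(index=False).encode('utf-8')

    forecast = pd.DataFrame({'date': ['2018-01-01'], 'prediction': [42]})
    st.download_button('Download', convert_df(forecast), 'file.csv', 'text/csv', key='download-csv')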