import requests
import pandas as pd
from io import StringIO
import streamlit as st
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
from statsmodels.tsa.stattools import acf
from statsmodels.graphics.tsaplots import plot_acf
import matplotlib.pyplot as plt
import folium
from streamlit_folium import st_folium
import seaborn as sns
import datetime
from entsoe.geo import load_zones
import branca
import pytz
import time
from entsoe import EntsoePandasClient
import geopandas as gpd


tz = pytz.timezone('Europe/Brussels')

def load_capacity_csv(path: str) -> dict:
    """Load installed capacities CSV into a dict: Country -> {tech: value} """
    df = pd.read_csv(path, index_col='Country')
    # Ensure numeric and handle missing
    df = df.replace({"NaN": np.nan}).astype(float)
    return df.to_dict(orient='index')
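
# Illustrative layout of the installed-capacity CSV files (an assumption based on how
# the values are looked up below, not verified against the actual files): one row per
# bidding zone, one column per technology, capacities in MW, e.g.
#   Country,Solar,Wind Onshore,Wind Offshore
#   BE,<solar MW>,<onshore MW>,<offshore MW>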

# Load installed capacities from CSV files
installed_capacities_2024 = load_capacity_csv('installed_capacities_2024.csv')
installed_capacities_2025 = load_capacity_csv('installed_capacities_2025.csv')

TECHS = ['Solar', 'Wind Offshore', 'Wind Onshore']
#countries = [ 'AT', 'BE', 'NL',  'BG', 'HR', 'CZ', 'DE_LU', 'DK_1', 'DK_2',
#'EE', 'FI', 'FR', 'GR', 'HU',  'IT_CALA', 'IT_CNOR',
#'IT_CSUD', 'IT_NORD', 'IT_SARD', 'IT_SICI', 'IT_SUD', 'LV', 'LT', 
#'NO_1', 'NO_2', 'NO_3', 'NO_4', 'NO_5', 'PL', 'PT', 'RO',
#'SE_1', 'SE_2', 'SE_3', 'SE_4', 'RS', 'SK', 'SI', 'ES', 'CH', 'ME','IE_SEM','MK','CY','BA','AL','XK']

countries = ['AT', 'BE', 'DE_LU', 'DK_1', 'DK_2', 'FR', 'IT_CALA', 'IT_CNOR',
        'IT_CSUD', 'IT_NORD', 'IT_SARD', 'IT_SICI', 'IT_SUD',
        'NL', 'PT', 'ES']

def get_time_zone(country_code):
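    """Return the IANA time zone name for an ENTSO-E bidding-zone code."""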

    tz_map = {
        'AL': 'Europe/Tirane',
        'AT': 'Europe/Vienna',
        'BE': 'Europe/Brussels',
        'BA': 'Europe/Sarajevo',
        'BG': 'Europe/Sofia',
        'HR': 'Europe/Zagreb',
        'CY': 'Asia/Nicosia',
        'CZ': 'Europe/Prague',
        'DE_LU': 'Europe/Berlin',
        'DK_1': 'Europe/Copenhagen',
        'DK_2': 'Europe/Copenhagen',
        'EE': 'Europe/Tallinn',
        'FI': 'Europe/Helsinki',
        'MK': 'Europe/Skopje',
        'FR': 'Europe/Paris',
        'GR': 'Europe/Athens',
        'HU': 'Europe/Budapest',
        'IS': 'Atlantic/Reykjavik',
        'IE_SEM': 'Europe/Dublin',
        'IT_CALA': 'Europe/Rome',
        'IT_CNOR': 'Europe/Rome',
        'IT_CSUD': 'Europe/Rome',
        'IT_NORD': 'Europe/Rome',
        'IT_SARD': 'Europe/Rome',
        'IT_SICI': 'Europe/Rome',
        'IT_SUD': 'Europe/Rome',
        'LV': 'Europe/Riga',
        'LT': 'Europe/Vilnius',
        'ME': 'Europe/Podgorica',
        'NL': 'Europe/Amsterdam',
        'NO_1': 'Europe/Oslo',
        'NO_2': 'Europe/Oslo',
        'NO_3': 'Europe/Oslo',
        'NO_4': 'Europe/Oslo',
        'NO_5': 'Europe/Oslo',
        'PL': 'Europe/Warsaw',
        'PT': 'Europe/Lisbon',
        'MD': 'Europe/Chisinau',
        'RO': 'Europe/Bucharest',
        'SE_1': 'Europe/Stockholm',
        'SE_2': 'Europe/Stockholm',
        'SE_3': 'Europe/Stockholm',
        'SE_4': 'Europe/Stockholm',
        'RS': 'Europe/Belgrade',
        'SK': 'Europe/Bratislava',
        'SI': 'Europe/Ljubljana',
        'ES': 'Europe/Madrid',
        'CH': 'Europe/Zurich',
        'XK': 'Europe/Rome'
    }
    if country_code in tz_map:
        return tz_map[country_code]
    else:
        raise ValueError(f"Time zone for country code {country_code} is not defined.")
    
def convert_European_time(data, bdz):
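    """Convert a UTC-indexed DataFrame to the local (naive) time of bidding zone `bdz`."""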
    time_zone = get_time_zone(bdz)
    data.index = pd.to_datetime(data.index, utc=True)
    data.index = data.index.tz_convert(time_zone)
    data.index = data.index.tz_localize(None)
    return data

def filter_dataframe(df):
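    """Keep only the ENTSO-E load, solar and wind actual/forecast columns."""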
    allowed_columns = {"Load_entsoe", "Load_forecast_entsoe", "Solar_entsoe", "Solar_forecast_entsoe", "Wind_onshore_entsoe", "Wind_onshore_forecast_entsoe", "Wind_offshore_entsoe", "Wind_offshore_forecast_entsoe"}
    return df[[col for col in df.columns if col in allowed_columns]]

def load_GitHub(github_token, bdz):
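    """Download `<bdz>_Entsoe_UTC.csv` from the Transparency_Data GitHub repository,
    set a datetime index, keep the relevant columns, convert to local time and
    return the data from 2024-01-01 onwards (or None if the download fails)."""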

    file_name=f'{bdz}_Entsoe_UTC.csv'
    url = f'https://raw.githubusercontent.com/margaridamascarenhas/Transparency_Data/main/{file_name}'
    headers = {'Authorization': f'token {github_token}'}

    response = requests.get(url, headers=headers)

    if response.status_code == 200:
        csv_content = StringIO(response.text)
        df = pd.read_csv(csv_content)
        if 'Date' in df.columns:
            df['Date'] = pd.to_datetime(df['Date'])  # Convert 'Date' column to datetime
            df.set_index('Date', inplace=True)  # Set 'Date' column as the index
            df=filter_dataframe(df)
            df=convert_European_time(df, bdz)
        return df[df.index >= pd.Timestamp('2024-01-01')]
    else:
        print(f"Failed to download {file_name}. Status code: {response.status_code}")
        return None

def filter_variable_options(df):
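    """Return the variables for which both the actual and the forecast column contain data,
    plus a list of the columns that are missing or entirely NaN."""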
    all_options = {
        "Load": ("Load_entsoe", "Load_forecast_entsoe"),
        "Solar": ("Solar_entsoe", "Solar_forecast_entsoe"),
        "Wind Onshore": ("Wind_onshore_entsoe", "Wind_onshore_forecast_entsoe"),
        "Wind Offshore": ("Wind_offshore_entsoe", "Wind_offshore_forecast_entsoe"),
    }
    
    variable_options = {}
    flagged_columns = []
    
    for key, (col1, col2) in all_options.items():
        col1_exists = col1 in df.columns and not df[col1].isna().all()
        col2_exists = col2 in df.columns and not df[col2].isna().all()
        if col1_exists and col2_exists:
            variable_options[key] = (col1, col2)
        elif not col1_exists and col2_exists:
            flagged_columns.append(col1)
        elif col1_exists and not col2_exists:
            flagged_columns.append(col2)
        elif not col1_exists and not col2_exists:
            flagged_columns.append(col1)
            flagged_columns.append(col2)
    return variable_options, flagged_columns

github_token = st.secrets["GitHub_Token_KUL_Margarida"]
#countries = ['IT_CALA', 'IT_CNOR', 'IT_CSUD', 'IT_SARD', 'PT', 'FR']

if github_token:
    data_dict = {}
    for bdz in countries:
        df = load_GitHub(github_token, bdz)
        if df is not None:
            data_dict[bdz] = df

else:
    print("Please enter your GitHub Personal Access Token to proceed.")

col1, col2 = st.columns([5, 2])  
with col1:
    st.title("Transparency++")

with col2:
    upper_space = col2.empty()
    upper_space = col2.empty()
    col2_1, col2_2 = st.columns(2)  # Create two columns within the right column for side-by-side images
    with col2_1:
        st.image("KU_Leuven_logo.png", width=100)   # Adjust the path and width as needed
    with col2_2:
        st.image("energyville_logo.png", width=100) 

st.write("**Evaluate and analyze ENTSO-E Transparency Platform data quality, forecast accuracy, and energy trends for ENTSO-E member countries.**")

st.sidebar.header('Filters')

st.sidebar.subheader("Select Country")
st.sidebar.caption("Choose the country for which you want to display data or forecasts.")
selection = ['Overall'] + list(countries)
selected_country = st.sidebar.selectbox('Select Country', selection)

if selected_country != 'Overall':
    st.sidebar.subheader("Section")
    st.sidebar.caption("Select the type of information you want to explore.")
    section = st.sidebar.radio('', ['Data Quality', 'Forecasts Quality', 'Insights'], index=1)
else:
    section = None  # No section is shown when "Overall" is selected

if selected_country == 'Overall':
    data = None  # You can set data to None or a specific dataset based on your logic
    section = None  # No section selected when "Overall" is chosen
else:
    country_code = selected_country
    data = data_dict.get(selected_country)

if section == 'Data Quality':
    st.header('Data Quality')

    # Determine if capacities missing per year
    caps4 = installed_capacities_2024.get(country_code)
    caps5 = installed_capacities_2025.get(country_code)

    st.write(
        "The table below presents the data quality metrics focusing on the percentage "
        "of missing values and the occurrence of extreme or nonsensical values for "
        "the selected country. Additionally, it flags any mismatch between installed "
        "capacity (NaN or 0) and actual data in the dataset."
    )
    
    # Determine end of data slice (yesterday 23:59:59)
    yesterday = datetime.datetime.now(tz).date() - datetime.timedelta(days=1)
    end_time = pd.Timestamp(yesterday).replace(hour=23, minute=59, second=59)
    # Filter data
    data_quality = data[data.index <= end_time]

    tech_cols = {
        'Load':          ('Load_entsoe',             'Load_forecast_entsoe'),
        'Wind Onshore':  ('Wind_onshore_entsoe',     'Wind_onshore_forecast_entsoe'),
        'Wind Offshore': ('Wind_offshore_entsoe',    'Wind_offshore_forecast_entsoe'),
        'Solar':         ('Solar_entsoe',            'Solar_forecast_entsoe'),
    }

    skip_cols = []

    for tech_key, (act_col, fct_col) in tech_cols.items():
        # only proceed if the columns are in the DataFrame
        if act_col in data_quality.columns and fct_col in data_quality.columns:
            # get installed capacities for 2024 & 2025
            cap4 = caps4.get(tech_key, np.nan) if isinstance(caps4, dict) else np.nan
            cap5 = caps5.get(tech_key, np.nan) if isinstance(caps5, dict) else np.nan

            # if both years are missing or zero capacity
            if (pd.isna(cap4) or cap4 == 0) and (pd.isna(cap5) or cap5 == 0):
                act = data_quality[act_col]
                fct = data_quality[fct_col]
                # check if actual AND forecast are entirely zero or NaN
                only_zero_or_na = (act.fillna(0) == 0).all() and (fct.fillna(0) == 0).all()
                if only_zero_or_na:
                    skip_cols += [act_col, fct_col]

    # drop any columns flagged for skipping (ignore errors if somehow missing)
    if skip_cols:
        data_quality = data_quality.drop(columns=skip_cols, errors='ignore')

    # Compute missing
    missing_values = data_quality.isna().mean() * 100
    missing_values = missing_values.round(2)

    extreme_values = {}
    capacity_mismatch = {}
    neg_counts = {}
    over_counts = {}
    cutoff = pd.Timestamp('2025-01-01')

    # Iterate over columns
    for col in data_quality.columns:
        # Identify technology
        if 'Solar' in col:
            tech_key = 'Solar'
        elif 'Wind_onshore' in col:
            tech_key = 'Wind Onshore'
        elif 'Wind_offshore' in col:
            tech_key = 'Wind Offshore'
        elif 'Load' in col:
            tech_key = 'Load'
        else:
            extreme_values[col] = np.nan
            capacity_mismatch[col] = np.nan
            continue

        series = data_quality[col]
        # Year masks
        mask_2024 = series.index < cutoff
        # Fetch capacity values
        cap4 = caps4.get(tech_key, np.nan) if isinstance(caps4, dict) else np.nan
        cap5 = caps5.get(tech_key, np.nan) if isinstance(caps5, dict) else np.nan
        print('var:',col)
        print('cap4:',cap4)
        if tech_key == 'Load':
            # Negative load
            extreme_pct = round((series < 0).mean() * 100, 2)
            mismatch = np.nan
        else:
            # Create per-timestamp capacity
            cap_series = pd.Series(
                np.where(mask_2024, cap4, cap5),
                index=series.index
            )
            # Flags
            neg = series < 0
            over = (series > cap_series) & cap_series.notna()
            nonsense = neg | over
            extreme_pct = round(nonsense.mean() * 100, 2)
            # Mismatch: non-zero gen when cap missing or zero
            # cap4, cap5 are floats or NaN
            no_cap_2024 = pd.isna(cap4) or (cap4 == 0)
            no_cap_2025 = pd.isna(cap5) or (cap5 == 0)

            # check if there's at least one actual non-zero (treat NaN as 0)
            has_nonzero = (series.fillna(0) != 0).any()

            if no_cap_2024 and no_cap_2025 and has_nonzero:
                mismatch = 100.0
            else:
                mismatch = 0.0

        extreme_values[col] = extreme_pct
        capacity_mismatch[col] = mismatch

    display_extreme = {col: f"{val:.2f}" if not pd.isna(val) else ''
                       for col, val in extreme_values.items()}
    display_mismatch = {}
    for col, val in capacity_mismatch.items():
        if 'Load' in col:
            display_mismatch[col] = '-'
        else:
            display_mismatch[col] = '🚩' if val == 100.0 else ''

    # Build and render DataFrame
    metrics_df = pd.DataFrame({
        'Missing Values (%)': missing_values,
        'Extreme/Nonsensical Values (%)': pd.Series(display_extreme),
        'Capacity Mismatch Flag': pd.Series(display_mismatch)
    })

    st.dataframe(metrics_df.style.format({
        'Missing Values (%)': '{:.2f}',
        'Extreme/Nonsensical Values (%)': '{}'
    }))
    
    st.write('<b><u>Missing values (%)</u></b>: Percentage of missing values in the dataset',unsafe_allow_html=True)
    st.write('<b><u>Extreme/Nonsensical values (%)</u></b>: For Load, this is the % of values below 0. For generation, it is the % of values that are negative or above installed capacity.',unsafe_allow_html=True)
    st.write('<b><u>Capacity Mismatch Flag</u></b>: Shows "🚩" if installed capacity is `NaN` or `0` but the dataset has non-zero generation. Blank otherwise. For Load columns, it is "-".',unsafe_allow_html=True)

elif section == 'Forecasts Quality':

    st.header('Forecast Quality')
    
    # Time series for last 1 week
    last_week = data.loc[data.index >= (data.index[-1] - pd.Timedelta(days=7))]
    st.write('The below plot shows the time series of forecasts vs. observations provided by the ENTSO-E Transparency platform from the past week.')
    variable_options, flagged_columns = filter_variable_options(last_week)
    # Dropdown to select the variable
    selected_variable = st.selectbox("Select Variable for Line PLot", list(variable_options.keys()))
    actual_col, forecast_col = variable_options[selected_variable]

    x_vals = last_week.index.to_pydatetime().tolist()
    y_actual = last_week[actual_col].tolist()
    y_forecast = last_week[forecast_col].tolist()

    # then plot
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=x_vals,y=y_actual,mode="lines",name="Actual"))
    fig.add_trace(go.Scatter(x=x_vals,y=y_forecast,mode="lines",name="Forecast ENTSO-E"))
    fig.update_layout(title=f"Forecasts vs Actual for {selected_variable}",xaxis_title="Date",yaxis_title="Value [MW]")
    st.plotly_chart(fig)


    # Scatter plots for error distribution
    st.subheader('Error Distribution')
    st.write('The scatter plot below compares ENTSO-E forecasts against observations for the selected variable, giving a visual indication of the error distribution.')
    selected_variable = st.selectbox("Select Variable for Error Distribution", list(variable_options.keys()))

    # Get the corresponding columns for the selected variable
    actual_col, forecast_col = variable_options[selected_variable]

    if forecast_col in data.columns:
        # grab the two series, drop any NaNs, and align on their common timestamps
        obs = data[actual_col].dropna()
        pred = data[forecast_col].dropna()
        idx = obs.index.intersection(pred.index)
        obs = obs.loc[idx]
        pred = pred.loc[idx]

        # convert to pure Python lists
        x_vals = obs.tolist()
        y_vals = pred.tolist()

        fig = go.Figure()
        fig.add_trace(go.Scatter(x=x_vals,y=y_vals,mode='markers',name=f'{selected_variable}'))
        fig.update_layout(title=f'Error Distribution for {selected_variable}',xaxis_title='Observed [MW]',yaxis_title='Forecast ENTSO-E [MW]')

        st.plotly_chart(fig)

    st.subheader('Accuracy Metrics (Sorted by rMAE):')

    date_range = st.date_input(
        "Select Date Range for Metrics Calculation:",
        value=(pd.to_datetime("2024-01-01"), pd.to_datetime(pd.Timestamp('today')))
    )

    if len(date_range) == 2:
        start_date = pd.Timestamp(date_range[0])
        end_date = pd.Timestamp(date_range[1])
    else:
        st.error("Please select a valid date range.")
        st.stop()
    output_text = f"The below metrics are calculated from the selected date range from {start_date.strftime('%Y-%m-%d')} to {end_date.strftime('%Y-%m-%d')}. On the right is a radar plot with the rMAE."
    st.write(output_text)
    
    data_metrics = data.loc[start_date:end_date]

    accuracy_metrics = pd.DataFrame(columns=['MAE', 'RMSE' ,'rMAE'], index=list(variable_options.keys()))

    for variable in variable_options.keys():
        actual_col, forecast_col = variable_options[variable]
        obs = data_metrics[actual_col]
        pred = data_metrics[forecast_col]
        error = pred - obs
        
        mae = round(np.mean(np.abs(error)),2)
        if 'Load' in actual_col:
            persistence = obs.shift(168)  # Weekly persistence
        else:
            persistence = obs.shift(24)  # Daily persistence
        
        # Using the whole year's data for rMAE calculations
        rmae = round(mae / np.mean(np.abs(obs - persistence)),2)
        rmse = round(np.sqrt(np.mean((error)**2)), 2)
        row_label = variable #'Load' if 'Load' in actual_col else 'Solar' if 'Solar' in actual_col else 'Wind Offshore' if 'Wind_offshore' in actual_col else 'Wind Onshore'
        accuracy_metrics.loc[row_label] = [mae, rmse, rmae]

    accuracy_metrics.dropna(how='all', inplace=True)
    # Sort by rMAE (last column)
    accuracy_metrics.sort_values(by=accuracy_metrics.columns[-1], ascending=True, inplace=True)
    accuracy_metrics = accuracy_metrics.round(4)

    col1, col2 = st.columns([1, 1])

    with col1:
        # (optional) some top-margin before the table
        st.markdown(
            """
            <style>
            .small-chart-container {
                margin-top: 0px;
            }
            </style>
            """,
            unsafe_allow_html=True
        )
        st.dataframe(accuracy_metrics)

    with col2:
        # prepare the data
        rmae_values = accuracy_metrics['rMAE'].tolist()
        categories   = accuracy_metrics.index.tolist()

        # build the radar
        fig = go.Figure(
            go.Scatterpolar(
                r=rmae_values,
                theta=categories,
                fill='toself',
                name='rMAE'
            )
        )

        # 👉 shrink the total size, and give extra left/right margin for the labels
        fig.update_layout(
            width=300,    # make the whole plot a bit smaller
            height=300,
            margin=dict(
                l=50,      # more space on the left for long category names
                r=60,      # and on the right, if needed
                t=20,
                b=20
            ),
            polar=dict(
                angularaxis=dict(
                    tickfont=dict(size=11)   # if you want slightly smaller ticks
                ),
                radialaxis=dict(
                    visible=True,
                    range=[0, max(rmae_values)*1.2]
                )
            ),
            showlegend=False
        )

        # wrap in a div so you can still control vertical spacing via CSS
        st.markdown('<div class="small-chart-container">', unsafe_allow_html=True)
        st.plotly_chart(fig, use_container_width=False)
        st.markdown('</div>', unsafe_allow_html=True)

    st.subheader('ACF plots of Errors')
    st.write('The plots below show the ACF (Auto-Correlation Function) of the forecast errors for the data fields obtained from ENTSO-E: Solar, Wind and Load.')

    # Dropdown to select the variable
    selected_variable = st.selectbox("Select Variable for ACF of Errors", list(variable_options.keys()))

    # Get the corresponding columns for the selected variable
    actual_col, forecast_col = variable_options[selected_variable]

    # Calculate the error and plot ACF if columns are available
    if forecast_col in data.columns:
        obs = data[actual_col]
        pred = data[forecast_col]
        error = pred - obs

        st.write(f"**ACF of Errors for {selected_variable}**")
        fig, ax = plt.subplots(figsize=(10, 5))
        plot_acf(error.dropna(), ax=ax)
        st.pyplot(fig)

        # Optionally calculate and store ACF values for further analysis if needed
        acf_values = acf(error.dropna(), nlags=240)

elif section == 'Insights':
    st.header("Insights")

    st.write('The scatter plots below are created to explore possible correlations between the data fields: Solar, Wind Onshore, Wind Offshore (if any), Load, and Weather Features.')
    # Add a selection box for the data resolution (weekly, daily, hourly)
    data_2024 = data[data.index.year == 2024]

    resolution = st.selectbox('Select data resolution:', ['Daily', 'Hourly'])

    # Resample data based on the selected resolution
    if resolution == 'Hourly':
        resampled_data = data
    elif resolution == 'Daily':
        resampled_data = data.resample('D').mean()  # Resample to daily mean


    resampled_data.columns = [col.replace('_entsoe', '').replace('_', ' ') for col in resampled_data.columns]

    # Drop missing values
    selected_df = resampled_data.dropna()

    # Create the scatter plots using seaborn's pairplot
    sns.set_theme(style="ticks")
    pairplot_fig = sns.pairplot(selected_df)

    # Display the pairplot in Streamlit
    st.pyplot(pairplot_fig)

elif selected_country == 'Overall':

    def calculate_net_load_error(df, country_code):
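        """Return the latest (forecast - actual) net-load error and its timestamp for one zone,
        where net load = load minus solar, onshore wind and offshore wind generation."""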
        #filter_df = df.dropna()
        filter_df = df.dropna(axis=1, how='all')
        filter_df = filter_df.dropna()

        if filter_df.empty:
            # Return something (e.g., None) if there's no data left
            print(country_code)
            return None, None
        net_load = filter_df['Load_entsoe'].copy()
        for col in ['Wind_onshore_entsoe', 'Solar_entsoe', 'Wind_offshore_entsoe']:
            if col in filter_df.columns:
                net_load -= filter_df[col]

        net_load_forecast = filter_df['Load_forecast_entsoe'].copy()
        for col in ['Wind_onshore_forecast_entsoe', 'Solar_forecast_entsoe', 'Wind_offshore_forecast_entsoe']:
            if col in filter_df.columns:
                net_load_forecast -= filter_df[col]
        # Calculate the error based on the latest values
        error = (net_load_forecast - net_load).iloc[-1]
        date = filter_df.index[-1].strftime("%Y-%m-%d %H:%M")  # Get the latest date in string format

        return error, date

    def plot_net_load_error_map(data_dict):
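        """Render a Folium choropleth of the latest net-load error per bidding zone,
        using entsoe.geo shapes where available and local GeoJSON files as a fallback."""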
        # 1) compute your errors as before
        missing_zones={'ME','IE_SEM','MK','CY','BA','AL','XK'}
        net_load_errors = {
            country_code: calculate_net_load_error(data, country_code)
            for country_code, data in data_dict.items()
        }
        df_net_load_error = pd.DataFrame({
            "zoneName": list(net_load_errors),
            "net_load_error": [v[0] for v in net_load_errors.values()],
            "date":            [v[1] for v in net_load_errors.values()],
        })

        # 2) split your zones into standard vs. fallback
        selected = list(data_dict.keys())
        standard_zones = [z for z in selected if z not in missing_zones]
        fallback_zones = [z for z in selected if z in missing_zones]

        # 3a) load the standard ones with entsoe.load_zones
        date = pd.Timestamp.now()
        geo_std = load_zones(standard_zones, date).reset_index()

        # 3b) manually load the fallback ones
        gdfs = []
        for z in fallback_zones:
            fn = f"{z}.geojson"
            path = f'./geojson_missing/{fn}'
            g = gpd.read_file(path)
            g['zoneName'] = z
            gdfs.append(g)


        geo_fb = pd.concat(gdfs, ignore_index=True) if gdfs else gpd.GeoDataFrame()

        # 4) combine
        geo_data = pd.concat([geo_std, geo_fb], ignore_index=True)
        # Merge net_load_error and date into geo_data
        geo_data = geo_data.merge(df_net_load_error, on='zoneName', how='left')

        # Initialize the Folium map
        m = folium.Map(location=[46.6034, 1.8883], zoom_start=4, tiles="cartodb positron")

        # Calculate the maximum absolute net load error for normalization
        max_value = df_net_load_error['net_load_error'].abs().max()

        # Create a colormap with lighter shades
        colormap = branca.colormap.LinearColormap(
            colors=['#0D92F4', 'white', '#C62E2E'],  # Light blue to white to light coral
            vmin=-max_value,
            vmax=max_value,
            caption='Net Load Error [MW]'
        )

        # Define the style function
        def style_function(feature):
            net_load_error = feature['properties']['net_load_error']
            if net_load_error is None:
                return {'fillOpacity': 0.5, 'color': 'grey', 'weight': 0.5}
            else:
                fill_color = colormap(net_load_error)
                return {
                    'fillColor': fill_color,
                    'fillOpacity': 0.8,  # Set a constant opacity
                    'color': 'black',
                    'weight': 0.5
                }

        # Add the GeoJson layer with the custom style_function
        folium.GeoJson(
            geo_data,
            style_function=style_function,
            tooltip=folium.GeoJsonTooltip(
                fields=["zoneName", "net_load_error", "date"],
                aliases=["Country:", "Net Load Error [MW]:", "Date:"],
                localize=True
            )
        ).add_to(m)

        # Add the colormap to the map
        colormap.add_to(m)

        # Display the map
        _=st_folium(m, width=700, height=600)

    def calculate_mae(actual, forecast):
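        """Mean absolute error between the actual and forecast series."""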
        return np.mean(np.abs(actual - forecast))

    def calculate_persistence_mae(data, shift_hours):
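        """MAE of a persistence forecast that repeats the value from `shift_hours` hours earlier."""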
        return np.mean(np.abs(data - data.shift(shift_hours)))

    def calculate_rmae_for_country(df, variable_options):
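        """Relative MAE (forecast MAE / persistence MAE) per variable for one zone:
        weekly (168 h) persistence for load, daily (24 h) persistence for generation.
        Technologies without data are returned as None."""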
        rmae = {}
        rmae['Load'] = calculate_mae(df['Load_entsoe'], df['Load_forecast_entsoe']) / calculate_persistence_mae(df['Load_entsoe'], 168)

        for variable in variable_options.keys():
            if variable == 'Load':
                continue  # Load rMAE is already computed above with weekly persistence
            actual_col, forecast_col = variable_options[variable]
            rmae[variable] = calculate_mae(df[actual_col], df[forecast_col]) / calculate_persistence_mae(df[actual_col], 24)
        
        all_opt = ["Load", "Solar", "Wind Onshore", "Wind Offshore"]
        not_in_list2 = [elem for elem in all_opt if elem not in variable_options.keys()]

        for ele in not_in_list2:
            rmae[ele] = None

        return rmae

    def create_rmae_dataframe(data_dict):
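        """Build a DataFrame with one row per country and the rMAE of the Load, Wind and Solar forecasts."""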

        rmae_values = {'Country': [], 'Load': [], 'Wind Onshore': [], 'Wind Offshore': [], 'Solar': []}
        
        for country_name, df in data_dict.items():
            df_filtered = df.dropna()
            print(country_name)
            variable_options, flagged_columns = filter_variable_options(df_filtered)
            rmae = calculate_rmae_for_country(df_filtered, variable_options)
            
            rmae_values['Country'].append(country_name)
            
            for var, met in rmae.items():
                rmae_values[var].append(met)
        
        return pd.DataFrame(rmae_values)

    def plot_rmae_radar_chart(rmae_df):
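        """Plot a radar chart of the rMAE values, one trace per selected country."""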
        fig = go.Figure()
        
        # Dynamically build the angles, including a technology only if at least one country has a value for it
        angles = ['Load']
        if not rmae_df['Wind Offshore'].isna().all():
            angles.append('Wind Offshore')
        if not rmae_df['Wind Onshore'].isna().all():
            angles.append('Wind Onshore')
        if not rmae_df['Solar'].isna().all():
            angles.append('Solar')

        for _, row in rmae_df.iterrows():
            fig.add_trace(go.Scatterpolar(
                r=[row[angle] for angle in angles],
                theta=angles,
                fill='toself',
                name=row['Country']
            ))
        
        fig.update_layout(
            polar=dict(
                radialaxis=dict(visible=True, range=[0, 1.2])
            ),
            showlegend=True,
            title="rMAE Radar Chart by Country"
        )
        st.plotly_chart(fig)


    st.subheader("Net Load Error Map")
    st.write("""
        The net load error map highlights the error in the forecasted versus actual net load for each country. 
        Hover over each country to see details on the latest net load error and the timestamp (with the time zone of the corresponding country) of the last recorded data.
    """)

    plot_net_load_error_map(data_dict)

    st.subheader("rMAE of Forecasts published on ENTSO-E TP")
    st.write("""The rMAE of Forecasts chart compares the forecast accuracy of the predictions published by ENTSO-E Transparency Platform for Portugal, Spain, Belgium, France, Germany-Luxembourg, Austria, the Netherlands, Italy and Denmark. It shows the rMAE for onshore wind, offshore wind (if any), solar, and load demand, highlighting how well forecasts perform relative to a basic persistence model across these countries and energy sectors.""")

    rmae_df = create_rmae_dataframe(data_dict)

    # Add multiselect for country selection
    selected_countries = st.multiselect("Select Countries for Radar Plot", options=rmae_df['Country'].unique(), default=['BE', 'DE_LU', 'FR'])

    # Filter the dataframe based on the selected countries
    filtered_rmae_df = rmae_df[rmae_df['Country'].isin(selected_countries)]

    # Plot radar chart for the selected countries
    plot_rmae_radar_chart(filtered_rmae_df)