import streamlit as st
import json
import os

from utils import load_and_process_data, create_time_series_plot, display_statistics, call_api

# Pull the API token from the environment on first load
DEFAULT_TOKEN = os.getenv('NILM_API_TOKEN')
if 'api_token' not in st.session_state:
    st.session_state.api_token = DEFAULT_TOKEN

# Track the active page so results from other pages are cleared on navigation
page_id = 4
if 'current_page' not in st.session_state:
    st.session_state.current_page = page_id
elif st.session_state.current_page != page_id:
    # Clear API response when switching to this page
    if 'api_response' in st.session_state:
        st.session_state.api_response = None
    # Update current page
    st.session_state.current_page = page_id

# Initialize session state variables
if 'current_file' not in st.session_state:
    st.session_state.current_file = None
if 'json_data' not in st.session_state:
    st.session_state.json_data = None
if 'api_response' not in st.session_state:
    st.session_state.api_response = None
if 'using_default_file' not in st.session_state:
    st.session_state.using_default_file = True

st.title("Non-Intrusive Load Monitoring (NILM) Analysis")
st.markdown("""
This service provides a detailed breakdown of energy consumption by analyzing aggregate power measurements.

### Features
- Appliance-level energy consumption breakdown
- Load pattern identification
- Device usage analysis
- Detailed consumption insights
""")

# Default file path
default_file_path = "samples/4_NILM.json"  # Adjust this path to your default file

# File upload and processing
uploaded_file = st.file_uploader("Upload JSON file (or use default)", type=['json'])

# Load the default file if no file is uploaded and using_default_file is True
if uploaded_file is None and st.session_state.using_default_file:
    if os.path.exists(default_file_path):
        st.info(f"Using default file: {default_file_path}")
        with open(default_file_path, 'r') as f:
            file_contents = f.read()
        if st.session_state.current_file != file_contents:
            st.session_state.current_file = file_contents
            st.session_state.json_data = json.loads(file_contents)
    else:
        st.warning(f"Default file not found at: {default_file_path}")
        st.session_state.using_default_file = False

# If a file is uploaded, process it
if uploaded_file:
    st.session_state.using_default_file = False
    try:
        # Decode to text so the session always stores a string, matching the default-file path
        file_contents = uploaded_file.read().decode('utf-8')
        st.session_state.current_file = file_contents
        st.session_state.json_data = json.loads(file_contents)
    except Exception as e:
        st.error(f"Error processing file: {str(e)}")

# Process and display the input data if available
if st.session_state.json_data:
    try:
        dfs = load_and_process_data(st.session_state.json_data)
        if dfs:
            st.header("Input Data")
            tabs = st.tabs(["Visualization", "Raw JSON", "Statistics"])
            with tabs[0]:
                for unit, df in dfs.items():
                    st.plotly_chart(create_time_series_plot(df, unit), use_container_width=True)
            with tabs[1]:
                st.json(st.session_state.json_data)
            with tabs[2]:
                display_statistics(dfs)

        if st.button("Run NILM Analysis"):
            if not st.session_state.api_token:
                st.error("Please enter your API token in the sidebar first.")
            else:
                with st.spinner("Performing NILM analysis..."):
                    st.session_state.api_response = call_api(
                        st.session_state.current_file,
                        st.session_state.api_token,
                        "inference_nilm"
                    )
    except Exception as e:
        st.error(f"Error processing data: {str(e)}")

# Display API results
if st.session_state.api_response:
    st.header("NILM Analysis Results")
    tabs = st.tabs(["Visualization", "Raw JSON", "Statistics"])
    with tabs[0]:
        response_dfs = load_and_process_data(
            st.session_state.api_response,
            input_data=st.session_state.json_data
        )
        if response_dfs:
            for unit, df in response_dfs.items():
                st.plotly_chart(create_time_series_plot(df, unit), use_container_width=True)

        # Add appliance-specific visualizations
        st.subheader("Appliance-Level Breakdown")
        # Additional NILM-specific visualizations could be added here

    with tabs[1]:
        st.json(st.session_state.api_response)

    with tabs[2]:
        if response_dfs:
            display_statistics(response_dfs)
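
# --- Hypothetical sketch: appliance-level share chart -------------------------
# The "Appliance-Level Breakdown" subheader above is currently a placeholder.
# The helper below is a minimal sketch of one way it could be filled in. It
# assumes each response DataFrame carries one numeric column per disaggregated
# appliance; that layout, and the function name, are assumptions and may not
# match the actual NILM response schema. If used, it could be called inside
# tabs[0] for each (unit, df) pair in response_dfs.
import plotly.express as px


def plot_appliance_shares(df, unit):
    """Sketch: pie chart of each appliance's share of total estimated consumption."""
    totals = df.sum(numeric_only=True)  # total per appliance column
    if totals.empty:
        st.info("No appliance columns found to summarize.")
        return
    fig = px.pie(
        values=totals.values,
        names=totals.index,
        title=f"Estimated consumption share ({unit})",
    )
    st.plotly_chart(fig, use_container_width=True)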