# KIG / app.py
import streamlit as st
import pandas as pd
import logging
import time
import json # For displaying dicts/lists nicely
# Import core components from the refactored library
from kig_core.config import settings # Loads config on import
from kig_core.schemas import PlannerState, KeyIssue, GraphConfig
from kig_core.planner import build_graph
from kig_core.utils import key_issues_to_dataframe, dataframe_to_excel_bytes
from kig_core.graph_client import neo4j_client # Import the initialized client instance
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
# Configure logging for Streamlit app
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
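# Note: basicConfig is a no-op if the root logger already has handlers
# (e.g., if kig_core configured logging when it was imported above).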
# --- Streamlit Page Configuration ---
st.set_page_config(page_title="Key Issue Generator (KIG)", layout="wide")
st.title("KIG - Key Issue Generator")
st.write("Generate structured Key Issues from knowledge graph context.")
# --- Sidebar ---
with st.sidebar:
    st.header("Status & Info")
    # Check Neo4j connectivity on startup
    neo4j_status = st.empty()
    try:
        # Accessing the client instance will trigger verification if not already done
        neo4j_client._get_driver()  # Ensure connection is attempted
        neo4j_status.success("Neo4j Connection Verified")
        can_run = True
    except ConnectionError as e:
        neo4j_status.error(f"Neo4j Error: {e}")
        can_run = False
    except Exception as e:
        neo4j_status.error(f"Neo4j Init Error: {e}")
        can_run = False
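    # The explicit _get_driver() call above is a defensive connectivity check; it
    # relies on kig_core's client raising ConnectionError when Neo4j is unreachable.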
st.header("Configuration")
# Display some key settings (be careful with secrets)
st.text(f"Main LLM: {settings.main_llm_model}")
st.text(f"Neo4j URI: {settings.neo4j_uri}")
st.text(f"Plan Method: {settings.plan_method}")
st.text(f"Max Docs: {settings.max_docs}")
st.header("About")
st.info("""
This app uses LLMs and a Neo4j graph to:
1. Plan an approach based on your query.
2. Execute the plan, retrieving & processing graph data.
3. Generate structured Key Issues.
4. Output results to an Excel file.
""")
# --- Main Application Logic ---
st.header("Enter Your Query")
user_query = st.text_area(
    "Describe the technical requirement or area you want to explore for Key Issues:",
    "What are the main challenges and potential key issues in deploying edge computing for real-time AI-driven traffic management systems in smart cities?",
    height=150
)
# Session state to store results across reruns if needed
if 'key_issues_result' not in st.session_state:
    st.session_state.key_issues_result = None
if 'log_messages' not in st.session_state:
    st.session_state.log_messages = []
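# Streamlit reruns this script from top to bottom on every interaction, so the
# generated issues and log lines are kept in st.session_state to survive reruns.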
# Placeholder for status updates
status_placeholder = st.empty()
results_placeholder = st.container()
log_placeholder = st.expander("Show Execution Log")
if st.button("Generate Key Issues", type="primary", disabled=not can_run):
    if not user_query:
        st.error("Please enter a query.")
    else:
        st.session_state.key_issues_result = None  # Clear previous results
        st.session_state.log_messages = ["Starting Key Issue generation..."]

        with st.spinner("Processing... Building graph and executing workflow..."):
            start_time = time.time()
            try:
                # Build the graph
                status_placeholder.info("Building workflow graph...")
                app_graph = build_graph()
                st.session_state.log_messages.append("Workflow graph built.")
                # Define the initial state
                initial_state: PlannerState = {
                    "user_query": user_query,
                    "messages": [HumanMessage(content=user_query)],
                    "plan": [],
                    "current_plan_step_index": -1,  # Will be set by start_planning
                    "step_outputs": {},
                    "key_issues": [],
                    "error": None
                }

                # Configuration for the graph run (e.g., thread_id for memory)
                # Using user query hash as a simple thread identifier for memory (if used)
                import hashlib
                thread_id = hashlib.sha256(user_query.encode()).hexdigest()[:8]
                config: GraphConfig = {"configurable": {"thread_id": thread_id}}
status_placeholder.info("Executing workflow... (This may take a while)")
st.session_state.log_messages.append("Invoking graph stream...")
final_state = None
# Stream events for logging/updates
for i, step_state in enumerate(app_graph.stream(initial_state, config=config)):
# step_state is a dictionary where keys are node names
node_name = list(step_state.keys())[0]
node_output = step_state[node_name]
log_msg = f"Step {i+1}: Node '{node_name}' executed."
st.session_state.log_messages.append(log_msg)
# logger.info(log_msg) # Log to console as well
# logger.debug(f"Node output: {node_output}")
# You could update the status placeholder more dynamically here
# status_placeholder.info(f"Executing: {node_name}...")
final_state = node_output # Keep track of the latest state
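                # Caveat: each streamed update only contains the keys returned by that node,
                # so the code below assumes the terminal node emits "error" and "key_issues";
                # otherwise the fully merged final state would have to be fetched separately.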
                end_time = time.time()
                st.session_state.log_messages.append(f"Workflow finished in {end_time - start_time:.2f} seconds.")
                status_placeholder.success(f"Processing Complete! ({end_time - start_time:.2f}s)")

                # --- Process Final Results ---
                if final_state and not final_state.get("error"):
                    generated_issues = final_state.get("key_issues", [])
                    st.session_state.key_issues_result = generated_issues
                    st.session_state.log_messages.append(f"Successfully extracted {len(generated_issues)} key issues.")
                elif final_state and final_state.get("error"):
                    error_msg = final_state.get("error", "Unknown error")
                    st.session_state.log_messages.append(f"Workflow failed: {error_msg}")
                    status_placeholder.error(f"Workflow failed: {error_msg}")
                else:
                    st.session_state.log_messages.append("Workflow finished, but no final state or key issues found.")
                    status_placeholder.warning("Workflow finished, but no key issues were generated.")
            except Exception as e:
                end_time = time.time()
                logger.error(f"An error occurred during graph execution: {e}", exc_info=True)
                status_placeholder.error(f"An unexpected error occurred: {e}")
                st.session_state.log_messages.append(f"FATAL ERROR: {e}")
# --- Display Results ---
if st.session_state.key_issues_result:
    issues = st.session_state.key_issues_result
    results_placeholder.subheader(f"Generated Key Issues ({len(issues)})")

    df = key_issues_to_dataframe(issues)
    if not df.empty:
        # Display as DataFrame
        results_placeholder.dataframe(df, use_container_width=True)

        # Provide download button
        excel_bytes = dataframe_to_excel_bytes(df)
        results_placeholder.download_button(
            label="📥 Download Key Issues as Excel",
            data=excel_bytes,
            file_name="key_issues_output.xlsx",
            mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        )
    else:
        results_placeholder.info("No key issues were generated or parsed correctly.")
# Display logs
with log_placeholder:
    st.code("\n".join(st.session_state.log_messages), language="text")