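"""Streamlit front-end for the Key Issue Generator (KIG).

Collects a user query, runs the kig_core planner workflow against a Neo4j
knowledge graph, and displays the generated Key Issues with an Excel export.
"""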
import streamlit as st
import pandas as pd
import logging
import time
import json # For displaying dicts/lists nicely
# Import core components from the refactored library
from kig_core.config import settings # Loads config on import
from kig_core.schemas import PlannerState, KeyIssue, GraphConfig
from kig_core.planner import build_graph
from kig_core.utils import key_issues_to_dataframe, dataframe_to_excel_bytes
from kig_core.graph_client import neo4j_client # Import the initialized client instance
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
# Configure logging for Streamlit app
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# --- Streamlit Page Configuration ---
st.set_page_config(page_title="Key Issue Generator (KIG)", layout="wide")
st.title("KIG - Key Issue Generator")
st.write("Generate structured Key Issues from knowledge graph context.")
# --- Sidebar ---
with st.sidebar:
    st.header("Status & Info")

    # Check Neo4j connectivity on startup
    neo4j_status = st.empty()
    try:
        # Accessing the client instance will trigger verification if not already done
        neo4j_client._get_driver()  # Ensure connection is attempted
        neo4j_status.success("Neo4j Connection Verified")
        can_run = True
    except ConnectionError as e:
        neo4j_status.error(f"Neo4j Error: {e}")
        can_run = False
    except Exception as e:
        neo4j_status.error(f"Neo4j Init Error: {e}")
        can_run = False

    st.header("Configuration")
    # Display some key settings (be careful with secrets)
    st.text(f"Main LLM: {settings.main_llm_model}")
    st.text(f"Neo4j URI: {settings.neo4j_uri}")
    st.text(f"Plan Method: {settings.plan_method}")
    st.text(f"Max Docs: {settings.max_docs}")

    st.header("About")
    st.info("""
This app uses LLMs and a Neo4j graph to:
1. Plan an approach based on your query.
2. Execute the plan, retrieving & processing graph data.
3. Generate structured Key Issues.
4. Output results to an Excel file.
""")
# --- Main Application Logic ---
st.header("Enter Your Query")
user_query = st.text_area(
    "Describe the technical requirement or area you want to explore for Key Issues:",
    "What are the main challenges and potential key issues in deploying edge computing for real-time AI-driven traffic management systems in smart cities?",
    height=150
)
# Session state to store results across reruns if needed
if 'key_issues_result' not in st.session_state:
    st.session_state.key_issues_result = None
if 'log_messages' not in st.session_state:
    st.session_state.log_messages = []
# Placeholder for status updates
status_placeholder = st.empty()
results_placeholder = st.container()
log_placeholder = st.expander("Show Execution Log")
if st.button("Generate Key Issues", type="primary", disabled=not can_run):
    if not user_query:
        st.error("Please enter a query.")
    else:
        st.session_state.key_issues_result = None  # Clear previous results
        st.session_state.log_messages = ["Starting Key Issue generation..."]

        with st.spinner("Processing... Building graph and executing workflow..."):
            start_time = time.time()
            try:
                # Build the workflow graph
                status_placeholder.info("Building workflow graph...")
                app_graph = build_graph()
                st.session_state.log_messages.append("Workflow graph built.")

                # Define the initial state
                initial_state: PlannerState = {
                    "user_query": user_query,
                    "messages": [HumanMessage(content=user_query)],
                    "plan": [],
                    "current_plan_step_index": -1,  # Will be set by start_planning
                    "step_outputs": {},
                    "key_issues": [],
                    "error": None
                }

                # Configuration for the graph run (e.g., thread_id for memory).
                # A hash of the user query serves as a simple thread identifier for memory (if used).
                import hashlib
                thread_id = hashlib.sha256(user_query.encode()).hexdigest()[:8]
                config: GraphConfig = {"configurable": {"thread_id": thread_id}}

                status_placeholder.info("Executing workflow... (This may take a while)")
                st.session_state.log_messages.append("Invoking graph stream...")

                final_state = None
                # Stream events for logging/updates
                for i, step_state in enumerate(app_graph.stream(initial_state, config=config)):
                    # step_state is a dictionary keyed by the name of the node that just ran
                    node_name = list(step_state.keys())[0]
                    node_output = step_state[node_name]
                    log_msg = f"Step {i+1}: Node '{node_name}' executed."
                    st.session_state.log_messages.append(log_msg)
                    # logger.info(log_msg)  # Log to console as well
                    # logger.debug(f"Node output: {node_output}")
                    # The status placeholder could be updated more dynamically here:
                    # status_placeholder.info(f"Executing: {node_name}...")
                    final_state = node_output  # Keep track of the latest node output

                end_time = time.time()
                st.session_state.log_messages.append(f"Workflow finished in {end_time - start_time:.2f} seconds.")
                status_placeholder.success(f"Processing Complete! ({end_time - start_time:.2f}s)")

                # --- Process Final Results ---
                if final_state and not final_state.get("error"):
                    generated_issues = final_state.get("key_issues", [])
                    st.session_state.key_issues_result = generated_issues
                    st.session_state.log_messages.append(f"Successfully extracted {len(generated_issues)} key issues.")
                elif final_state and final_state.get("error"):
                    error_msg = final_state.get("error", "Unknown error")
                    st.session_state.log_messages.append(f"Workflow failed: {error_msg}")
                    status_placeholder.error(f"Workflow failed: {error_msg}")
                else:
                    st.session_state.log_messages.append("Workflow finished, but no final state or key issues were found.")
                    status_placeholder.warning("Workflow finished, but no key issues were generated.")

            except Exception as e:
                end_time = time.time()
                logger.error(f"An error occurred during graph execution: {e}", exc_info=True)
                status_placeholder.error(f"An unexpected error occurred: {e}")
                st.session_state.log_messages.append(f"FATAL ERROR: {e}")
# --- Display Results ---
if st.session_state.key_issues_result:
    issues = st.session_state.key_issues_result
    results_placeholder.subheader(f"Generated Key Issues ({len(issues)})")

    df = key_issues_to_dataframe(issues)
    if not df.empty:
        # Display as DataFrame
        results_placeholder.dataframe(df, use_container_width=True)

        # Provide download button
        excel_bytes = dataframe_to_excel_bytes(df)
        results_placeholder.download_button(
            label="📥 Download Key Issues as Excel",
            data=excel_bytes,
            file_name="key_issues_output.xlsx",
            mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        )
    else:
        results_placeholder.info("No key issues were generated or parsed correctly.")
# Display logs
with log_placeholder:
    st.code("\n".join(st.session_state.log_messages), language="text")