adrienbrdne committed on
Commit fcf819f · verified · 1 Parent(s): 572a169

Update api.py

Files changed (1)
  1. api.py +201 -196
api.py CHANGED
@@ -1,197 +1,202 @@
- import logging
- import time
- import uvicorn
- from fastapi import FastAPI, HTTPException
- from pydantic import BaseModel
- from contextlib import asynccontextmanager
- from typing import List, Dict, Any
-
- # Import necessary components from your kig_core library
- # Ensure kig_core is in the Python path or installed as a package
- try:
-     from kig_core.config import settings # Loads config on import
-     from kig_core.schemas import PlannerState, KeyIssue as KigKeyIssue, GraphConfig
-     from kig_core.planner import build_graph
-     from kig_core.graph_client import neo4j_client # Import the initialized client instance
-     from langchain_core.messages import HumanMessage
- except ImportError as e:
-     print(f"Error importing kig_core components: {e}")
-     print("Please ensure kig_core is in your Python path or installed.")
-     # You might want to exit or raise a clearer error if imports fail
-     raise
-
- # Configure logging for the API
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
- logger = logging.getLogger(__name__)
-
- # --- Pydantic Models for API Request/Response ---
-
- class KeyIssueRequest(BaseModel):
-     """Request body containing the user's technical query."""
-     query: str
-
- class KeyIssueResponse(BaseModel):
-     """Response body containing the generated key issues."""
-     key_issues: List[KigKeyIssue] # Use the KeyIssue schema from kig_core
-
- # --- Global Variables / State ---
- # Keep the graph instance global for efficiency if desired,
- # but consider potential concurrency issues if graph/LLMs have state.
- # Rebuilding on each request is safer for statelessness.
- app_graph = None # Will be initialized at startup
-
- # --- Application Lifecycle (Startup/Shutdown) ---
- @asynccontextmanager
- async def lifespan(app: FastAPI):
-     """Handles startup and shutdown events."""
-     global app_graph
-     logger.info("API starting up...")
-     # Initialize Neo4j client (already done on import by graph_client.py)
-     # Verify connection (optional, already done by graph_client on init)
-     try:
-         logger.info("Verifying Neo4j connection...")
-         neo4j_client._get_driver().verify_connectivity()
-         logger.info("Neo4j connection verified.")
-     except Exception as e:
-         logger.error(f"Neo4j connection verification failed on startup: {e}", exc_info=True)
-         # Decide if the app should fail to start
-         # raise RuntimeError("Failed to connect to Neo4j on startup.") from e
-
-     # Build the LangGraph application
-     logger.info("Building LangGraph application...")
-     try:
-         app_graph = build_graph()
-         logger.info("LangGraph application built successfully.")
-     except Exception as e:
-         logger.error(f"Failed to build LangGraph application on startup: {e}", exc_info=True)
-         # Decide if the app should fail to start
-         raise RuntimeError("Failed to build LangGraph on startup.") from e
-
-     yield # API runs here
-
-     # --- Shutdown ---
-     logger.info("API shutting down...")
-     # Close Neo4j connection (handled by atexit in graph_client.py)
-     # neo4j_client.close() # Usually not needed due to atexit registration
-     logger.info("Neo4j client closed (likely via atexit).")
-     logger.info("API shutdown complete.")
-
-
- # --- FastAPI Application ---
- app = FastAPI(
-     title="Key Issue Generator API",
-     description="API to generate Key Issues based on a technical query using LLMs and Neo4j.",
-     version="1.0.0",
-     lifespan=lifespan # Use the lifespan context manager
- )
-
- # --- API Endpoint ---
- @app.post("/generate-key-issues", response_model=KeyIssueResponse)
- async def generate_issues(request: KeyIssueRequest):
-     """
-     Accepts a technical query and returns a list of generated Key Issues.
-     """
-     global app_graph
-     if app_graph is None:
-         logger.error("Graph application is not initialized.")
-         raise HTTPException(status_code=503, detail="Service Unavailable: Graph not initialized")
-
-     user_query = request.query
-     if not user_query:
-         raise HTTPException(status_code=400, detail="Query cannot be empty.")
-
-     logger.info(f"Received request to generate key issues for query: '{user_query[:100]}...'")
-     start_time = time.time()
-
-     try:
-         # --- Prepare Initial State for LangGraph ---
-         # Note: Ensure PlannerState aligns with what build_graph expects
-         initial_state: PlannerState = {
-             "user_query": user_query,
-             "messages": [HumanMessage(content=user_query)],
-             "plan": [],
-             "current_plan_step_index": -1, # Or as expected by your graph's entry point
-             "step_outputs": {},
-             "key_issues": [],
-             "error": None
-         }
-
-         # --- Define Configuration (e.g., Thread ID for Memory) ---
-         # Using a simple thread ID; adapt if using persistent memory
-         # import hashlib
-         # thread_id = hashlib.sha256(user_query.encode()).hexdigest()[:8]
-         # config: GraphConfig = {"configurable": {"thread_id": thread_id}}
-         # If not using memory, config can be simpler or empty based on LangGraph version
-         config: GraphConfig = {"configurable": {}} # Adjust if thread_id/memory is needed
-
-         # --- Execute the LangGraph Workflow ---
-         logger.info("Invoking LangGraph workflow...")
-         # Use invoke for a single result, or stream if you need intermediate steps
-         final_state = await app_graph.ainvoke(initial_state, config=config)
-         # If using stream:
-         # final_state = None
-         # async for step_state in app_graph.astream(initial_state, config=config):
-         #     # Process intermediate states if needed
-         #     node_name = list(step_state.keys())[0]
-         #     logger.debug(f"Graph step completed: {node_name}")
-         #     final_state = step_state[node_name] # Get the latest full state output
-
-         end_time = time.time()
-         logger.info(f"Workflow finished in {end_time - start_time:.2f} seconds.")
-
-         # --- Process Final Results ---
-         if final_state is None:
-             logger.error("Workflow execution did not produce a final state.")
-             raise HTTPException(status_code=500, detail="Workflow execution failed to produce a result.")
-
-         if final_state.get("error"):
-             error_msg = final_state.get("error", "Unknown error")
-             logger.error(f"Workflow failed with error: {error_msg}")
-             # Map internal errors to appropriate HTTP status codes
-             status_code = 500 # Internal Server Error by default
-             if "Neo4j" in error_msg or "connection" in error_msg.lower():
-                 status_code = 503 # Service Unavailable (database issue)
-             elif "LLM error" in error_msg or "parse" in error_msg.lower():
-                 status_code = 502 # Bad Gateway (issue with upstream LLM)
-
-             raise HTTPException(status_code=status_code, detail=f"Workflow failed: {error_msg}")
-
-         # --- Extract Key Issues ---
-         # Ensure the structure matches KeyIssueResponse and KigKeyIssue Pydantic model
-         generated_issues_data = final_state.get("key_issues", [])
-
-         # Validate and convert if necessary (Pydantic usually handles this via response_model)
-         try:
-             # Pydantic will validate against KeyIssueResponse -> List[KigKeyIssue]
-             response_data = {"key_issues": generated_issues_data}
-             logger.info(f"Successfully generated {len(generated_issues_data)} key issues.")
-             return response_data
-         except Exception as pydantic_error: # Catch potential validation errors
-             logger.error(f"Failed to validate final key issues against response model: {pydantic_error}", exc_info=True)
-             logger.error(f"Data that failed validation: {generated_issues_data}")
-             raise HTTPException(status_code=500, detail="Internal error: Failed to format key issues response.")
-
-
-     except HTTPException as http_exc:
-         # Re-raise HTTPExceptions directly
-         raise http_exc
-     except ConnectionError as e:
-         logger.error(f"Connection Error during API request: {e}", exc_info=True)
-         raise HTTPException(status_code=503, detail=f"Service Unavailable: {e}")
-     except ValueError as e:
-         logger.error(f"Value Error during API request: {e}", exc_info=True)
-         raise HTTPException(status_code=400, detail=f"Bad Request: {e}") # Often input validation issues
-     except Exception as e:
-         logger.error(f"An unexpected error occurred during API request: {e}", exc_info=True)
-         raise HTTPException(status_code=500, detail=f"Internal Server Error: An unexpected error occurred.")
-
-
- # --- How to Run ---
- if __name__ == "__main__":
-     # Make sure to set environment variables for config (NEO4J_URI, NEO4J_PASSWORD, GEMINI_API_KEY, etc.)
-     # or have a .env file in the same directory where you run this script.
-     print("Starting API server...")
-     print("Ensure required environment variables (e.g., NEO4J_URI, NEO4J_PASSWORD, GEMINI_API_KEY) are set or .env file is present.")
-     # Run with uvicorn: uvicorn api:app --reload --host 0.0.0.0 --port 8000
-     # The --reload flag is good for development. Remove it for production.
      uvicorn.run("api:app", host="0.0.0.0", port=8000, reload=True) # Use reload=False for production
 
+ import logging
+ import time
+ import uvicorn
+ from fastapi import FastAPI, HTTPException
+ from pydantic import BaseModel
+ from contextlib import asynccontextmanager
+ from typing import List, Dict, Any
+
+ # Import necessary components from your kig_core library
+ # Ensure kig_core is in the Python path or installed as a package
+ try:
+     from kig_core.config import settings # Loads config on import
+     from kig_core.schemas import PlannerState, KeyIssue as KigKeyIssue, GraphConfig
+     from kig_core.planner import build_graph
+     from kig_core.graph_client import neo4j_client # Import the initialized client instance
+     from langchain_core.messages import HumanMessage
+ except ImportError as e:
+     print(f"Error importing kig_core components: {e}")
+     print("Please ensure kig_core is in your Python path or installed.")
+     # You might want to exit or raise a clearer error if imports fail
+     raise
+
+ # Configure logging for the API
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ logger = logging.getLogger(__name__)
+
+ # --- Pydantic Models for API Request/Response ---
+
+ class KeyIssueRequest(BaseModel):
+     """Request body containing the user's technical query."""
+     query: str
+
+ class KeyIssueResponse(BaseModel):
+     """Response body containing the generated key issues."""
+     key_issues: List[KigKeyIssue] # Use the KeyIssue schema from kig_core
+
+ # --- Global Variables / State ---
+ # Keep the graph instance global for efficiency if desired,
+ # but consider potential concurrency issues if graph/LLMs have state.
+ # Rebuilding on each request is safer for statelessness.
+ app_graph = None # Will be initialized at startup
+
+ # --- Application Lifecycle (Startup/Shutdown) ---
+ @asynccontextmanager
+ async def lifespan(app: FastAPI):
+     """Handles startup and shutdown events."""
+     global app_graph
+     logger.info("API starting up...")
+     # Initialize Neo4j client (already done on import by graph_client.py)
+     # Verify connection (optional, already done by graph_client on init)
+     try:
+         logger.info("Verifying Neo4j connection...")
+         neo4j_client._get_driver().verify_connectivity()
+         logger.info("Neo4j connection verified.")
+     except Exception as e:
+         logger.error(f"Neo4j connection verification failed on startup: {e}", exc_info=True)
+         # Decide if the app should fail to start
+         # raise RuntimeError("Failed to connect to Neo4j on startup.") from e
+
+     # Build the LangGraph application
+     logger.info("Building LangGraph application...")
+     try:
+         app_graph = build_graph()
+         logger.info("LangGraph application built successfully.")
+     except Exception as e:
+         logger.error(f"Failed to build LangGraph application on startup: {e}", exc_info=True)
+         # Decide if the app should fail to start
+         raise RuntimeError("Failed to build LangGraph on startup.") from e
+
+     yield # API runs here
+
+     # --- Shutdown ---
+     logger.info("API shutting down...")
+     # Close Neo4j connection (handled by atexit in graph_client.py)
+     # neo4j_client.close() # Usually not needed due to atexit registration
+     logger.info("Neo4j client closed (likely via atexit).")
+     logger.info("API shutdown complete.")
+
+
+ # --- FastAPI Application ---
+ app = FastAPI(
+     title="Key Issue Generator API",
+     description="API to generate Key Issues based on a technical query using LLMs and Neo4j.",
+     version="1.0.0",
+     lifespan=lifespan # Use the lifespan context manager
+ )
+
+ # --- API Endpoint ---
+ # API state check route
+ @app.get("/")
+ def read_root():
+     return {"status": "ok"}
+
+ @app.post("/generate-key-issues", response_model=KeyIssueResponse)
+ async def generate_issues(request: KeyIssueRequest):
+     """
+     Accepts a technical query and returns a list of generated Key Issues.
+     """
+     global app_graph
+     if app_graph is None:
+         logger.error("Graph application is not initialized.")
+         raise HTTPException(status_code=503, detail="Service Unavailable: Graph not initialized")
+
+     user_query = request.query
+     if not user_query:
+         raise HTTPException(status_code=400, detail="Query cannot be empty.")
+
+     logger.info(f"Received request to generate key issues for query: '{user_query[:100]}...'")
+     start_time = time.time()
+
+     try:
+         # --- Prepare Initial State for LangGraph ---
+         # Note: Ensure PlannerState aligns with what build_graph expects
+         initial_state: PlannerState = {
+             "user_query": user_query,
+             "messages": [HumanMessage(content=user_query)],
+             "plan": [],
+             "current_plan_step_index": -1, # Or as expected by your graph's entry point
+             "step_outputs": {},
+             "key_issues": [],
+             "error": None
+         }
+
+         # --- Define Configuration (e.g., Thread ID for Memory) ---
+         # Using a simple thread ID; adapt if using persistent memory
+         # import hashlib
+         # thread_id = hashlib.sha256(user_query.encode()).hexdigest()[:8]
+         # config: GraphConfig = {"configurable": {"thread_id": thread_id}}
+         # If not using memory, config can be simpler or empty based on LangGraph version
+         config: GraphConfig = {"configurable": {}} # Adjust if thread_id/memory is needed
+
+         # --- Execute the LangGraph Workflow ---
+         logger.info("Invoking LangGraph workflow...")
+         # Use invoke for a single result, or stream if you need intermediate steps
+         final_state = await app_graph.ainvoke(initial_state, config=config)
+         # If using stream:
+         # final_state = None
+         # async for step_state in app_graph.astream(initial_state, config=config):
+         #     # Process intermediate states if needed
+         #     node_name = list(step_state.keys())[0]
+         #     logger.debug(f"Graph step completed: {node_name}")
+         #     final_state = step_state[node_name] # Get the latest full state output
+
+         end_time = time.time()
+         logger.info(f"Workflow finished in {end_time - start_time:.2f} seconds.")
+
+         # --- Process Final Results ---
+         if final_state is None:
+             logger.error("Workflow execution did not produce a final state.")
+             raise HTTPException(status_code=500, detail="Workflow execution failed to produce a result.")
+
+         if final_state.get("error"):
+             error_msg = final_state.get("error", "Unknown error")
+             logger.error(f"Workflow failed with error: {error_msg}")
+             # Map internal errors to appropriate HTTP status codes
+             status_code = 500 # Internal Server Error by default
+             if "Neo4j" in error_msg or "connection" in error_msg.lower():
+                 status_code = 503 # Service Unavailable (database issue)
+             elif "LLM error" in error_msg or "parse" in error_msg.lower():
+                 status_code = 502 # Bad Gateway (issue with upstream LLM)
+
+             raise HTTPException(status_code=status_code, detail=f"Workflow failed: {error_msg}")
+
+         # --- Extract Key Issues ---
+         # Ensure the structure matches KeyIssueResponse and KigKeyIssue Pydantic model
+         generated_issues_data = final_state.get("key_issues", [])
+
+         # Validate and convert if necessary (Pydantic usually handles this via response_model)
+         try:
+             # Pydantic will validate against KeyIssueResponse -> List[KigKeyIssue]
+             response_data = {"key_issues": generated_issues_data}
+             logger.info(f"Successfully generated {len(generated_issues_data)} key issues.")
+             return response_data
+         except Exception as pydantic_error: # Catch potential validation errors
+             logger.error(f"Failed to validate final key issues against response model: {pydantic_error}", exc_info=True)
+             logger.error(f"Data that failed validation: {generated_issues_data}")
+             raise HTTPException(status_code=500, detail="Internal error: Failed to format key issues response.")
+
+
+     except HTTPException as http_exc:
+         # Re-raise HTTPExceptions directly
+         raise http_exc
+     except ConnectionError as e:
+         logger.error(f"Connection Error during API request: {e}", exc_info=True)
+         raise HTTPException(status_code=503, detail=f"Service Unavailable: {e}")
+     except ValueError as e:
+         logger.error(f"Value Error during API request: {e}", exc_info=True)
+         raise HTTPException(status_code=400, detail=f"Bad Request: {e}") # Often input validation issues
+     except Exception as e:
+         logger.error(f"An unexpected error occurred during API request: {e}", exc_info=True)
+         raise HTTPException(status_code=500, detail=f"Internal Server Error: An unexpected error occurred.")
+
+
+ # --- How to Run ---
+ if __name__ == "__main__":
+     # Make sure to set environment variables for config (NEO4J_URI, NEO4J_PASSWORD, GEMINI_API_KEY, etc.)
+     # or have a .env file in the same directory where you run this script.
+     print("Starting API server...")
+     print("Ensure required environment variables (e.g., NEO4J_URI, NEO4J_PASSWORD, GEMINI_API_KEY) are set or .env file is present.")
+     # Run with uvicorn: uvicorn api:app --reload --host 0.0.0.0 --port 8000
+     # The --reload flag is good for development. Remove it for production.
      uvicorn.run("api:app", host="0.0.0.0", port=8000, reload=True) # Use reload=False for production
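
For quick manual testing of the updated endpoints, a minimal client sketch is shown below. It assumes the server was started as in the __main__ block above (port 8000 on the local machine) and uses the third-party requests package; the example query string and the 600-second timeout are illustrative choices, not part of the committed code.

    import requests

    BASE_URL = "http://localhost:8000"  # assumes uvicorn.run(..., port=8000) on this machine

    # Health check added in this commit: GET / should return {"status": "ok"}.
    print(requests.get(f"{BASE_URL}/", timeout=10).json())

    # Generate key issues: POST /generate-key-issues with a KeyIssueRequest body.
    payload = {"query": "Example technical query about network architecture"}  # illustrative query
    resp = requests.post(f"{BASE_URL}/generate-key-issues", json=payload, timeout=600)
    resp.raise_for_status()  # non-2xx responses carry the HTTPException detail
    for issue in resp.json()["key_issues"]:
        print(issue)

For a production-style launch without auto-reload, the comments in api.py suggest running uvicorn api:app --host 0.0.0.0 --port 8000 instead of relying on reload=True.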