import logging
import time
import uvicorn
import requests
import os  # Read configuration such as GEMINI_API_KEY from the environment

from fastapi import FastAPI, HTTPException, Body
from pydantic import BaseModel
from contextlib import asynccontextmanager
from typing import List, Dict, Any

# Import necessary components from your kig_core library
# Ensure kig_core is in the Python path or installed as a package
try:
    from kig_core.config import settings # Loads config on import
    from kig_core.schemas import PlannerState, KeyIssue as KigKeyIssue, GraphConfig
    from kig_core.planner import build_graph
    from kig_core.graph_client import neo4j_client # Import the initialized client instance
    from langchain_core.messages import HumanMessage
except ImportError as e:
    print(f"Error importing kig_core components: {e}")
    print("Please ensure kig_core is in your Python path or installed.")
    # You might want to exit or raise a clearer error if imports fail
    raise

# Imports for Gemini (google-generativeai SDK)
try:
    import google.generativeai as genai
    from google.generativeai.types import GenerationConfig
except ImportError:
    print("google.generativeai library not found. Please install it: pip install google-generativeai")
    genai = None # Set to None if import fails

# Configure logging for the API
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# --- Pydantic Models for API Request/Response ---

class KeyIssueRequest(BaseModel):
    """Request body containing the user's technical query."""
    query: str

class KeyIssueResponse(BaseModel):
    """Response body containing the generated key issues."""
    key_issues: List[KigKeyIssue] # Use the KeyIssue schema from kig_core

class SpecificityEvaluationRequest(BaseModel):
    title: str
    description: str
    technical_topic: str

class SpecificityScore(BaseModel):
    predicted_class: str
    score: float

class SpecificityEvaluationResponse(BaseModel):
    problematic: str
    specificity: SpecificityScore
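
# Illustrative payloads for the models above (example values only, not from a real run):
#   POST /evaluate-specificity
#     {"title": "...", "description": "...", "technical_topic": "..."}
#   -> {"problematic": "How can ...?",
#       "specificity": {"predicted_class": "specific", "score": 0.92}}
# The class name and score here are placeholders; real values come from the
# external specificity prediction API.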

# --- Global Variables / State ---
# Keep the graph instance global for efficiency if desired,
# but consider potential concurrency issues if the graph/LLMs hold state.
# Rebuilding on each request is safer for statelessness.
app_graph = None # Initialized at startup
gemini_configured = False # Set to True once genai.configure() succeeds at startup

# --- Application Lifecycle (Startup/Shutdown) ---
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Handles startup and shutdown events."""
    global app_graph, gemini_configured
    logger.info("API starting up...")
    # Initialize Neo4j client (already done on import by graph_client.py)
    # Verify connection (optional, already done by graph_client on init)
    try:
        logger.info("Verifying Neo4j connection...")
        neo4j_client._get_driver().verify_connectivity()
        logger.info("Neo4j connection verified.")
    except Exception as e:
        logger.error(f"Neo4j connection verification failed on startup: {e}", exc_info=True)
        # Decide if the app should fail to start
        # raise RuntimeError("Failed to connect to Neo4j on startup.") from e

    # Build the LangGraph application
    logger.info("Building LangGraph application...")
    try:
        app_graph = build_graph()
        logger.info("LangGraph application built successfully.")
    except Exception as e:
        logger.error(f"Failed to build LangGraph application on startup: {e}", exc_info=True)
        # Decide if the app should fail to start
        raise RuntimeError("Failed to build LangGraph on startup.") from e

    # Initialize Gemini client
    logger.info("Initializing Gemini client...")
    if genai:
        try:
            # GEMINI_API_KEY is expected in the environment or in settings
            api_key = os.getenv("GEMINI_API_KEY") or getattr(settings, "GEMINI_API_KEY", None)
            if not api_key:
                raise ValueError("GEMINI_API_KEY not found in environment or settings.")
            genai.configure(api_key=api_key)
            gemini_configured = True
            # Optionally, create a specific model instance here if it is needed frequently
            # gemini_model = genai.GenerativeModel(...)
            logger.info("Gemini client configured successfully.")
        except Exception as e:
            logger.error(f"Failed to configure Gemini client: {e}", exc_info=True)
            # Decide if the app should fail to start or just log the error;
            # gemini_configured stays False, so endpoints using Gemini return 503
    else:
        logger.warning("Gemini library not imported. Endpoints requiring Gemini will not work.")

    yield # API runs here

    # --- Shutdown ---
    logger.info("API shutting down...")
    # Close Neo4j connection (handled by atexit in graph_client.py)
    # neo4j_client.close() # Usually not needed due to atexit registration
    logger.info("Neo4j client closed (likely via atexit).")
    logger.info("API shutdown complete.")


# --- FastAPI Application ---
app = FastAPI(
    title="Key Issue Generator Specificity API",
    description="API to generate Key Issues based on a technical query using LLMs and Neo4j and evaluate problematic specificity.",
    version="1.1.0",
    lifespan=lifespan # Use the lifespan context manager
)

# --- API Endpoint ---
# API state check route
@app.get("/")
def read_root():
    return {"status": "ok"}

@app.post("/generate-key-issues", response_model=KeyIssueResponse)
async def generate_issues(request: KeyIssueRequest):
    """
    Accepts a technical query and returns a list of generated Key Issues.
    """
    global app_graph
    if app_graph is None:
        logger.error("Graph application is not initialized.")
        raise HTTPException(status_code=503, detail="Service Unavailable: Graph not initialized")

    user_query = request.query
    if not user_query:
        raise HTTPException(status_code=400, detail="Query cannot be empty.")

    logger.info(f"Received request to generate key issues for query: '{user_query[:100]}...'")
    start_time = time.time()

    try:
        # --- Prepare Initial State for LangGraph ---
        # Note: Ensure PlannerState aligns with what build_graph expects
        initial_state: PlannerState = {
            "user_query": user_query,
            "messages": [HumanMessage(content=user_query)],
            "plan": [],
            "current_plan_step_index": -1, # Or as expected by your graph's entry point
            "step_outputs": {},
            "key_issues": [],
            "error": None
        }

        # --- Define Configuration (e.g., Thread ID for Memory) ---
        # Using a simple thread ID; adapt if using persistent memory
        # import hashlib
        # thread_id = hashlib.sha256(user_query.encode()).hexdigest()[:8]
        # config: GraphConfig = {"configurable": {"thread_id": thread_id}}
        # If not using memory, config can be simpler or empty based on LangGraph version
        config: GraphConfig = {"configurable": {}} # Adjust if thread_id/memory is needed

        # --- Execute the LangGraph Workflow ---
        logger.info("Invoking LangGraph workflow...")
        # Use invoke for a single result, or stream if you need intermediate steps
        final_state = await app_graph.ainvoke(initial_state, config=config)
        # If using stream:
        # final_state = None
        # async for step_state in app_graph.astream(initial_state, config=config):
        #     # Process intermediate states if needed
        #     node_name = list(step_state.keys())[0]
        #     logger.debug(f"Graph step completed: {node_name}")
        #     final_state = step_state[node_name] # Get the latest full state output

        end_time = time.time()
        logger.info(f"Workflow finished in {end_time - start_time:.2f} seconds.")

        # --- Process Final Results ---
        if final_state is None:
            logger.error("Workflow execution did not produce a final state.")
            raise HTTPException(status_code=500, detail="Workflow execution failed to produce a result.")

        if final_state.get("error"):
            error_msg = final_state.get("error", "Unknown error")
            logger.error(f"Workflow failed with error: {error_msg}")
            # Map internal errors to appropriate HTTP status codes
            status_code = 500 # Internal Server Error by default
            if "Neo4j" in error_msg or "connection" in error_msg.lower():
                status_code = 503 # Service Unavailable (database issue)
            elif "LLM error" in error_msg or "parse" in error_msg.lower():
                 status_code = 502 # Bad Gateway (issue with upstream LLM)

            raise HTTPException(status_code=status_code, detail=f"Workflow failed: {error_msg}")

        # --- Extract Key Issues ---
        # Ensure the structure matches KeyIssueResponse and KigKeyIssue Pydantic model
        generated_issues_data = final_state.get("key_issues", [])

        # Validate and convert if necessary (Pydantic usually handles this via response_model)
        try:
            # Pydantic will validate against KeyIssueResponse -> List[KigKeyIssue]
            response_data = {"key_issues": generated_issues_data}
            logger.info(f"Successfully generated {len(generated_issues_data)} key issues.")
            return response_data
        except Exception as pydantic_error: # Catch potential validation errors
            logger.error(f"Failed to validate final key issues against response model: {pydantic_error}", exc_info=True)
            logger.error(f"Data that failed validation: {generated_issues_data}")
            raise HTTPException(status_code=500, detail="Internal error: Failed to format key issues response.")


    except HTTPException as http_exc:
        # Re-raise HTTPExceptions directly
        raise http_exc
    except ConnectionError as e:
        logger.error(f"Connection Error during API request: {e}", exc_info=True)
        raise HTTPException(status_code=503, detail=f"Service Unavailable: {e}")
    except ValueError as e:
        logger.error(f"Value Error during API request: {e}", exc_info=True)
        raise HTTPException(status_code=400, detail=f"Bad Request: {e}") # Often input validation issues
    except Exception as e:
        logger.error(f"An unexpected error occurred during API request: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Internal Server Error: An unexpected error occurred.")


@app.post("/evaluate-specificity", response_model=SpecificityEvaluationResponse)
async def evaluation(request: SpecificityEvaluationRequest):
    """
    Generates a technical problematic using Gemini based on title, description,
    and topic, then evaluates its specificity using an external API (fine-tuned model for specificity).
    """
    # Check that the Gemini library was imported and configured at startup
    if not genai or not gemini_configured:
        logger.error("Gemini client is not available or configured.")
        raise HTTPException(status_code=503, detail="Service Unavailable: Gemini client not configured")

    title = request.title
    description = request.description
    technical_topic = request.technical_topic

    if not all([title, description, technical_topic]):
        raise HTTPException(status_code=400, detail="Missing required fields: title, description, or technical_topic.")

    logger.info("Received request for specificity evaluation.")
    logger.debug(f"Title: {title}, Topic: {technical_topic}") # Avoid logging full description unless needed

    # --- 1. Generate Problematic using Gemini ---
    prompt = f"""I want you to create a technical problematic using a key issue composed of a title and a detailed description.
    Here is the title of the key issue to deal with: <title>{title}</title>
    
    And here is the associated description: <description>{description}</description>
    
    This key issue is part of the following technical topic: <technical_topic>{technical_topic}</technical_topic>
    
    The problematic must be in interrogative form.
    As the output, I only want you to provide the problematic found, nothing else.
    
    Here are examples of problematics that you could create, it shows the level of specificity that you should aim for:
    
    Example 1: 'How can a method for allocating radio resources in a non-GSO satellite communication system, operating in frequency bands shared with geostationary satellite systems and particularly in high frequency bands such as Ka, minimize interference to geostationary systems, without causing reduced geographic coverage due to fixed high separation angle thresholds or incurring cost and sub-optimality from over-dimensioning the non-GSO satellite constellation?'
    Example 2: 'How to address the vulnerability of on-aircraft avionics software update integrity checks to system compromises and the logistical challenges of cryptographic key management in digital signature solutions, in order to establish a more secure and logistically efficient method for updating safety-critical avionics equipment?'
    Example 3: 'How can SIM cards be protected against collision attacks that aim to retrieve the secret key Ki by analyzing the input and output of the authentication algorithm during the standard GSM authentication process, given that current tamper-proof measures are insufficient to prevent this type of key extraction?'
    Example 4: 'How can a Trusted Application in a GlobalPlatform compliant TEE overcome the GP specification limitations that enforce client blocking during task execution, prevent partial task execution, and delete TA execution context between commands, to function as a persistent server with stateful sessions and asynchronous communication capabilities, thereby enabling server functionalities like continuous listening and non-blocking send/receive, currently impossible due to GP's sequential task processing and stateless TA operation?'
    
    As far as possible, avoid using acronyms in the problematic.
    Try to be about the same length as the examples if possible."""

    try:
        logger.info("Calling Gemini API to generate problematic...")
        # Use the specified model and configuration
        model_name = "gemini-1.5-flash-latest" # A generally available model; adjust as needed
        model = genai.GenerativeModel(model_name)

        # Request plain-text output; the SDK accepts the prompt as a plain string
        generate_config = GenerationConfig(response_mime_type="text/plain")

        # Make the API call
        response = await model.generate_content_async(
            prompt,
            generation_config=generate_config
        )

        # Extract the result
        problematic_result = response.text.strip()
        logger.info("Successfully generated problematic from Gemini.")
        logger.debug(f"Generated problematic: {problematic_result[:200]}...") # Log snippet

    except Exception as e:
        logger.error(f"Error calling Gemini API: {e}", exc_info=True)
        # Check for specific Gemini API errors if possible
        raise HTTPException(status_code=502, detail=f"Failed to generate problematic using LLM: {e}")

    if not problematic_result:
        logger.error("Gemini API returned an empty result.")
        raise HTTPException(status_code=502, detail="LLM returned an empty problematic.")

    # --- 2. Evaluate Specificity using External API ---
    API_URL = "https://organizedprogrammers-fastapi-problematic-specificity.hf.space"
    endpoint = f"{API_URL}/predict"
    data = {"text": problematic_result}

    try:
        logger.info(f"Calling specificity prediction API at {endpoint}...")
        prediction_response = requests.post(endpoint, json=data, timeout=30) # Added timeout
        prediction_response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)

        score_data = prediction_response.json()
        logger.info(f"Successfully received specificity score: {score_data}")

        # Validate the received score data against Pydantic model
        try:
            specificity_score = SpecificityScore(**score_data)
        except Exception as pydantic_error: # Catch validation errors
            logger.error(f"Failed to validate specificity score response: {pydantic_error}", exc_info=True)
            logger.error(f"Invalid data received from specificity API: {score_data}")
            raise HTTPException(status_code=502, detail="Invalid response format from specificity prediction API.")

    except requests.exceptions.RequestException as e:
        logger.error(f"Error calling specificity prediction API: {e}", exc_info=True)
        raise HTTPException(status_code=502, detail=f"Failed to call specificity prediction API: {e}")
    except Exception as e: # Catch other potential errors like JSON decoding
        logger.error(f"Unexpected error during specificity evaluation: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Internal error during specificity evaluation: {e}")


    # --- 3. Return Combined Result ---
    final_response = SpecificityEvaluationResponse(
        problematic=problematic_result,
        specificity=specificity_score
    )

    return final_response


# --- How to Run ---
if __name__ == "__main__":
    # Make sure to set environment variables for config (NEO4J_URI, NEO4J_PASSWORD, GEMINI_API_KEY, etc.)
    # or have a .env file in the same directory where you run this script.
    print("Starting API server...")
    print("Ensure required environment variables (e.g., NEO4J_URI, NEO4J_PASSWORD, GEMINI_API_KEY) are set or .env file is present.")
    # Run with uvicorn: uvicorn api:app --reload --host 0.0.0.0 --port 8000
    # The --reload flag is good for development. Remove it for production.
    uvicorn.run("api:app", host="0.0.0.0", port=8000, reload=True) # Use reload=False for production
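
# Example client calls (illustrative; assumes the server is running on localhost:8000):
#   curl -X POST http://localhost:8000/generate-key-issues \
#        -H "Content-Type: application/json" \
#        -d '{"query": "radio resource allocation in non-GSO satellite systems"}'
#   curl -X POST http://localhost:8000/evaluate-specificity \
#        -H "Content-Type: application/json" \
#        -d '{"title": "...", "description": "...", "technical_topic": "..."}'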