File size: 36,948 Bytes
1bcef92
 
 
ad78361
 
 
 
af84f08
c24aa63
1bcef92
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
616d2f3
 
ad78361
1bcef92
 
 
 
 
 
 
 
 
 
 
 
 
 
ad78361
 
 
 
 
 
 
 
 
 
 
 
 
2d42029
a87616b
2d42029
 
 
 
 
c0da7dd
 
 
 
 
 
 
 
 
 
 
 
50e92f4
 
 
 
 
 
 
 
1bcef92
 
 
 
 
ad78361
1bcef92
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ad78361
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1bcef92
 
 
 
 
 
 
 
 
 
 
 
2d42029
 
ad78361
1bcef92
 
 
a87616b
 
 
 
c8397bf
a87616b
 
ae438a7
a87616b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1bcef92
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ad78361
 
 
 
 
 
 
74b8a5e
ad78361
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3347729
ad78361
 
3347729
ad78361
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2d42029
 
 
a87616b
 
2d42029
a87616b
3da3c5b
a87616b
 
 
 
2d42029
 
a87616b
 
 
 
 
2d42029
a87616b
 
2d42029
a87616b
 
2d42029
a87616b
 
 
 
2d42029
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c0da7dd
 
 
 
 
 
 
 
 
 
 
 
 
399006c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
420dba4
399006c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1bcef92
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
import logging
import time
import uvicorn
import requests
import os # Added import for environment variables

from fastapi import FastAPI, HTTPException, Body
from fastapi.responses import PlainTextResponse
from pydantic import BaseModel, Field
from contextlib import asynccontextmanager
from typing import List, Dict, Any

# Import necessary components from your kig_core library
# Ensure kig_core is in the Python path or installed as a package
try:
    from kig_core.config import settings # Loads config on import
    from kig_core.schemas import PlannerState, KeyIssue as KigKeyIssue, GraphConfig
    from kig_core.planner import build_graph
    from kig_core.graph_client import neo4j_client # Import the initialized client instance
    from langchain_core.messages import HumanMessage
except ImportError as e:
    print(f"Error importing kig_core components: {e}")
    print("Please ensure kig_core is in your Python path or installed.")
    # You might want to exit or raise a clearer error if imports fail
    raise


import google.generativeai as genai

# Configure logging for the API
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# --- Pydantic Models for API Request/Response ---

class KeyIssueRequest(BaseModel):
    """Request body containing the user's technical query."""
    # Free-text technical query fed into the LangGraph workflow.
    query: str

class KeyIssueResponse(BaseModel):
    """Response body containing the generated key issues."""
    # Reuses the KeyIssue schema from kig_core so the API contract matches the graph output.
    key_issues: List[KigKeyIssue] # Use the KeyIssue schema from kig_core

class SpecificityEvaluationRequest(BaseModel):
    """Request body for /evaluate-specificity: one key issue plus its topic."""
    # Title of the key issue to turn into a problematic.
    title: str
    # Detailed description of the key issue.
    description: str
    # Technical topic the key issue belongs to.
    technical_topic: str

class SpecificityScore(BaseModel):
    """Schema of the external specificity-prediction API's JSON payload."""
    predicted_class: str
    score: float

class SpecificityEvaluationResponse(BaseModel):
    """Response body for /evaluate-specificity: generated problematic and its score."""
    # The interrogative problematic produced by Gemini.
    problematic: str
    # Score returned by the external specificity-prediction API.
    specificity: SpecificityScore

class ProblemDescriptionRequest(BaseModel):
    """Request body for /create-problem-description: one or more key-issue descriptions."""
    descriptions: List[str]
    technical_topic: str

class ProblemDescriptionResponse(BaseModel):
    """Response body carrying the LLM-generated problem description."""
    problem_description: str

# Format KI

class FormattedKeyIssue(BaseModel):
    """One key issue in the hardcoded /key-issue-format payload.

    Mirrors the dictionary structure returned by that endpoint: a numeric id,
    a short title, a paragraph description, a list of challenge statements,
    and a one-line potential impact.
    """
    id: int
    title: str
    description: str
    challenges: List[str]
    # Plain field: the previous `Field(..., alias="potential_impact")` declared
    # an alias identical to the field name, which is a no-op — callers populate
    # and serialize the same key either way.
    potential_impact: str

class KeyIssueFormatResponse(BaseModel):
    """Response body for /key-issue-format: the list of formatted key issues."""
    key_issues: List[FormattedKeyIssue]

class CreateSeveralProbDescRequest(BaseModel):
    """Request body for /create-several-probdesc.

    Both dicts are keyed by the same key-issue id: `descriptions` maps an id
    to its detailed description, `challenges` maps it to its challenge list.
    """
    descriptions: Dict[int, str]
    challenges: Dict[int, List[str]]
    technical_topic: str

class CreateSeveralProbDescResponse(BaseModel):
    """Response body carrying the generated problem descriptions."""
    problem_descriptions: List[str]

# --- Global Variables / State ---
# Keep the graph instance global for efficiency if desired,
# but consider potential concurrency issues if graph/LLMs have state.
# Rebuilding on each request is safer for statelessness.
app_graph = None # Will be initialized at startup (see lifespan())
# NOTE(review): gemini_client is never assigned anywhere in this file —
# lifespan() configures the `genai` module directly and endpoints check
# `genai` itself, so this variable appears to be dead state.
gemini_client = None # Will be initialized at startup

# --- Application Lifecycle (Startup/Shutdown) ---
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Handles startup and shutdown events.

    Startup: verifies the Neo4j connection (non-fatal on failure), builds the
    LangGraph application (fatal on failure), and configures the Gemini client
    from GEMINI_API_KEY (env var or settings; non-fatal on failure).
    Shutdown: relies on graph_client's atexit hook to close Neo4j.
    """
    global app_graph
    logger.info("API starting up...")
    # Initialize Neo4j client (already done on import by graph_client.py)
    # Verify connection (optional, already done by graph_client on init)
    try:
        logger.info("Verifying Neo4j connection...")
        # NOTE(review): reaches into the private _get_driver() accessor —
        # consider exposing a public verify method on neo4j_client.
        neo4j_client._get_driver().verify_connectivity()
        logger.info("Neo4j connection verified.")
    except Exception as e:
        # Deliberately non-fatal: the API still starts if Neo4j is down.
        logger.error(f"Neo4j connection verification failed on startup: {e}", exc_info=True)
        # Decide if the app should fail to start
        # raise RuntimeError("Failed to connect to Neo4j on startup.") from e

    # Build the LangGraph application
    logger.info("Building LangGraph application...")
    try:
        app_graph = build_graph()
        logger.info("LangGraph application built successfully.")
    except Exception as e:
        logger.error(f"Failed to build LangGraph application on startup: {e}", exc_info=True)
        # Graph failure IS fatal: every /generate-key-issues call needs it.
        raise RuntimeError("Failed to build LangGraph on startup.") from e

    # Initialize Gemini Client
    logger.info("Initializing Gemini client...")
    # NOTE(review): `genai` is imported unconditionally at module top, so this
    # check is always truthy as written; it would only matter if the import
    # were made optional.
    if genai:
        try:
            # Assuming GEMINI_API_KEY is set in environment or loaded via settings
            api_key = os.getenv("GEMINI_API_KEY") or getattr(settings, "GEMINI_API_KEY", None)
            if not api_key:
                 raise ValueError("GEMINI_API_KEY not found in environment or settings.")
            genai.configure(api_key=api_key)
            logger.info("Gemini client configured successfully.")
        except Exception as e:
            # Non-fatal: Gemini-backed endpoints will fail at request time instead.
            logger.error(f"Failed to configure Gemini client: {e}", exc_info=True)
    else:
        logger.warning("Gemini library not imported. Endpoints requiring Gemini will not work.")

    yield # API runs here

    # --- Shutdown ---
    logger.info("API shutting down...")
    # Close Neo4j connection (handled by atexit in graph_client.py)
    # neo4j_client.close() # Usually not needed due to atexit registration
    logger.info("Neo4j client closed (likely via atexit).")
    logger.info("API shutdown complete.")


# --- FastAPI Application ---
# Wires the lifespan handler above so Neo4j/graph/Gemini setup runs before serving.
app = FastAPI(
    title="Key Issue Generator, Specificity, and Description API",
    description="API to generate Key Issues, evaluate problematic specificity, and generate problem descriptions.",
    version="1.1.0",
    lifespan=lifespan # Use the lifespan context manager
)

# --- Helper Functions for Prompt Building ---

def format_ki_descriptions(descriptions: list[str]) -> str:
    """Render each key-issue description as a numbered, tagged line.

    Each entry becomes
    ``Here is the description of the key issue N: <descriptionN>...</descriptionN>``
    (1-based N); entries are separated by newlines.

    Args:
        descriptions: Detailed key-issue descriptions, in order.

    Returns:
        The newline-joined formatted lines (empty string for an empty list).
    """
    # Bug fix: the closing tag previously lacked the '/' (<descriptionN>...<descriptionN>),
    # producing malformed markup — inconsistent with the single-description
    # prompt in build_prompt(), which uses </description>.
    return "\n".join(
        f"Here is the description of the key issue {n}: "
        f"<description{n}>{text}</description{n}>"
        for n, text in enumerate(descriptions, start=1)
    )

def build_prompt(descriptions: list[str], technical_topic: str) -> str | None:
  prompt = None
  if len(descriptions) == 1:
    prompt = f"""I want you to create a problem description using a key issue explained in a detailed description.
Here is the description of the key issue: <description>{descriptions[0]}</description>

This key issue is part of the following technical topic: <technical_topic>{technical_topic}</technical_topic>

As the output, I only want you to provide the problem description found, nothing else.

Here are examples of problem descriptions that you could create, it shows the level of specificity that you should aim for:

Example 1: 'I am working on enhancing security in 4G and 5G telecommunications networks, specifically in the area of network slicing. My goal is to address vulnerabilities in the isolation of slices and improve the integrity of data transmission across different network slices, ensuring they are properly protected against unauthorized access and attacks.'
Example 2: 'I am working on improving user authentication in the context of 3GPP (3rd Generation Partnership Project) networks. Specifically, I need to solve issues related to the security and efficiency of the authentication process in the 5G network architecture, focusing on the use of cryptographic keys and identity management.'

As far as possible, avoid using acronyms in the problem description.
Try to be about the same length as the examples if possible."""

  elif len(descriptions) > 1:
    formatted_descriptions = format_ki_descriptions(descriptions)
    prompt = f"""I want you to create a problem description using several key issues, each explained with a detailed description.

{formatted_descriptions}

These key issues are part of the following technical topic: <technical_topic>{technical_topic}<technical_topic>

As the output, I only want you to provide the problem description found, nothing else.

Here are examples of problem descriptions that you could create, it shows the level of specificity that you should aim for:

Example 1: 'I am working on enhancing security in 4G and 5G telecommunications networks, specifically in the area of network slicing.
My goal is to address vulnerabilities in the isolation of slices and improve the integrity of data transmission across different network slices,
ensuring they are properly protected against unauthorized access and attacks.'

Example 2: 'I am working on improving user authentication in the context of 3GPP (3rd Generation Partnership Project) networks.
Specifically, I need to solve issues related to the security and efficiency of the authentication process in the 5G network architecture,
focusing on the use of cryptographic keys and identity management.'

As far as possible, avoid using acronyms in the problem description.
When creating the problem description that combines the topics of the key issues, be as intelligible as possible and ensure that the mix of key issues makes sense.
The description of the problem should not exceed 100 words or so."""

  return prompt

# --- API Endpoint ---
# Liveness probe
@app.get("/")
def read_root():
    """Report that the API is up (simple state-check endpoint)."""
    payload = {"status": "ok"}
    return payload

@app.post("/generate-key-issues", response_model=KeyIssueResponse)
async def generate_issues(request: KeyIssueRequest):
    """
    Accepts a technical query and returns a list of generated Key Issues.

    Runs the LangGraph workflow built at startup and maps workflow failures to
    HTTP status codes: 503 for database/connection issues, 502 for upstream
    LLM issues, 400 for input problems, 500 otherwise.
    """
    global app_graph
    # Guard: lifespan() may have failed to build the graph.
    if app_graph is None:
        logger.error("Graph application is not initialized.")
        raise HTTPException(status_code=503, detail="Service Unavailable: Graph not initialized")

    user_query = request.query
    if not user_query:
        raise HTTPException(status_code=400, detail="Query cannot be empty.")

    logger.info(f"Received request to generate key issues for query: '{user_query[:100]}...'")
    start_time = time.time()

    try:
        # --- Prepare Initial State for LangGraph ---
        # Note: Ensure PlannerState aligns with what build_graph expects
        initial_state: PlannerState = {
            "user_query": user_query,
            "messages": [HumanMessage(content=user_query)],
            "plan": [],
            "current_plan_step_index": -1, # Or as expected by your graph's entry point
            "step_outputs": {},
            "key_issues": [],
            "error": None
        }

        # --- Define Configuration (e.g., Thread ID for Memory) ---
        # Using a simple thread ID; adapt if using persistent memory
        # import hashlib
        # thread_id = hashlib.sha256(user_query.encode()).hexdigest()[:8]
        # config: GraphConfig = {"configurable": {"thread_id": thread_id}}
        # If not using memory, config can be simpler or empty based on LangGraph version
        config: GraphConfig = {"configurable": {}} # Adjust if thread_id/memory is needed

        # --- Execute the LangGraph Workflow ---
        logger.info("Invoking LangGraph workflow...")
        # Use invoke for a single result, or stream if you need intermediate steps
        final_state = await app_graph.ainvoke(initial_state, config=config)
        # If using stream:
        # final_state = None
        # async for step_state in app_graph.astream(initial_state, config=config):
        #     # Process intermediate states if needed
        #     node_name = list(step_state.keys())[0]
        #     logger.debug(f"Graph step completed: {node_name}")
        #     final_state = step_state[node_name] # Get the latest full state output

        end_time = time.time()
        logger.info(f"Workflow finished in {end_time - start_time:.2f} seconds.")

        # --- Process Final Results ---
        if final_state is None:
             logger.error("Workflow execution did not produce a final state.")
             raise HTTPException(status_code=500, detail="Workflow execution failed to produce a result.")

        # Workflow errors are carried in the state's "error" slot, not raised.
        if final_state.get("error"):
            error_msg = final_state.get("error", "Unknown error")
            logger.error(f"Workflow failed with error: {error_msg}")
            # Map internal errors to appropriate HTTP status codes
            status_code = 500 # Internal Server Error by default
            if "Neo4j" in error_msg or "connection" in error_msg.lower():
                status_code = 503 # Service Unavailable (database issue)
            elif "LLM error" in error_msg or "parse" in error_msg.lower():
                 status_code = 502 # Bad Gateway (issue with upstream LLM)

            raise HTTPException(status_code=status_code, detail=f"Workflow failed: {error_msg}")

        # --- Extract Key Issues ---
        # Ensure the structure matches KeyIssueResponse and KigKeyIssue Pydantic model
        generated_issues_data = final_state.get("key_issues", [])

        # Validate and convert if necessary (Pydantic usually handles this via response_model)
        # NOTE(review): response_model validation actually happens during
        # response serialization, after this function returns — so this
        # except is unlikely ever to fire here; verify whether it is needed.
        try:
            # Pydantic will validate against KeyIssueResponse -> List[KigKeyIssue]
            response_data = {"key_issues": generated_issues_data}
            logger.info(f"Successfully generated {len(generated_issues_data)} key issues.")
            return response_data
        except Exception as pydantic_error: # Catch potential validation errors
             logger.error(f"Failed to validate final key issues against response model: {pydantic_error}", exc_info=True)
             logger.error(f"Data that failed validation: {generated_issues_data}")
             raise HTTPException(status_code=500, detail="Internal error: Failed to format key issues response.")


    except HTTPException as http_exc:
        # Re-raise HTTPExceptions directly
        raise http_exc
    except ConnectionError as e:
        logger.error(f"Connection Error during API request: {e}", exc_info=True)
        raise HTTPException(status_code=503, detail=f"Service Unavailable: {e}")
    except ValueError as e:
         logger.error(f"Value Error during API request: {e}", exc_info=True)
         raise HTTPException(status_code=400, detail=f"Bad Request: {e}") # Often input validation issues
    except Exception as e:
        logger.error(f"An unexpected error occurred during API request: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Internal Server Error: An unexpected error occurred.")


@app.post("/evaluate-specificity", response_model=SpecificityEvaluationResponse)
async def evaluation(request: SpecificityEvaluationRequest):
    """
    Generates a technical problematic using Gemini based on title, description,
    and topic, then evaluates its specificity using an external API (fine-tuned model for specificity).

    Raises 400 on missing fields, 502 on LLM or prediction-API failures,
    503 if Gemini is unavailable.
    """
    # Check if Gemini library was imported and configured
    # NOTE(review): `genai` is imported unconditionally at module top, so this
    # is always truthy as written; it does not detect a failed configure().
    if not genai:
        logger.error("Gemini client is not available or configured.")
        raise HTTPException(status_code=503, detail="Service Unavailable: Gemini client not configured")

    title = request.title
    description = request.description
    technical_topic = request.technical_topic

    if not all([title, description, technical_topic]):
        raise HTTPException(status_code=400, detail="Missing required fields: title, description, or technical_topic.")

    logger.info("Received request for specificity evaluation.")
    logger.debug(f"Title: {title}, Topic: {technical_topic}") # Avoid logging full description unless needed

    # --- 1. Generate Problematic using Gemini ---
    prompt = f"""I want you to create a technical problematic using a key issue composed of a title and a detailed description.
    Here is the title of the key issue to deal with: <title>{title}</title>
    
    And here is the associated description: <description>{description}</description>
    
    This key issue is part of the following technical topic: <technical_topic>{technical_topic}</technical_topic>
    
    The problematic must be in interrogative form.
    As the output, I only want you to provide the problematic found, nothing else.
    
    Here are examples of problematics that you could create, it shows the level of specificity that you should aim for:
    
    Example 1: 'How can a method for allocating radio resources in a non-GSO satellite communication system, operating in frequency bands shared with geostationary satellite systems and particularly in high frequency bands such as Ka, minimize interference to geostationary systems, without causing reduced geographic coverage due to fixed high separation angle thresholds or incurring cost and sub-optimality from over-dimensioning the non-GSO satellite constellation?'
    Example 2: 'How to address the vulnerability of on-aircraft avionics software update integrity checks to system compromises and the logistical challenges of cryptographic key management in digital signature solutions, in order to establish a more secure and logistically efficient method for updating safety-critical avionics equipment?'
    Example 3: 'How can SIM cards be protected against collision attacks that aim to retrieve the secret key Ki by analyzing the input and output of the authentication algorithm during the standard GSM authentication process, given that current tamper-proof measures are insufficient to prevent this type of key extraction?'
    Example 4: 'How can a Trusted Application in a GlobalPlatform compliant TEE overcome the GP specification limitations that enforce client blocking during task execution, prevent partial task execution, and delete TA execution context between commands, to function as a persistent server with stateful sessions and asynchronous communication capabilities, thereby enabling server functionalities like continuous listening and non-blocking send/receive, currently impossible due to GP's sequential task processing and stateless TA operation?'
    
    As far as possible, avoid using acronyms in the problematic.
    Try to be about the same length as the examples if possible."""

    try:
        logger.info("Calling Gemini API to generate problematic...")
        # Use the specified model and configuration
        model_name = "gemini-2.5-flash-preview-04-17"
        model = genai.GenerativeModel(model_name)

        response = model.generate_content(prompt)
        # Extract the result
        problematic_result = response.text.strip()
        logger.info("Successfully generated problematic from Gemini.")
        logger.debug(f"Generated problematic: {problematic_result[:200]}...") # Log snippet

    except Exception as e:
        logger.error(f"Error calling Gemini API: {e}", exc_info=True)
        # Check for specific Gemini API errors if possible
        raise HTTPException(status_code=502, detail=f"Failed to generate problematic using LLM: {e}")

    if not problematic_result:
         logger.error("Gemini API returned an empty result.")
         raise HTTPException(status_code=502, detail="LLM returned an empty problematic.")

    # --- 2. Evaluate Specificity using External API ---
    # Hosted fine-tuned classifier that scores how specific the problematic is.
    API_URL = "https://organizedprogrammers-fastapi-problematic-specificity.hf.space"
    endpoint = f"{API_URL}/predict"
    data = {"text": problematic_result}

    try:
        logger.info(f"Calling specificity prediction API at {endpoint}...")
        prediction_response = requests.post(endpoint, json=data, timeout=30) # Added timeout
        prediction_response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)

        score_data = prediction_response.json()
        logger.info(f"Successfully received specificity score: {score_data}")

        # Validate the received score data against Pydantic model
        try:
            specificity_score = SpecificityScore(**score_data)
        except Exception as pydantic_error: # Catch validation errors
            logger.error(f"Failed to validate specificity score response: {pydantic_error}", exc_info=True)
            logger.error(f"Invalid data received from specificity API: {score_data}")
            raise HTTPException(status_code=502, detail="Invalid response format from specificity prediction API.")

    except requests.exceptions.RequestException as e:
        logger.error(f"Error calling specificity prediction API: {e}", exc_info=True)
        raise HTTPException(status_code=502, detail=f"Failed to call specificity prediction API: {e}")
    except Exception as e: # Catch other potential errors like JSON decoding
         logger.error(f"Unexpected error during specificity evaluation: {e}", exc_info=True)
         raise HTTPException(status_code=500, detail=f"Internal error during specificity evaluation: {e}")


    # --- 3. Return Combined Result ---
    final_response = SpecificityEvaluationResponse(
        problematic=problematic_result,
        specificity=specificity_score
    )
    return final_response


@app.post("/create-problem-description", response_model=ProblemDescriptionResponse)
async def gen_problem_description(request: ProblemDescriptionRequest):
    """
    Generates a problem description using Gemini based on one or more detailed
    descriptions and a technical topic.

    Raises 400 on missing input, 502 on LLM failure/empty output,
    503 if Gemini is unavailable.
    """
    # Check if Gemini library was imported and configured
    # NOTE(review): `genai` is imported unconditionally at module top, so this
    # is always truthy as written; it does not detect a failed configure().
    if not genai:
        logger.error("Gemini client is not available or configured.")
        raise HTTPException(status_code=503, detail="Service Unavailable: Gemini client not configured")

    descriptions = request.descriptions
    technical_topic = request.technical_topic

    # Validate input: need at least one description and a topic
    if not descriptions: # Check if the list is empty
        raise HTTPException(status_code=400, detail="Field 'descriptions' cannot be empty.")
    if not technical_topic:
        raise HTTPException(status_code=400, detail="Missing required field: technical_topic.")

    logger.info(f"Received request for problem description generation with {len(descriptions)} description(s).")
    logger.debug(f"Topic: {technical_topic}")

    # --- Build Prompt using helper function ---
    # build_prompt() picks the single- or multi-description template.
    prompt = build_prompt(descriptions, technical_topic)

    if prompt is None:
        # This case should theoretically be caught by the validation above, but good practice to check
        logger.error("Failed to build prompt (likely empty description list).")
        raise HTTPException(status_code=500, detail="Internal error: Could not build prompt.")

    try:
        logger.info("Calling Gemini API to generate problem description...")
        # Use the specified model and configuration
        model_name = "gemini-2.5-flash-preview-04-17"
        model = genai.GenerativeModel(model_name)

        response = model.generate_content(prompt)
        # Extract the result
        result_text = response.text.strip()
        logger.info("Successfully generated problem description from Gemini.")
        logger.debug(f"Generated description: {result_text[:200]}...") # Log snippet

    except Exception as e:
        logger.error(f"Error calling Gemini API for problem description: {e}", exc_info=True)
        # Check for specific Gemini API errors if possible
        raise HTTPException(status_code=502, detail=f"Failed to generate problem description using LLM: {e}")

    if not result_text:
         logger.error("Gemini API returned an empty result for problem description.")
         raise HTTPException(status_code=502, detail="LLM returned an empty problem description.")

    # --- Return Result ---
    # Return as JSON using the Pydantic model
    return ProblemDescriptionResponse(problem_description=result_text)
    # Alternative: Return as plain text
    # return PlainTextResponse(content=result_text)

@app.post("/key-issue-format", response_model=KeyIssueFormatResponse)
async def return_key_issue():
    """
    Returns a hardcoded example of formatted key issues.
    This endpoint does not accept any input body.

    NOTE(review): this is a fixture/demo endpoint — the payload below is a
    static example, not generated data. Being side-effect free and input-free,
    it could arguably be a GET; confirm with API consumers before changing.
    """
    logger.info("Received request for hardcoded key issue format.")
    # Hardcoded result data
    result = {'key_issues': [{'id': 1, 'title': 'MCX Priority Handling Conflict: AF vs. Subscription Data', 'description': 'Conflicting prioritization between Application Function (AF) requests and subscription data for Mission Critical Services (MCX) needs resolution. Allowing AFs to override subscription priorities raises security concerns and requires robust error handling. Consistent enforcement across the 5GC is crucial.', 'challenges': ['Guaranteeing precedence of dynamic AF requests over subscription data.', 'Addressing security implications of AF-based priority overrides.', 'Defining detailed error handling for unfulfilled AF priority requests.'], 'potential_impact': 'Unreliable MCX service prioritization and potential security vulnerabilities.'}, {'id': 2, 'title': 'ATSSS Complexity and Interoperability Across 5GC and EPC', 'description': 'Access Traffic Steering, Switching, and Splitting (ATSSS) introduces complexity, especially with EPC interworking. The optional nature of ATSSS raises interoperability concerns and requires clear fallback mechanisms. Consistent steering mode handling across 3GPP and non-3GPP access networks is essential.', 'challenges': ['Managing mobility, security, and charging in ATSSS interworking scenarios.', 'Defining fallback mechanisms for non-ATSSS capable UEs and networks.', 'Ensuring consistent steering mode handling across different access networks.'], 'potential_impact': 'Interoperability issues and inconsistent ATSSS behavior.'}, {'id': 3, 'title': 'PCF Scalability and Performance with MCX and ATSSS', 'description': 'The Policy Control Function (PCF) plays a central role in MCX and ATSSS, handling subscription data changes, dynamic AF requests, and ATSSS control. This raises concerns about PCF scalability and performance, especially with a high volume of users and services. The complexity of PCC rules impacts PCF processing time.', 'challenges': ['Handling subscription data changes and dynamic AF requests efficiently.', 'Optimizing PCF performance with complex PCC rules for MCX and ATSSS.', 'Ensuring PCF scalability to support a large number of users and services.'], 'potential_impact': 'PCF bottlenecks and degraded network performance.'}, {'id': 4, 'title': 'Policy Conflict Resolution During Network Slice Replacement', 'description': "Prioritizing the replaced S-NSSAI's policy during network slice replacement might negate benefits of the alternative slice. There is a need for more granularity in how the PCF combines policies, especially when policy parameters are contradictory. Need to evaluate when the alternative S-NSSAI policy should take precedence.", 'challenges': ['Developing sophisticated policy conflict resolution mechanisms.', 'Handling contradictory policy parameters during slice replacement.', 'Determining when alternative slice policy should override replaced slice policy.'], 'potential_impact': 'Suboptimal performance or negated benefits of the alternative network slice.'}, {'id': 5, 'title': 'SMF Decision Logic for Retaining PDU Sessions During Slice Replacement', 'description': "The SMF's criteria for retaining existing PDU Sessions during slice replacement are vague and need more precise definition. Lack of clear guidelines could lead to inconsistent behavior across different implementations. Need to define other factors influencing this decision to avoid suboptimal routing.", 'challenges': ['Defining clear criteria for SMF decision-making on PDU session retention.', "Handling scenarios where PCF can be reused but shouldn't be.", 'Ensuring consistent behavior across different SMF implementations.'], 'potential_impact': 'Inconsistent routing and unnecessary session re-establishment.'}, {'id': 6, 'title': 'Privacy in Multi-hop Network Slicing with SPNS Protocol', 'description': 'Protecting user privacy in multi-hop network slice deployments presents a challenge. Secure Slice Selection and RAN Authentication and Trust are areas that require standardization. Onion Routing integration needs to be defined to work with existing 3GPP protocols.', 'challenges': ['Defining mechanisms to protect user privacy across multiple RAN nodes.', 'Controlling visibility of slice identifiers and policies to RAN nodes.', 'Establishing trust between different RAN nodes participating in the same slice.', 'Integrating onion routing with existing 3GPP protocols.'], 'potential_impact': 'Potential privacy breaches and security vulnerabilities in multi-hop network slice deployments.'}, {'id': 7, 'title': 'Multi-Tenancy Security and Network Slice Isolation', 'description': "Multi-tenancy introduces significant security concerns in 5G network slicing. Current isolation mechanisms might be insufficient to prevent cross-slice interference or attacks. There is a need to define the terms 'isolation' and 'insulation' and their respective security requirements.", 'challenges': ['Preventing cross-slice interference or attacks in multi-tenant environments.', "Clearly defining 'isolation' and 'insulation' in the context of network slicing.", 'Optimally selecting network slice isolation points based on risk assessment.'], 'potential_impact': 'Security breaches and compromised isolation between network slices.'}, {'id': 8, 'title': 'Adversarial Machine Learning (AML) Attacks on 5G Systems', 'description': 'Adversarial Machine Learning (AML) introduces new security vulnerabilities in 5G systems. AML attacks on spectrum sharing and physical layer authentication have not been adequately considered. Standardizing or recommending AML-robust machine learning models is needed.', 'challenges': ['Standardizing AML-resilient ML models for critical 5G functions.', 'Defining security requirements for ML-enabled network elements.', 'Developing testing and validation frameworks for AML resilience.'], 'potential_impact': 'Compromised security and reliability of 5G networks due to AML attacks.'}, {'id': 9, 'title': 'V2X Application Support with Network Slicing: Information Abstraction', 'description': 'Efficiently presenting/abstracting the relevant network slice information to the V2X application layer needs further study. The V2X application needs to understand the capabilities and limitations of the underlying slice. The information needs to be abstracted efficiently.', 'challenges': ['Determining what network slice information is necessary for V2X applications.', 'Defining how network slice information should be formatted for V2X applications.', 'Enabling V2X applications to understand slice capabilities and limitations.'], 'potential_impact': 'Inefficient use of network slices by V2X applications.'}, {'id': 10, 'title': 'V2X Application Support with Network Slicing: Multi-PLMN Coordination', 'description': 'Supporting V2X applications seamlessly across different Public Land Mobile Networks (PLMNs) and across different network slices is a challenge. Coordination is crucial for consistent performance in roaming and non-roaming scenarios. Roaming needs to be taken into account.', 'challenges': ['Supporting V2X applications across different PLMNs and network slices.', 'Ensuring consistent V2X performance in roaming and non-roaming scenarios.', 'Coordinating network slices across different network operators.'], 'potential_impact': 'Inconsistent V2X performance when vehicles move between different networks.'}]}
    # FastAPI automatically converts the dictionary to a JSON response
    # We use the response_model for validation and documentation
    return result

@app.post("/create-several-probdesc", response_model=CreateSeveralProbDescResponse)
async def create_several_probdesc(request: CreateSeveralProbDescRequest):
    """
    Generate one problem description per (key issue, challenge) pair.

    For each key issue id in ``request.challenges``, the matching description
    is looked up in ``request.descriptions`` and the Gemini model is prompted
    once per non-empty challenge. Calls are made sequentially (not
    concurrently); the endpoint fails fast with HTTP 502 on the first LLM
    error, discarding any descriptions generated so far.

    Raises:
        HTTPException 503: Gemini client not imported/configured.
        HTTPException 400: descriptions, challenges, or technical_topic missing.
        HTTPException 502: a Gemini API call failed.
    """
    # Check if Gemini library was imported and configured
    if not genai:
        logger.error("Gemini client is not available or configured.")
        raise HTTPException(status_code=503, detail="Service Unavailable: Gemini client not configured")

    descriptions_map = request.descriptions
    challenges_map = request.challenges
    technical_topic = request.technical_topic

    # Basic input validation
    if not descriptions_map or not challenges_map or not technical_topic:
        raise HTTPException(status_code=400, detail="Missing required fields: descriptions, challenges, or technical_topic.")

    logger.info(f"Received request to generate multiple problem descriptions for topic: {technical_topic}")

    # --- Prepare and Execute Gemini Calls (sequential, fail-fast) ---
    model_name = "gemini-2.5-flash-preview-04-17"
    model = genai.GenerativeModel(model_name)
    generated_descriptions = []

    for key_issue_id, list_of_challenges in challenges_map.items():
        # Guard clauses: skip ids with no usable description or no challenges.
        description = descriptions_map.get(key_issue_id)
        if description is None:
            logger.warning(f"No description found for key_issue_id {key_issue_id}. Skipping its challenges.")
            continue
        if not list_of_challenges:
            logger.warning(f"Empty challenge list for key_issue_id {key_issue_id}. Skipping.")
            continue

        # Per-issue success counter for the summary log below. (A failure
        # raises immediately, so no failure counter is needed.)
        successful_count = 0

        for ind, challenge in enumerate(list_of_challenges):
            if not challenge:  # Skip empty challenge strings
                logger.warning(f"Skipping empty challenge string for key_issue_id {key_issue_id}.")
                continue

            prompt = f"""I want you to create a problem description using a key issue explained in a detailed description.
Here is the description of the key issue: <description>{description}</description>
This key issue is part of the following technical topic: <technical_topic>{technical_topic}</technical_topic>
And the main focus of the problem description must be the following: <focus>{challenge}</focus>

As the output, I only want you to provide the problem description found, nothing else.

Here are examples of problem descriptions that you could create, it shows the level of specificity that you should aim for:

Example 1: 'I am working on enhancing security in 4G and 5G telecommunications networks, specifically in the area of network slicing. My goal is to address vulnerabilities in the isolation of slices and improve the integrity of data transmission across different network slices, ensuring they are properly protected against unauthorized access and attacks.'
Example 2: 'I am working on improving user authentication in the context of 3GPP (3rd Generation Partnership Project) networks. Specifically, I need to solve issues related to the security and efficiency of the authentication process in the 5G network architecture, focusing on the use of cryptographic keys and identity management.'

As far as possible, avoid using acronyms in the problem description.
Try to be about the same length as the examples if possible."""

            try:
                logger.info(f"Calling Gemini API to generate problem description {ind+1} ...")
                response = model.generate_content(prompt)
                # Extract the generated text
                result_text = response.text.strip()
                generated_descriptions.append(result_text)
                successful_count += 1
                logger.info(f"Successfully generated problem description {ind+1} from Gemini.")
                logger.debug(f"Generated description: {result_text[:200]}...")  # Log snippet
            except Exception as e:
                logger.error(f"Error calling Gemini API for problem description {ind+1}: {e}", exc_info=True)
                # Fail fast: abort the whole request on the first LLM error.
                raise HTTPException(status_code=502, detail=f"Failed to generate problem description using LLM: {e}")

        logger.info(f"Successfully generated descriptions: {successful_count}/{len(list_of_challenges)} ")

    # --- Return Result ---
    return CreateSeveralProbDescResponse(problem_descriptions=generated_descriptions)


# --- How to Run ---
if __name__ == "__main__":
    # Configuration is read from environment variables (NEO4J_URI,
    # NEO4J_PASSWORD, GEMINI_API_KEY, ...) or from a .env file located in
    # the directory this script is launched from.
    print("Starting API server...")
    print("Ensure required environment variables (e.g., NEO4J_URI, NEO4J_PASSWORD, GEMINI_API_KEY) are set or .env file is present.")
    # Equivalent CLI invocation: uvicorn api:app --reload --host 0.0.0.0 --port 8000
    # reload=True is a development convenience; switch it off in production.
    uvicorn.run("api:app", host="0.0.0.0", port=8000, reload=True)