Update api.py
api.py CHANGED
@@ -28,7 +28,7 @@ except ImportError as e:
 import google.generativeai as genai
 
 try:
-    from scoring.specificity import load_model as load_specificity_model
+    from scoring.specificity import load_model as load_specificity_model, predict_batch
 except ImportError as e:
     print(f"Error importing scoring component: {e}")
     print("Please ensure scoring is in your Python path or installed.")
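The new import pulls in predict_batch from scoring.specificity, but that module itself is not shown in this diff. For orientation only, here is a minimal sketch of the interface the change appears to assume, inferred from the call sites later in the file (a list of strings in, per-item objects exposing problematic and score out); the name ScoredProblematic and the exact field layout are assumptions, not the real scoring.specificity API.

from dataclasses import dataclass
from typing import List

@dataclass
class ScoredProblematic:  # hypothetical name; the real return type is not shown in the diff
    problematic: str      # the interrogative problematic that was scored
    score: float          # specificity score assigned by the local model

def predict_batch(problematics: List[str]) -> List[ScoredProblematic]:
    """Assumed shape only: score each problematic with the locally loaded specificity model."""
    raise NotImplementedError  # the real implementation lives in scoring/specificity.py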
@@ -91,9 +91,10 @@ class CreateSeveralProbDescRequest(BaseModel):
     keys_issues: List[KeyIssueInput] # Changed field name and type
     technical_topic: str
 
-class
-
-
+class ProblemDescriptionItem(BaseModel):
+    problem_description: str
+    problematic: str
+    score: float
 
 # --- Global Variables / State ---
 # Keep the graph instance global for efficiency if desired,
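Each element of the new response model serializes to a small JSON object with three fields. A quick illustration with made-up values (Pydantic v2 syntax shown; on v1 use .json() instead of .model_dump_json()):

from pydantic import BaseModel

class ProblemDescriptionItem(BaseModel):
    problem_description: str
    problematic: str
    score: float

# Field values below are purely illustrative.
item = ProblemDescriptionItem(
    problem_description="Detailed description of one generated challenge...",
    problematic="How can ...?",
    score=0.87,
)
# Prints the three fields (problem_description, problematic, score) as JSON.
print(item.model_dump_json(indent=2))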
@@ -258,7 +259,6 @@ def evaluation(descriptions: list[str]):
     Example 3: 'How can SIM cards be protected against collision attacks that aim to retrieve the secret key Ki by analyzing the input and output of the authentication algorithm during the standard GSM authentication process, given that current tamper-proof measures are insufficient to prevent this type of key extraction?'
     Example 4: 'How can a Trusted Application in a GlobalPlatform compliant TEE overcome the GP specification limitations that enforce client blocking during task execution, prevent partial task execution, and delete TA execution context between commands, to function as a persistent server with stateful sessions and asynchronous communication capabilities, thereby enabling server functionalities like continuous listening and non-blocking send/receive, currently impossible due to GP's sequential task processing and stateless TA operation?'
 
-    As far as possible, avoid using acronyms in the problematic.
     Try to be about the same length as the examples if possible."""
 
     try:
@@ -283,41 +283,9 @@ def evaluation(descriptions: list[str]):
         logger.error("Gemini API returned an empty result.")
         raise HTTPException(status_code=502, detail="LLM returned an empty problematic.")
 
-
-
-
-    data = {"text": problematic_result}
-
-    try:
-        logger.info(f"Calling specificity prediction API at {endpoint}...")
-        prediction_response = requests.post(endpoint, json=data, timeout=30) # Added timeout
-        prediction_response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
-
-        score_data = prediction_response.json()
-        logger.info(f"Successfully received specificity score: {score_data}")
-
-        # Validate the received score data against Pydantic model
-        try:
-            specificity_score = SpecificityScore(**score_data)
-        except Exception as pydantic_error: # Catch validation errors
-            logger.error(f"Failed to validate specificity score response: {pydantic_error}", exc_info=True)
-            logger.error(f"Invalid data received from specificity API: {score_data}")
-            raise HTTPException(status_code=502, detail="Invalid response format from specificity prediction API.")
-
-    except requests.exceptions.RequestException as e:
-        logger.error(f"Error calling specificity prediction API: {e}", exc_info=True)
-        raise HTTPException(status_code=502, detail=f"Failed to call specificity prediction API: {e}")
-    except Exception as e: # Catch other potential errors like JSON decoding
-        logger.error(f"Unexpected error during specificity evaluation: {e}", exc_info=True)
-        raise HTTPException(status_code=500, detail=f"Internal error during specificity evaluation: {e}")
-
-
-    # --- 3. Return Combined Result ---
-    final_response = SpecificityEvaluationResponse(
-        problematic=problematic_result,
-        specificity=specificity_score
-    )
-    return final_response
+    # --- 2. Evaluate Specificity using the local scoring model ---
+    results = predict_batch(interrogative_probs)
+    return results
 
 
 # --- API Endpoint ---
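Note that the deleted block carried all of the error handling for the scoring step, while the new predict_batch call is unguarded. If a failure in the local model should still surface as a clean HTTP error, a minimal sketch of an equivalent guard (assuming predict_batch raises an ordinary Exception on failure) would be:

    # Sketch only: assumes predict_batch raises a plain Exception on failure;
    # the actual failure modes of scoring.specificity are not shown in this diff.
    try:
        results = predict_batch(interrogative_probs)
    except Exception as e:
        logger.error(f"Specificity scoring failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Internal error during specificity evaluation: {e}")
    return results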
@@ -597,7 +565,7 @@ async def return_key_issue():
     # We use the response_model for validation and documentation
     return result
 
-@app.post("/create-several-probdesc", response_model=
+@app.post("/create-several-probdesc", response_model=List[ProblemDescriptionItem])
 async def create_several_probdesc(request: CreateSeveralProbDescRequest):
     """
     Generates multiple problem descriptions, each focused on a specific challenge
@@ -677,9 +645,23 @@ Try to be about the same length as the examples if possible."""
 
     logger.info(f"Successfully generated descriptions: {successful_count}/{len(list_of_challenges)} ")
 
-
+    evaluated_problematics = evaluation(generated_descriptions)
+
+    final_output = []
+    for i in range(len(generated_descriptions)):
+        problem_description = generated_descriptions[i]
+        problematic = evaluated_problematics[i].problematic
+        score = evaluated_problematics[i].score
+
+        final_output.append(
+            ProblemDescriptionItem(
+                problem_description=problem_description,
+                problematic=problematic,
+                score=score
+            )
+        )
     # --- Return Result ---
-    return
+    return final_output
 
 
 # --- How to Run ---
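With the endpoint returning final_output, a client receives a JSON array matching List[ProblemDescriptionItem]. A rough usage sketch, assuming the API is served locally on port 8000; the fields inside keys_issues are placeholders, since KeyIssueInput is not shown in this diff:

import requests

# Placeholder payload: the real KeyIssueInput fields are not visible in this diff.
payload = {
    "keys_issues": [{"id": 1, "title": "Example key issue", "description": "Example description"}],
    "technical_topic": "GSM SIM card authentication",
}
resp = requests.post("http://localhost:8000/create-several-probdesc", json=payload, timeout=120)
resp.raise_for_status()
for item in resp.json():
    # Each item carries problem_description, problematic, and score.
    print(round(item["score"], 3), item["problematic"])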