Update api.py
api.py
@@ -28,7 +28,7 @@ except ImportError as e:
 import google.generativeai as genai
 
 try:
-    from scoring.specificity import load_model as load_specificity_model, predict_batch
+    from scoring.specificity import load_model as load_specificity_model, predict_batch, ProblematicList
 except ImportError as e:
     print(f"Error importing scoring component: {e}")
     print("Please ensure scoring is in your Python path or installed.")
@@ -283,8 +283,11 @@ def generate_problematics_and_scores(descriptions: list[str]):
         logger.error("Gemini API returned an empty result.")
         raise HTTPException(status_code=502, detail="LLM returned an empty problematic.")
 
-    # --- 2. Evaluate Specificity
-
+    # --- 2. Evaluate Specificity ---
+    if not interrogative_probs:
+        return []
+    problematic_list_input = ProblematicList(problematics=interrogative_probs)
+    results = predict_batch(problematic_list_input)
     return results
 
 
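For context, a minimal sketch of the interface this change assumes scoring/specificity.py exposes. The Pydantic base class, the list[str] field type, and the return shape of predict_batch are inferred from how api.py calls them here; they are assumptions, not the actual module.

# Hypothetical sketch of scoring/specificity.py -- inferred from the call sites above.
from pydantic import BaseModel  # assumed: data is modeled with Pydantic, as is typical in FastAPI projects


class ProblematicList(BaseModel):
    # Batch of problematic statements to score; assumed to be plain strings.
    problematics: list[str]


def load_model():
    # Assumed to load the specificity model once (e.g. at API startup); details unknown.
    raise NotImplementedError


def predict_batch(data: ProblematicList) -> list:
    # Assumed to return one specificity score entry per input problematic.
    raise NotImplementedError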