mgbam committed
Commit 48e5e22 · verified · 1 Parent(s): 3fd2bb1

Update core/evaluation_engine.py

Files changed (1)
  1. core/evaluation_engine.py +188 -0
core/evaluation_engine.py CHANGED
@@ -0,0 +1,188 @@
+# algoforge_prime/core/evaluation_engine.py
+import random
+import time
+import traceback
+# IMPORTANT: The following import is for a HYPOTHETICAL safe executor.
+# You would need to implement or find a robust sandboxing solution.
+# from .restricted_env_executor import execute_python_code_safely # Example
+
+from .llm_clients import call_huggingface_api, call_gemini_api, LLMResponse
+from ..prompts.system_prompts import get_system_prompt
+from ..prompts.prompt_templates import format_critique_user_prompt
+
+class EvaluationResult:
+    def __init__(self, score=0, critique_text="", passed_tests=0, total_tests=0, execution_summary=None, raw_llm_critique_response=None):
+        self.score = score # Final combined score
+        self.critique_text = critique_text # LLM-based critique + execution summary
+        self.passed_tests = passed_tests
+        self.total_tests = total_tests
+        self.execution_summary = execution_summary # Error or success message from code execution
+        self.raw_llm_critique_response = raw_llm_critique_response
+
+    def __str__(self): # For simple string representation if needed
+        return f"Score: {self.score}/10. Tests: {self.passed_tests}/{self.total_tests}. Summary: {self.execution_summary}. Critique: {self.critique_text[:100]}..."
+
+def _parse_score_from_llm_text(llm_text_output: str) -> int:
+    """Helper to parse 'Score: X/10' from the LLM's textual output."""
+    score = 0 # Default if not found or unparsable
+    if not llm_text_output or not isinstance(llm_text_output, str):
+        return score
+
+    try:
+        # Look for "Score: X/10" or "Score: X"
+        # More robust parsing might be needed depending on LLM variability
+        import re
+        match = re.search(r"Score:\s*(\d+)(?:\s*/\s*10)?", llm_text_output, re.IGNORECASE)
+        if match:
+            parsed_score_val = int(match.group(1))
+            score = max(1, min(parsed_score_val, 10)) # Clamp score to 1-10
+        else: # Fallback if the specific format is not found
+            print(f"INFO: evaluation_engine.py - 'Score: X/10' marker not found in LLM output. Assigning fallback score. Output: {llm_text_output[:100]}...")
+            score = random.randint(3, 6) # Assign a mediocre random score
+    except Exception as e:
+        print(f"WARNING: evaluation_engine.py - Error parsing score from LLM output '{llm_text_output[:100]}...': {e}")
+        score = random.randint(3, 5) # Fallback on parsing error
+    return score
+
+def _placeholder_safe_python_execution(code_string: str, user_tests_string: str) -> tuple[int, int, str]:
+    """
+    PLACEHOLDER for safe Python code execution.
+    **WARNING: THIS IS NOT SAFE FOR PRODUCTION. IT ONLY SIMULATES.**
+    Replace with a robust sandboxing mechanism (Docker, nsjail, WASM, etc.).
+    """
+    print(f"DEV_INFO: evaluation_engine.py - Entering PLACEHOLDER for code execution.")
+    print(f" Code (first 100 chars): {code_string[:100]}...")
+    print(f" Tests (first 100 chars): {user_tests_string[:100]}...")
+
+    if not user_tests_string.strip() or not code_string.strip():
+        return 0, 0, "SIMULATED: No tests provided or no code to test."
+
+    # Naive parsing of assert statements
+    test_lines = [line.strip() for line in user_tests_string.splitlines() if line.strip().startswith("assert")]
+    total_tests_found = len(test_lines)
+
+    if total_tests_found == 0:
+        return 0, 0, "SIMULATED: No 'assert' statements found in user tests."
+
+    # Extremely simplistic simulation logic (NOT REAL EXECUTION)
+    passed_count = 0
+    execution_log = ["SIMULATED EXECUTION LOG:"]
+    try:
+        # This is where real sandboxed execution would happen.
+        # We'll simulate based on keywords for demonstration.
+        if "syntax error" in code_string.lower() or "indentationerror" in code_string.lower():
+            execution_log.append(" - Simulated: Potential syntax error in generated code.")
+            # passed_count remains 0
+        elif "runtime error" in code_string.lower() or "exception" in code_string.lower():
+            execution_log.append(" - Simulated: Code might raise a runtime error.")
+            passed_count = random.randint(0, total_tests_found // 3) # Few pass
+        elif "return" not in code_string and any("==" in t for t in test_lines): # If expecting a return value
+            execution_log.append(" - Simulated: Code might be missing a crucial 'return' statement.")
+            passed_count = random.randint(0, total_tests_found // 2)
+        else: # Simulate some passing, some failing
+            passed_count = random.randint(total_tests_found // 2, total_tests_found)
+            execution_log.append(f" - Simulated: {passed_count} of {total_tests_found} tests likely passed.")
+
+        if passed_count < total_tests_found:
+            execution_log.append(f" - Simulated: {total_tests_found - passed_count} test(s) likely failed.")
+
+        summary = f"Simulated: {passed_count}/{total_tests_found} tests passed."
+        if passed_count < total_tests_found: summary += " Some tests likely failed."
+
+    except Exception as e_sim: # Error in our simulation logic
+        summary = f"Error during test SIMULATION logic: {str(e_sim)}"
+        passed_count = 0
+        execution_log.append(f" - ERROR in simulation: {e_sim}")
+
+    print(f"DEV_INFO: evaluation_engine.py - Placeholder execution result: {summary}")
+    return passed_count, total_tests_found, "\n".join(execution_log)
+
+
+def evaluate_solution_candidate(
+    solution_text: str,
+    problem_description: str,
+    problem_type: str,
+    user_provided_tests: str, # String of Python assert statements
+    llm_client_config: dict # {"type": ..., "model_id": ..., "temp": ..., "max_tokens": ...}
+) -> EvaluationResult:
+    """
+    Evaluates a single solution candidate.
+    """
+    llm_critique_output_text = "LLM critique could not be performed due to an earlier error or API issue."
+    llm_based_score = 0
+    raw_llm_critique_resp = None
+
+    # 1. LLM-based critique (if solution_text is not an error message itself)
+    if solution_text and not solution_text.startswith("ERROR"):
+        system_p_critique = get_system_prompt("critique_general") # problem_type can be used here too
+        user_p_critique = format_critique_user_prompt(problem_description, solution_text)
+
+        llm_response_obj = None # type: LLMResponse
+        if llm_client_config["type"] == "hf":
+            llm_response_obj = call_huggingface_api(
+                user_p_critique, llm_client_config["model_id"],
+                temperature=llm_client_config["temp"], max_new_tokens=llm_client_config["max_tokens"],
+                system_prompt_text=system_p_critique
+            )
+        elif llm_client_config["type"] == "google_gemini":
+            llm_response_obj = call_gemini_api(
+                user_p_critique, llm_client_config["model_id"],
+                temperature=llm_client_config["temp"], max_new_tokens=llm_client_config["max_tokens"],
+                system_prompt_text=system_p_critique
+            )
+
+        if llm_response_obj:
+            raw_llm_critique_resp = llm_response_obj.raw_response
+            if llm_response_obj.success:
+                llm_critique_output_text = llm_response_obj.text
+                llm_based_score = _parse_score_from_llm_text(llm_critique_output_text)
+            else:
+                llm_critique_output_text = f"Error during LLM critique (Model: {llm_response_obj.model_id_used}): {llm_response_obj.error}"
+                llm_based_score = 0 # Penalize for critique failure
+    elif solution_text and solution_text.startswith("ERROR"):
+        llm_critique_output_text = f"Solution was an error from Genesis: {solution_text}"
+        llm_based_score = 0
+
+
+    # 2. (Simulated) code execution, if applicable
+    passed_tests_count = 0
+    total_tests_count = 0
+    exec_summary_msg = "Automated tests not applicable or not run for this problem type/solution."
+
+    # Only run tests if it's a Python problem, tests are provided, and the solution isn't an error
+    if "python" in problem_type.lower() and user_provided_tests.strip() and solution_text and not solution_text.startswith("ERROR"):
+        # **IMPORTANT**: Replace with a REAL sandboxed executor for safety.
+        passed_tests_count, total_tests_count, exec_summary_msg = _placeholder_safe_python_execution(
+            solution_text, user_provided_tests
+        )
+    elif "python" in problem_type.lower() and not user_provided_tests.strip():
+        exec_summary_msg = "No user tests provided for this Python problem."
+
+
+    # 3. Combine scores into a final score (example heuristic)
+    final_score_calculated = llm_based_score
+    if total_tests_count > 0: # If tests were run
+        test_pass_ratio = passed_tests_count / total_tests_count
+        if test_pass_ratio < 0.5: # Penalize heavily if less than half the tests pass
+            final_score_calculated = max(1, int(llm_based_score * 0.5) - 1)
+        elif test_pass_ratio == 1.0 and passed_tests_count > 0: # All tests passed
+            final_score_calculated = min(10, llm_based_score + 1 if llm_based_score < 10 else 10) # Small bonus
+        else: # Some tests passed (ratio between 0.5 and 1.0)
+            final_score_calculated = int(llm_based_score * (0.6 + 0.4 * test_pass_ratio))
+    final_score_calculated = max(1, min(10, final_score_calculated)) # Ensure score is 1-10
+
+    # Construct comprehensive critique text for display
+    comprehensive_critique = f"{llm_critique_output_text}"
+    if total_tests_count > 0 or ("python" in problem_type.lower() and user_provided_tests.strip()): # Add test summary if applicable
+        comprehensive_critique += f"\n\n**Automated Test Summary (Simulated):**\n{exec_summary_msg}\n"
+        comprehensive_critique += f"Passed: {passed_tests_count}/{total_tests_count}"
+
+
+    return EvaluationResult(
+        score=final_score_calculated,
+        critique_text=comprehensive_critique,
+        passed_tests=passed_tests_count,
+        total_tests=total_tests_count,
+        execution_summary=exec_summary_msg,
+        raw_llm_critique_response=raw_llm_critique_resp
+    )
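
A minimal usage sketch (not part of this commit): it shows how evaluate_solution_candidate might be called, assuming the llm_client_config keys documented in its signature ("type", "model_id", "temp", "max_tokens"). The import path is inferred from the file header comment, and the model id, problem text, and settings are illustrative placeholders; a real call also needs whatever credentials core/llm_clients.py expects.

from core.evaluation_engine import evaluate_solution_candidate  # path assumed, not confirmed by the repo layout

candidate_code = """
def add(a, b):
    return a + b
"""

user_tests = "assert add(2, 3) == 5\nassert add(-1, 1) == 0"

llm_client_config = {
    "type": "google_gemini",            # or "hf" for the Hugging Face client
    "model_id": "placeholder-model-id", # placeholder, not a value from this repo
    "temp": 0.3,
    "max_tokens": 1024,
}

result = evaluate_solution_candidate(
    solution_text=candidate_code,
    problem_description="Write a function add(a, b) that returns the sum of two numbers.",
    problem_type="Python Algorithm",
    user_provided_tests=user_tests,
    llm_client_config=llm_client_config,
)
print(result)  # EvaluationResult.__str__ -> "Score: X/10. Tests: ..."

As the docstring of _placeholder_safe_python_execution stresses, the simulated test run must eventually be replaced by real, isolated execution. Below is a hedged sketch of one possible interim step: it runs each assert in a separate subprocess with a timeout and returns the same (passed, total, log) triple. A subprocess alone is still not a sandbox; the commit's own comments point to Docker, nsjail, or WASM for actual isolation.

import subprocess
import sys

def _run_tests_in_subprocess(code_string: str, user_tests_string: str, timeout_s: int = 5) -> tuple[int, int, str]:
    """Illustrative only: executes each assert in its own Python subprocess."""
    test_lines = [ln.strip() for ln in user_tests_string.splitlines() if ln.strip().startswith("assert")]
    if not test_lines or not code_string.strip():
        return 0, 0, "No tests or no code to run."

    passed = 0
    log = ["SUBPROCESS EXECUTION LOG:"]
    for test in test_lines:
        # Candidate code plus a single assert, run as a throwaway script
        script = f"{code_string}\n{test}\n"
        try:
            proc = subprocess.run(
                [sys.executable, "-c", script],
                capture_output=True, text=True, timeout=timeout_s,
            )
            if proc.returncode == 0:
                passed += 1
            else:
                log.append(f" - FAILED: {test}")
        except subprocess.TimeoutExpired:
            log.append(f" - TIMEOUT after {timeout_s}s: {test}")
    log.append(f"{passed}/{len(test_lines)} tests passed.")
    return passed, len(test_lines), "\n".join(log)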