Update core/evolution_engine.py
core/evolution_engine.py (CHANGED, +25 -20)
```diff
@@ -1,28 +1,33 @@
 # algoforge_prime/core/evolution_engine.py
 from .llm_clients import call_huggingface_api, call_gemini_api, LLMResponse
 from ..prompts.system_prompts import get_system_prompt
+# from ..prompts.prompt_templates import format_evolution_user_prompt # If you create one
 
 def evolve_solution(
-    original_solution_text,
-
-
-    problem_description,
-    problem_type,
-    llm_client_config #
-):
-
 
-    # Construct a more detailed user prompt for evolution
+    original_solution_text: str,
+    comprehensive_critique_text: str, # This includes LLM critique + test summary
+    original_combined_score: int,
+    problem_description: str,
+    problem_type: str,
+    llm_client_config: dict # {"type": ..., "model_id": ..., "temp": ..., "max_tokens": ...}
+) -> str: # Returns evolved solution text or an error string
+    """
+    Attempts to evolve a solution based on its critique and score.
+    """
+    system_p_evolve = get_system_prompt("evolution_general") # problem_type can be used for specialization
 
     user_p_evolve = (
-        f"Original Problem
-        f"The
-        f"
-        f"
-        "
-        "
+        f"Original Problem Context: \"{problem_description}\"\n\n"
+        f"The solution to be evolved achieved a score of {original_combined_score}/10.\n"
+        f"Here is the solution text:\n```python\n{original_solution_text}\n```\n\n"
+        f"Here is the comprehensive evaluation and critique it received (including any automated test feedback):\n'''\n{comprehensive_critique_text}\n'''\n\n"
+        f"Your Task: Based on the above, evolve the provided solution to make it demonstrably superior. "
+        f"Address any flaws, incompleteness, or inefficiencies mentioned in the critique or highlighted by test failures. "
+        f"If the solution was good, make it even better (e.g., more robust, more efficient, clearer). "
+        f"Clearly explain the key improvements you've made as an integral part of your evolved response (e.g., in comments or a concluding summary)."
     )
 
-    llm_response_obj = None
+    llm_response_obj = None # type: LLMResponse
     if llm_client_config["type"] == "hf":
         llm_response_obj = call_huggingface_api(
             user_p_evolve, llm_client_config["model_id"],
@@ -35,10 +40,10 @@ def evolve_solution(
             temperature=llm_client_config["temp"], max_new_tokens=llm_client_config["max_tokens"],
             system_prompt_text=system_p_evolve
         )
+    else:
+        return f"ERROR (Evolution): Unknown LLM client type '{llm_client_config['type']}'"
 
-    if llm_response_obj
+    if llm_response_obj.success:
         return llm_response_obj.text
-    elif llm_response_obj:
-        return f"ERROR (Evolution): {llm_response_obj.error}"
     else:
-        return "ERROR (Evolution):
+        return f"ERROR (Evolution with {llm_response_obj.model_id_used}): {llm_response_obj.error}"
```