Spaces:
Sleeping
Sleeping
Update core/evolution_engine.py
Browse files- core/evolution_engine.py +44 -0
core/evolution_engine.py
CHANGED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# algoforge_prime/core/evolution_engine.py
|
2 |
+
from .llm_clients import call_huggingface_api, call_gemini_api, LLMResponse
|
3 |
+
from ..prompts.system_prompts import get_system_prompt
|
4 |
+
|
5 |
+
def evolve_solution(
    original_solution_text,
    original_critique_text,  # This now includes test results if any
    original_score,
    problem_description,
    problem_type,
    llm_client_config,  # Dict: {"type": ..., "model_id": ..., "temp": ..., "max_tokens": ...}
):
    """Ask an LLM to produce an improved version of a candidate solution.

    Builds an evolution prompt from the original problem, the current best
    solution, its score, and its critique (which may embed test results),
    then dispatches to the configured LLM backend.

    Args:
        original_solution_text: The current leading solution's text/code.
        original_critique_text: Full critique of that solution, including
            any test results appended upstream.
        original_score: Numeric score (out of 10) the solution received.
        problem_description: The original problem statement, for context.
        problem_type: Passed through to select the system prompt variant.
        llm_client_config: Dict with keys "type" ("hf" or "google_gemini"),
            "model_id", "temp", and "max_tokens".

    Returns:
        The evolved solution text on success, or a string starting with
        "ERROR (Evolution):" describing the failure.
    """
    system_p_evolve = get_system_prompt("evolution_general", problem_type)

    # Construct a more detailed user prompt for evolution
    user_p_evolve = (
        f"Original Problem (for context): \"{problem_description}\"\n\n"
        f"The current leading solution (which had a score of {original_score}/10) is:\n```\n{original_solution_text}\n```\n"
        f"The comprehensive critique for this solution was:\n'''\n{original_critique_text}\n'''\n\n"
        f"Your mission: Evolve this solution. Make it demonstrably superior based on the critique and any test failures mentioned. "
        "If the original solution was just a sketch, flesh it out. If it had flaws (especially those highlighted by tests), fix them. "
        "If it was good, make it great. Explain the key improvements you've made as part of your response."
    )

    # Both backends share the same call signature, so dispatch through a
    # table instead of duplicating the call in per-provider branches.
    api_callers = {
        "hf": call_huggingface_api,
        "google_gemini": call_gemini_api,
    }
    client_type = llm_client_config.get("type")
    caller = api_callers.get(client_type)
    if caller is None:
        # Previously an unknown/missing client type fell through and was
        # reported as a generic "Unknown error" — name the real cause.
        return f"ERROR (Evolution): Unsupported LLM client type '{client_type}'."

    llm_response_obj = caller(
        user_p_evolve,
        llm_client_config["model_id"],
        temperature=llm_client_config["temp"],
        max_new_tokens=llm_client_config["max_tokens"],
        system_prompt_text=system_p_evolve,
    )

    if llm_response_obj and llm_response_obj.success:
        return llm_response_obj.text
    if llm_response_obj:
        return f"ERROR (Evolution): {llm_response_obj.error}"
    # Defensive fallback: the backend returned None (no response object).
    return "ERROR (Evolution): Unknown error during LLM call."
|