# algoforge_prime/core/evolution_engine.py
print("DEBUG: Importing core.evolution_engine")
from core.llm_clients import call_huggingface_api, call_gemini_api, LLMResponse
from prompts.system_prompts import get_system_prompt
# from core.evaluation_engine import EvaluationResultOutput # For type hinting if needed
print("DEBUG: core.evolution_engine - Imports successful")
def evolve_solution(
    original_solution_text: str,
    evaluation_output_obj,  # Expected: an EvaluationResultOutput object (see core.evaluation_engine)
    problem_description: str,
    problem_type: str,
    llm_client_config: dict
) -> str:
    """Ask the configured LLM to produce an improved version of a solution.

    Args:
        original_solution_text: The candidate solution (Python code) to improve.
        evaluation_output_obj: Evaluation result exposing combined_score and
            get_display_critique(); an object or dict carrying only a 'score'
            is tolerated as a fallback.
        problem_description: The original problem statement, for context.
        problem_type: The problem category (used only for logging here).
        llm_client_config: Dict with keys "type" ("hf" or "google_gemini"),
            "model_id", "temp", and "max_tokens".

    Returns:
        The evolved solution text, or an "ERROR (Evolution ...)" string on failure.
    """
print(f"DEBUG: evolution_engine.py - Evolving solution. Problem type: {problem_type}")
system_p_evolve = get_system_prompt("evolution_general")
try:
# Use the method from EvaluationResultOutput to get formatted critique and test results
critique_and_test_feedback = evaluation_output_obj.get_display_critique()
original_score = evaluation_output_obj.combined_score
except AttributeError: # Fallback if evaluation_output_obj is not as expected
critique_and_test_feedback = "Detailed evaluation feedback was not available or malformed."
original_score = 0 # Or try to get it from evaluation_output_obj if it's just a simple dict
if hasattr(evaluation_output_obj, 'score'): original_score = evaluation_output_obj.score
elif isinstance(evaluation_output_obj, dict) and 'score' in evaluation_output_obj: original_score = evaluation_output_obj['score']
    user_p_evolve = (
        f"Original Problem Context: \"{problem_description}\"\n\n"
        f"The solution to be evolved achieved a combined score of {original_score}/10.\n"
        f"Here is the original solution text:\n```python\n{original_solution_text}\n```\n\n"
        f"Here is the comprehensive evaluation it received (including LLM critique AND automated test feedback/errors if run):\n'''\n{critique_and_test_feedback}\n'''\n\n"
        f"Your Task: Based on ALL the information above (solution, LLM critique, and crucially any test execution results/errors mentioned in the evaluation), "
        f"evolve the provided solution to make it demonstrably superior. "
        f"**Your HIGHEST PRIORITY is to fix any reported execution errors or failed tests.** "
        f"Then, address other critique points like efficiency, clarity, or completeness. "
        f"Output ONLY the *complete, raw, evolved Python code block*. Do not include explanations outside the code block unless explicitly part of the solution's comments."
    )
    llm_response_obj = None
    if llm_client_config["type"] == "hf":
        llm_response_obj = call_huggingface_api(user_p_evolve, llm_client_config["model_id"], llm_client_config["temp"], llm_client_config["max_tokens"], system_p_evolve)
    elif llm_client_config["type"] == "google_gemini":
        llm_response_obj = call_gemini_api(user_p_evolve, llm_client_config["model_id"], llm_client_config["temp"], llm_client_config["max_tokens"], system_p_evolve)
    else:
        return f"ERROR (Evolution): Unknown LLM client type '{llm_client_config['type']}'"
    if llm_response_obj.success:
        # Basic cleanup of the LLM output in case it wraps the code in markdown fences.
        from core.utils import basic_text_cleanup  # Local import; assumes core.utils provides this helper.
        return basic_text_cleanup(llm_response_obj.text)
    else:
        return f"ERROR (Evolution with {llm_response_obj.model_id_used}): {llm_response_obj.error}"
print("DEBUG: core.evolution_engine - Module fully defined.")