# algoforge_prime/core/evolution_engine.py
print("DEBUG: Importing core.evolution_engine")

# --- Corrected Imports ---
# Absolute imports for modules outside the 'core' package
from prompts.system_prompts import get_system_prompt

# Absolute imports for other modules within the 'core' package (or relative for siblings)
from core.llm_clients import call_huggingface_api, call_gemini_api, LLMResponse

# Relative import for a sibling module within the 'core' package
# from .safe_executor import ExecutionResult # Not directly used in this module, but evaluation_output_obj might contain it
# from .evaluation_engine import EvaluationResultOutput # For type hinting the parameter

print("DEBUG: core.evolution_engine - Imports successful")

def evolve_solution(
    original_solution_text: str,
    evaluation_output_obj, # This object comes from evaluation_engine and contains EvaluationResultOutput
                           # It will have a .get_display_critique() method and .combined_score attribute
    problem_description: str,
    problem_type: str,
    llm_client_config: dict # {"type": ..., "model_id": ..., "temp": ..., "max_tokens": ...}
) -> str: # Returns evolved solution text or an error string
    """
    Attempts to evolve a solution based on its comprehensive evaluation details.
    """
    print(f"DEBUG: evolution_engine.py - Evolving solution. Problem type: {problem_type}")
    system_p_evolve = get_system_prompt("evolution_general") # problem_type can be used for specialization here too
    
    # Extract necessary info from the evaluation_output_obj
    # This assumes evaluation_output_obj is an instance of EvaluationResultOutput from evaluation_engine.py
    # or at least has these attributes/methods.
    try:
        critique_and_test_feedback = evaluation_output_obj.get_display_critique() 
        original_score = evaluation_output_obj.combined_score
    except AttributeError as e:
        print(f"ERROR: evolution_engine.py - evaluation_output_obj is missing expected attributes/methods: {e}")
        # Fallback if the object structure is not as expected
        critique_and_test_feedback = "Critique data was not in the expected format."
        original_score = 0  # Fallback score when the real one can't be read

    user_p_evolve = (
        f"Original Problem Context: \"{problem_description}\"\n\n"
        f"The solution to be evolved achieved a combined score of {original_score}/10.\n"
        f"Here is the original solution text:\n```python\n{original_solution_text}\n```\n\n"
        f"Here is the comprehensive evaluation it received (including LLM critique and automated test feedback if run):\n'''\n{critique_and_test_feedback}\n'''\n\n"
        f"Your Task: Based on ALL the information above (solution, LLM critique, and crucially any test execution results/errors mentioned in the evaluation), "
        f"evolve the provided solution to make it demonstrably superior. "
        f"Prioritize fixing any reported execution errors or failed tests. "
        f"Then, address other critique points like efficiency, clarity, or completeness. "
        f"Output the *complete evolved solution*. "
        f"Follow this with a brief explanation of the key changes and improvements you implemented, especially how you addressed test failures or execution issues."
    )

    llm_response_obj: LLMResponse
    if llm_client_config["type"] == "hf":
        llm_response_obj = call_huggingface_api(
            user_p_evolve, llm_client_config["model_id"],
            temperature=llm_client_config["temp"], max_new_tokens=llm_client_config["max_tokens"],
            system_prompt_text=system_p_evolve
        )
    elif llm_client_config["type"] == "google_gemini":
        llm_response_obj = call_gemini_api(
            user_p_evolve, llm_client_config["model_id"],
            temperature=llm_client_config["temp"], max_new_tokens=llm_client_config["max_tokens"],
            system_prompt_text=system_p_evolve
        )
    else:
        error_msg = f"ERROR (Evolution): Unknown LLM client type '{llm_client_config['type']}'"
        print(f"ERROR: evolution_engine.py - {error_msg}")
        return error_msg

    if llm_response_obj.success:
        return llm_response_obj.text
    else:
        # Error is already logged by call_..._api functions if it's from there
        return f"ERROR (Evolution with {llm_response_obj.model_id_used}): {llm_response_obj.error}"

print("DEBUG: core.evolution_engine - Module fully defined.")