# algoforge_prime/core/evolution_engine.py
from .llm_clients import call_huggingface_api, call_gemini_api, LLMResponse
from ..prompts.system_prompts import get_system_prompt

def evolve_solution(
    original_solution_text: str,
    original_critique_text: str,  # Includes test results, if any were run
    original_score: float,
    problem_description: str,
    problem_type: str,
    llm_client_config: dict,  # {"type": ..., "model_id": ..., "temp": ..., "max_tokens": ...}
) -> str:
    """Ask the configured LLM to evolve a solution in light of its critique.

    Returns the evolved solution text on success, or an
    "ERROR (Evolution): ..." string describing the failure.
    """
    system_p_evolve = get_system_prompt("evolution_general", problem_type)
    
    # Build the user prompt: problem context, the current solution, its
    # critique, and explicit instructions for the evolution step.
    user_p_evolve = (
        f"Original Problem (for context): \"{problem_description}\"\n\n"
        f"The current leading solution (which had a score of {original_score}/10) is:\n```\n{original_solution_text}\n```\n"
        f"The comprehensive critique for this solution was:\n'''\n{original_critique_text}\n'''\n\n"
        f"Your mission: Evolve this solution. Make it demonstrably superior based on the critique and any test failures mentioned. "
        "If the original solution was just a sketch, flesh it out. If it had flaws (especially those highlighted by tests), fix them. "
        "If it was good, make it great. Explain the key improvements you've made as part of your response."
    )

    # Dispatch to the configured backend; an unknown type leaves this as None.
    llm_response_obj = None
    if llm_client_config["type"] == "hf":
        llm_response_obj = call_huggingface_api(
            user_p_evolve, llm_client_config["model_id"],
            temperature=llm_client_config["temp"], max_new_tokens=llm_client_config["max_tokens"],
            system_prompt_text=system_p_evolve
        )
    elif llm_client_config["type"] == "google_gemini":
        llm_response_obj = call_gemini_api(
            user_p_evolve, llm_client_config["model_id"],
            temperature=llm_client_config["temp"], max_new_tokens=llm_client_config["max_tokens"],
            system_prompt_text=system_p_evolve
        )

    if llm_response_obj and llm_response_obj.success:
        return llm_response_obj.text
    elif llm_response_obj:
        return f"ERROR (Evolution): {llm_response_obj.error}"
    else:
        # Reached only when llm_client_config["type"] matched no known
        # backend, so no LLM call was made at all.
        return f"ERROR (Evolution): Unsupported LLM client type '{llm_client_config.get('type')}'."