# algoforge_prime/core/generation_engine.py
from typing import List, Optional

from .llm_clients import call_huggingface_api, call_gemini_api, LLMResponse
from ..prompts.system_prompts import get_system_prompt  # relative import from the parent package
from ..prompts.prompt_templates import format_genesis_user_prompt
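
# NOTE: LLMResponse is defined in .llm_clients; from its usage below it is
# assumed to expose at least .success (bool), .text (str), .error (str),
# and .model_id_used (str).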

def generate_initial_solutions(
    problem_description: str,
    initial_hints: str,
    problem_type: str,  # e.g., "Python Algorithm with Tests"
    num_solutions_to_generate: int,
    llm_client_config: dict,  # {"type": ..., "model_id": ..., "temp": ..., "max_tokens": ...}
) -> List[str]:
    """
    Generates a list of initial solution strings using the configured LLM.

    Returns a list with one entry per generation attempt; each entry is either
    a solution string or an error message describing why that attempt failed.
    """
    solutions_or_errors = []
    # Select the system prompt by problem type; Python problems get a more specific prompt.
    system_p_key = "genesis_general"
    if "python" in problem_type.lower():
        system_p_key = "genesis_python"
    system_p_genesis = get_system_prompt(system_p_key)

    for i in range(num_solutions_to_generate):
        user_p_genesis = format_genesis_user_prompt(
            problem_description, initial_hints, i + 1, num_solutions_to_generate
        )
        
        llm_response_obj: Optional[LLMResponse] = None  # set by the matching client branch below
        if llm_client_config["type"] == "hf":
            llm_response_obj = call_huggingface_api(
                user_p_genesis, llm_client_config["model_id"],
                temperature=llm_client_config["temp"], max_new_tokens=llm_client_config["max_tokens"],
                system_prompt_text=system_p_genesis
            )
        elif llm_client_config["type"] == "google_gemini":
            llm_response_obj = call_gemini_api(
                user_p_genesis, llm_client_config["model_id"],
                temperature=llm_client_config["temp"], max_new_tokens=llm_client_config["max_tokens"],
                system_prompt_text=system_p_genesis
            )
        else:
            solutions_or_errors.append(f"ERROR (Genesis Attempt {i+1}): Unknown LLM client type '{llm_client_config['type']}'")
            continue
        
        if llm_response_obj.success:
            solutions_or_errors.append(llm_response_obj.text)
        else: 
            solutions_or_errors.append(f"ERROR (Genesis Attempt {i+1} with {llm_response_obj.model_id_used}): {llm_response_obj.error}")
            
    return solutions_or_errors
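
# --- Example usage (illustrative sketch) ---
# The config below mirrors the llm_client_config shape documented in the
# signature; the "type" value "hf" matches the branch above, but the model ID,
# temperature, and token budget are hypothetical, not project defaults. Run as
# a module (python -m algoforge_prime.core.generation_engine) so the relative
# imports resolve.
if __name__ == "__main__":
    example_config = {
        "type": "hf",                       # or "google_gemini"
        "model_id": "some-org/some-model",  # hypothetical model ID
        "temp": 0.7,
        "max_tokens": 1024,
    }
    results = generate_initial_solutions(
        problem_description="Write a function that reverses a singly linked list.",
        initial_hints="Prefer an iterative, O(1)-space approach.",
        problem_type="Python Algorithm with Tests",
        num_solutions_to_generate=3,
        llm_client_config=example_config,
    )
    for idx, result in enumerate(results, start=1):
        print(f"--- Candidate {idx} ---\n{result}\n")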