# algoforge_prime/core/generation_engine.py
print("DEBUG: Importing core.generation_engine") # For checking if this file is reached
try:
    # Using absolute imports assuming 'algoforge_prime' (containing 'core' and 'prompts')
    # is the top-level package context when app.py runs.
    from core.llm_clients import call_huggingface_api, call_gemini_api, LLMResponse
    from prompts.system_prompts import get_system_prompt
    from prompts.prompt_templates import format_genesis_user_prompt
    print("DEBUG: core.generation_engine - Imports successful")
except ImportError as e:
    print(f"ERROR: core.generation_engine - ImportError during its own imports: {e}")
    # This exception would prevent the rest of the file from defining generate_initial_solutions
    # and would be the root cause of the error seen in app.py.
    raise  # Re-raise to make it obvious in the logs if this is the point of failure

def generate_initial_solutions(
    problem_description: str,
    initial_hints: str,
    problem_type: str,  # e.g., "Python Algorithm with Tests"
    num_solutions_to_generate: int,
    llm_client_config: dict  # {"type": "hf" or "google_gemini", "model_id": "...", "temp": ..., "max_tokens": ...}
) -> list[str]:  # Returns a list of strings (solutions or error messages)
    """
    Generates a list of initial solution strings using the configured LLM.
    Returns a list of strings, where each string is either a solution or an error message.
    """
    print(f"DEBUG: generate_initial_solutions called with problem_type: {problem_type}, num_solutions: {num_solutions_to_generate}")
    solutions_or_errors = []

    # Select the system prompt based on the problem type.
    system_p_key = "genesis_general"  # Default system prompt key
    if "python" in problem_type.lower():
        system_p_key = "genesis_python"

    try:
        system_p_genesis = get_system_prompt(system_p_key)
        if not system_p_genesis:  # get_system_prompt returned an empty string (fallback)
            print(f"WARNING: core.generation_engine - System prompt for key '{system_p_key}' was empty. Proceeding without a system prompt for genesis.")
    except Exception as e:
        print(f"ERROR: core.generation_engine - Failed to get system prompt: {e}")
        # Options: proceed without a system prompt, or return an error for all solutions.
        # For now, log the failure and proceed without one.
        system_p_genesis = None

    for i in range(num_solutions_to_generate):
        print(f"DEBUG: Generating solution candidate {i+1}/{num_solutions_to_generate}")
        try:
            user_p_genesis = format_genesis_user_prompt(
                problem_description, initial_hints, i + 1, num_solutions_to_generate
            )
        except Exception as e:
            print(f"ERROR: core.generation_engine - Failed to format genesis user prompt: {e}")
            solutions_or_errors.append(f"ERROR (Genesis Attempt {i+1}): Internal error formatting prompt.")
            continue  # Skip to the next attempt
        llm_response_obj = None  # Will hold the LLMResponse for this attempt, or stay None on failure
        if not llm_client_config or "type" not in llm_client_config or "model_id" not in llm_client_config:
            error_msg = f"ERROR (Genesis Attempt {i+1}): Invalid llm_client_config provided."
            print(f"CRITICAL_ERROR: core.generation_engine - {error_msg}")
            solutions_or_errors.append(error_msg)
            continue
        try:
            if llm_client_config["type"] == "hf":
                llm_response_obj = call_huggingface_api(
                    user_p_genesis,
                    llm_client_config["model_id"],
                    temperature=llm_client_config.get("temp", 0.7),  # Use .get for safety
                    max_new_tokens=llm_client_config.get("max_tokens", 512),
                    system_prompt_text=system_p_genesis
                )
            elif llm_client_config["type"] == "google_gemini":
                llm_response_obj = call_gemini_api(
                    user_p_genesis,
                    llm_client_config["model_id"],
                    temperature=llm_client_config.get("temp", 0.7),
                    max_new_tokens=llm_client_config.get("max_tokens", 768),
                    system_prompt_text=system_p_genesis
                )
            else:
                solutions_or_errors.append(f"ERROR (Genesis Attempt {i+1}): Unknown LLM client type '{llm_client_config['type']}'")
                continue  # Skip to the next attempt
        except Exception as e_call:
            # This catch block matters if the call_..._api functions themselves raise exceptions
            # before returning an LLMResponse object (though they are designed to return LLMResponse(error=...)).
            error_msg = f"ERROR (Genesis Attempt {i+1} calling LLM {llm_client_config['model_id']}): Exception during API call: {type(e_call).__name__} - {str(e_call)}"
            print(f"ERROR: core.generation_engine - {error_msg}")
            solutions_or_errors.append(error_msg)
            continue
        if llm_response_obj and llm_response_obj.success:
            solutions_or_errors.append(llm_response_obj.text if llm_response_obj.text is not None else "")
            print(f"DEBUG: Solution candidate {i+1} generated successfully (Model: {llm_response_obj.model_id_used}).")
        elif llm_response_obj:  # Error occurred and was encapsulated in the LLMResponse
            solutions_or_errors.append(f"ERROR (Genesis Attempt {i+1} with {llm_response_obj.model_id_used}): {llm_response_obj.error}")
            print(f"DEBUG: Solution candidate {i+1} FAILED with error from LLMResponse (Model: {llm_response_obj.model_id_used}). Error: {llm_response_obj.error}")
        else:  # Should ideally not happen if an LLMResponse is always returned from call_..._api
            solutions_or_errors.append(f"ERROR (Genesis Attempt {i+1}): Unknown error, LLM response object was None.")
            print(f"CRITICAL_DEBUG: Solution candidate {i+1} - LLM response object was None. This indicates an issue in call_..._api not returning an LLMResponse object.")

    print(f"DEBUG: generate_initial_solutions finished. Returning {len(solutions_or_errors)} items.")
    return solutions_or_errors
# A print statement at the end of the module definition
print("DEBUG: core.generation_engine - Module fully defined, including generate_initial_solutions.")