mgbam committed
Commit 959fea7 · verified · 1 Parent(s): b544656

Update prompts/prompt_templates.py

Files changed (1)
  1. prompts/prompt_templates.py +70 -0
prompts/prompt_templates.py CHANGED
@@ -0,0 +1,70 @@
+# algoforge_prime/prompts/prompt_templates.py
+
+# These functions help construct the user-facing part of the prompts,
+# ensuring consistency and making it easier to manage complex prompt structures.
+
+def format_genesis_user_prompt(
+    problem_description: str,
+    initial_hints: str,
+    attempt_num: int,
+    total_attempts: int
+) -> str:
+    """Formats the user prompt for the Genesis (initial solution generation) stage."""
+    hints_section = f"Initial Thoughts/Constraints to consider: \"{initial_hints}\"" if initial_hints else "No specific initial hints provided."
+    return (
+        f"User Problem Description: \"{problem_description}\"\n\n"
+        f"{hints_section}\n\n"
+        f"Task: Please provide one distinct and complete algorithmic solution or well-commented code (as appropriate for the problem type) to address the problem description. "
+        f"This is solution attempt #{attempt_num} of {total_attempts}. "
+        f"If possible, try to offer a different approach or perspective compared to what might be a very obvious first attempt."
+    )
+
+def format_critique_user_prompt(
+    problem_description_context: str,  # A snippet for context
+    solution_text_to_evaluate: str
+) -> str:
+    """Formats the user prompt for the Critique (evaluation) stage."""
+    return (
+        f"Context: The overall problem being solved is related to: \"{problem_description_context[:200]}...\"\n\n"
+        f"Solution to Evaluate:\n"
+        f"```\n{solution_text_to_evaluate}\n```\n\n"
+        f"Task: Critically evaluate this solution according to the system instructions (correctness, efficiency, clarity, etc.). "
+        f"Remember to provide your overall assessment and conclude with the mandatory 'Score: X/10' line."
+    )
+
+def format_evolution_user_prompt(
+    problem_description_context: str,
+    original_solution_text: str,
+    comprehensive_critique_text: str,  # Includes LLM critique and test summary
+    original_score: int
+) -> str:
+    """Formats the user prompt for the Evolution (refinement) stage."""
+    return (
+        f"Context: The original problem is: \"{problem_description_context}\"\n\n"
+        f"Solution to Evolve (Original Score: {original_score}/10):\n"
+        f"```\n{original_solution_text}\n```\n\n"
+        f"Comprehensive Evaluation Received for the Above Solution:\n"
+        f"'''\n{comprehensive_critique_text}\n'''\n\n"
+        f"Task: Based on the provided solution and its comprehensive evaluation, your goal is to evolve it into a significantly improved version. "
+        f"Address all identified weaknesses, enhance its strengths, and ensure it is more robust, efficient, or clear. "
+        f"Output the *complete evolved solution*. "
+        f"Follow this with a brief explanation of the key changes and improvements you implemented."
+    )
+
+def format_code_test_analysis_user_prompt(
+    generated_code: str,
+    unit_tests_code: str,
+    test_execution_summary: str  # e.g., "Simulated: 2/3 tests passed. Some tests likely failed."
+) -> str:
+    """Formats the user prompt for asking an LLM to analyze code test results."""
+    return (
+        f"Generated Python Code:\n"
+        f"```python\n{generated_code}\n```\n\n"
+        f"Unit Tests Applied:\n"
+        f"```python\n{unit_tests_code}\n```\n\n"
+        f"Test Execution Summary:\n"
+        f"'''\n{test_execution_summary}\n'''\n\n"
+        f"Task: Please analyze these test results in relation to the generated Python code. "
+        f"Provide a concise explanation of what the test outcomes (passes, failures, errors) indicate about the code's correctness or potential issues. "
+        f"If tests failed, suggest likely reasons or areas in the code to inspect."
+    )
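
Usage sketch (not part of the commit): the formatters above are plain string builders, so a caller pairs their output with whatever system instructions the pipeline uses. The import path below assumes the algoforge_prime package root is on sys.path, and the example inputs are made up for illustration.

from prompts.prompt_templates import (
    format_genesis_user_prompt,
    format_critique_user_prompt,
)

# Build the Genesis user prompt for the first of three solution attempts.
genesis_prompt = format_genesis_user_prompt(
    problem_description="Return the length of the longest increasing subsequence of a list of integers.",
    initial_hints="Prefer an O(n log n) approach over the quadratic DP.",
    attempt_num=1,
    total_attempts=3,
)

# Wrap a candidate solution for the Critique stage; the formatter itself
# truncates the problem context to 200 characters.
critique_prompt = format_critique_user_prompt(
    problem_description_context="Longest increasing subsequence over a list of integers.",
    solution_text_to_evaluate="def lis(nums):\n    ...",
)

print(genesis_prompt)
print(critique_prompt)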