mgbam committed on
Commit c984bb4 · verified · 1 Parent(s): 888bdc1

Update app.py

Files changed (1)
  1. app.py +245 -198
app.py CHANGED
@@ -1,230 +1,277 @@
  import gradio as gr
- from transformers import pipeline  # Placeholder for your chosen LLM integration strategy
- import os  # For potential API keys

- # --- KAIZEN'S CORE CONFIGURATION ---
- # Esteemed colleagues, this is where you'd integrate the Cognitive Core (LLM).
- # For this demonstration, we use illustrative placeholders.
- # The true EvoForge Prime would connect to a distributed network of specialized AI agents.

- # Option 1: Local Pipeline (Requires powerful hardware or smaller models in HF Space)
- # COGNITIVE_CORE_PIPELINE = pipeline("text-generation", model="mistralai/Mistral-7B-Instruct-v0.2", device_map="auto")

- # Option 2: Hugging Face Inference API (More scalable for public Spaces)
- # HF_TOKEN = os.getenv("HF_TOKEN")  # Ensure HF_TOKEN is in Space Secrets
- # from huggingface_hub import InferenceClient
- # COGNITIVE_CORE_CLIENT = InferenceClient(token=HF_TOKEN)
- # DEFAULT_MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.2"  # Or your preferred model

- def invoke_cognitive_core(prompt_text, task_description="generation", num_sequences=1, model_id=None):
-     """
-     A centralized function to interact with our LLM.
-     This is where the magic of actual LLM interaction would happen.
-     Replace this with your chosen LLM interaction method.
-     """
-     print(f"\n[KAIZEN LOG] Task: {task_description}")
-     print(f"[KAIZEN LOG] Invoking Cognitive Core with Prompt:\n{prompt_text[:300]}...\n")  # Log a snippet
-
-     # --- !!! ACTUAL LLM INTEGRATION POINT !!! ---
-     # This section needs to be replaced with real LLM calls.
-     # Example using a placeholder for now:
-     if task_description == "initial_solution_generation":
-         responses = [f"// EvoForge Candidate Solution {i+1} (Simulated)\n// Problem: {prompt_text[:50]}...\n// Approach: Employing principle of {['recursion', 'iteration', 'divide and conquer', 'dynamic programming'][i%4]}." for i in range(num_sequences)]
-         return responses
-     elif task_description == "solution_evaluation":
-         # In reality, you'd parse score and detailed critique
-         score = len(prompt_text) % 10 + 1  # Dummy score
-         critique = f"EvoForge Analysis: Candidate exhibits potential. Clarity: {score-1}/10. Perceived Efficiency: {score}/10. Novelty: {(score+1)%10}/10. Overall Score: {score}/10."
-         return critique, score
-     elif task_description == "solution_refinement":
-         return f"// EvoForge Synthesized Advancement (Simulated)\n// Original concept: {prompt_text[:60]}...\n// Refinement: Optimized data structures and streamlined control flow for enhanced performance and elegance."
-     else:
-         return [f"Cognitive Core Response to: {prompt_text[:70]}... (Simulation)"]
-     # --- END LLM INTEGRATION POINT ---


- # --- EVOFORGE PRIME - CORE LOGIC MODULE ---
- def orchestrate_evolutionary_cycle(problem_domain, problem_statement, cognitive_catalysts, divergence_factor):
-     if not problem_statement:
-         return "A problem statement is the seed of innovation. Please provide one.", "", "", "", ""
-
-     master_log = f"## EvoForge Prime - Cycle Report ##\n\n"
-     master_log += f"**Domain:** {problem_domain}\n"
-     master_log += f"**Problem Statement:** {problem_statement}\n"
-     master_log += f"**Cognitive Catalysts (Hints):** {cognitive_catalysts if cognitive_catalysts else 'None provided'}\n"
-     master_log += f"**Initial Solution Divergence Factor:** {divergence_factor}\n\n"
-
-     # STAGE 1: Algorithmic Genesis - Generating Diverse Solution Candidates
-     master_log += "### Stage 1: Algorithmic Genesis ###\n"
-     genesis_prompt = f"""
- As an advanced AI algorithm designer, address the following problem:
- Problem Domain: {problem_domain}
- Problem Statement: "{problem_statement}"
- Consider these initial thoughts/constraints: "{cognitive_catalysts if cognitive_catalysts else 'N/A'}"
-
- Generate {divergence_factor} distinct and innovative algorithmic solutions or high-level approaches.
- For each, provide a conceptual outline or pseudo-code.
  """
-     initial_candidates_raw = invoke_cognitive_core(genesis_prompt, "initial_solution_generation", num_sequences=divergence_factor)
-     master_log += f"Invoked Cognitive Core for {divergence_factor} initial candidates.\n"
-
-     if not initial_candidates_raw:
-         return master_log + "Error: Cognitive Core failed to generate initial candidates.", "", "", "", ""
-
-     # STAGE 2: Heuristic Evaluation Matrix - Assessing Candidate Viability
-     master_log += "\n### Stage 2: Heuristic Evaluation Matrix ###\n"
-     evaluated_candidates_display = []
-     evaluated_candidates_data = []
-
-     for idx, candidate_code in enumerate(initial_candidates_raw):
-         evaluation_prompt = f"""
- Critically evaluate the following algorithmic solution candidate for the problem: "{problem_statement}".
- Solution Candidate {idx+1}:
- ```
- {candidate_code}
- ```
- Assess its potential correctness, efficiency, clarity, and novelty. Provide a structured critique and assign an overall viability score from 1 (low) to 10 (high).
- Format:
- Critique: [Your detailed analysis]
- Score: [Score_Value]
- """
-         critique, score = invoke_cognitive_core(evaluation_prompt, "solution_evaluation")
-         evaluated_candidates_display.append(f"**Candidate {idx+1} (Score: {score}/10):**\n```\n{candidate_code}\n```\n**EvoForge Analysis:** {critique}\n---\n")
-         evaluated_candidates_data.append({"solution": candidate_code, "score": score, "critique": critique, "id": idx+1})
-         master_log += f"Evaluated Candidate {idx+1}. Score: {score}/10.\n"
-
-     if not evaluated_candidates_data:
-         return master_log + "\n".join(evaluated_candidates_display), "Error: No candidates available for evaluation.", "", "", ""
-
-     # STAGE 3: Apex Selection - Identifying the Most Promising Evolutionary Path
-     master_log += "\n### Stage 3: Apex Selection ###\n"
-     evaluated_candidates_data.sort(key=lambda x: x["score"], reverse=True)
-     apex_candidate_data = evaluated_candidates_data[0]
-     apex_candidate_solution = apex_candidate_data["solution"]
-     apex_candidate_critique = apex_candidate_data["critique"]
-     apex_candidate_score = apex_candidate_data["score"]
-     master_log += f"Apex Candidate (ID: {apex_candidate_data['id']}) selected with score {apex_candidate_score}/10.\n"
-
-     # STAGE 4: Algorithmic Refinement - Synthesizing an Advanced Iteration
-     master_log += "\n### Stage 4: Algorithmic Refinement ###\n"
-     refinement_prompt = f"""
- Given the problem: "{problem_statement}"
- And the current leading solution candidate (Score: {apex_candidate_score}/10):
- ```
- {apex_candidate_solution}
- ```
- And its evaluation: "{apex_candidate_critique}"
-
- Refine this solution. Enhance its efficiency, elegance, robustness, or explore a novel optimization.
- Explain the key improvements made in the refined version.
  """
-     refined_solution_text = invoke_cognitive_core(refinement_prompt, "solution_refinement")
-     master_log += "Invoked Cognitive Core for solution refinement.\n"
-
-     # Prepare outputs for Gradio Interface
-     initial_solutions_output_md = "## Stage 1 & 2: Generated Candidates & Evaluations\n" + "\n".join(evaluated_candidates_display)
-     apex_solution_output_md = f"## Stage 3: Apex Candidate Selection\n**Selected Candidate (ID: {apex_candidate_data['id']}, Score: {apex_candidate_score}/10):**\n```\n{apex_candidate_solution}\n```\n**Original EvoForge Analysis:** {apex_candidate_critique}"
-     refined_solution_output_md = f"## Stage 4: Synthesized Advancement\n**EvoForge Refined Solution:**\n```\n{refined_solution_text}\n```"
-
-     # Exposing the "thought process" prompts for transparency
-     developer_prompts_log = f"""
- ## Architect's Blueprint: Simulated Cognitive Core Dialogues ##
-
- **Objective:** {problem_statement}
-
- **1. Genesis Prompt Snippet (sent to Cognitive Core):**
- ```
- ... Generate {divergence_factor} distinct and innovative algorithmic solutions ...
- Problem Statement: "{problem_statement[:100]}..."
- Consider these initial thoughts/constraints: "{str(cognitive_catalysts)[:100]}..."
- ```
- *(Generated {len(initial_candidates_raw)} candidates)*
-
- **2. Evaluation Prompt Snippet (for each candidate, e.g., Candidate 1):**
- ```
- Critically evaluate the following algorithmic solution candidate ...
- Solution Candidate 1:
- {initial_candidates_raw[0][:150]}...
- ... Assign an overall viability score ...
- ```
- *(Selected Apex Candidate ID: {apex_candidate_data['id']} with score: {apex_candidate_score})*
-
- **3. Refinement Prompt Snippet (sent to Cognitive Core):**
- ```
- ... Refine this solution. Enhance its efficiency, elegance, robustness ...
- Leading solution candidate (Score: {apex_candidate_score}/10):
- {apex_candidate_solution[:150]}...
- And its evaluation: "{apex_candidate_critique[:100]}..."
- ```
- --- End of Blueprint ---
- """
-
-     return initial_solutions_output_md, apex_solution_output_md, refined_solution_output_md, developer_prompts_log, master_log

- # --- EVOFORGE PRIME - GRADIO INTERFACE MANIFEST ---
- interface_header = """
- # EvoForge Prime: Conceptual Algorithmic Evolution Engine
- ### *Crafted by Aelius Kaizen, Architect of Tomorrow's Algorithms*
-
- **Inspired by the principles demonstrated by Google DeepMind's AlphaEvolve, EvoForge Prime offers a glimpse into AI-driven algorithmic discovery and refinement.**
- This is a *conceptual demonstration* using simulated or actual LLM interactions (if configured) to mimic an evolutionary process:
- 1. **Genesis:** Diverse solutions are ideated by a Cognitive Core (LLM).
- 2. **Evaluation:** Each candidate is critically analyzed (by an LLM or heuristic).
- 3. **Apex Selection:** The most promising candidate is identified.
- 4. **Refinement:** The Cognitive Core attempts to enhance the apex candidate.
-
- **This is a blueprint, a thought experiment made interactive. The actual AlphaEvolve is a vastly complex, proprietary system.**
  """

- with gr.Blocks(theme=gr.themes.Glass(primary_hue="blue", secondary_hue="cyan"), title="EvoForge Prime") as evo_forge_interface:
-     gr.Markdown(interface_header)

      with gr.Row():
          with gr.Column(scale=1):
-             gr.Markdown("## I. Define the Algorithmic Frontier")
-             input_problem_domain = gr.Dropdown(
-                 ["Python Code Optimization", "Abstract Algorithm Design", "Mathematical Logic Puzzle", "Data Structure Innovation", "General Heuristic Improvement"],
-                 label="Select Problem Domain",
-                 value="Python Code Optimization"
              )
-             input_problem_statement = gr.Textbox(
-                 lines=6,
-                 label="Articulate the Problem or Goal",
-                 placeholder="e.g., 'Design a Python function to efficiently find the k-th smallest element in an unsorted list.' or 'Propose a novel heuristic for the Traveling Salesperson Problem.'"
              )
-             input_cognitive_catalysts = gr.Textbox(
                  lines=3,
-                 label="Seed with Cognitive Catalysts (Optional Hints / Constraints)",
-                 placeholder="e.g., 'Prioritize space complexity.' or 'Consider a non-comparison-based sort for a sub-problem.'"
              )
-             input_divergence_factor = gr.Slider(1, 5, value=3, step=1, label="Initial Solution Divergence (Number of Candidates)")

-             launch_evolution_btn = gr.Button("🚀 Initiate EvoForge Cycle", variant="primary")

          with gr.Column(scale=2):
-             gr.Markdown("## II. Evolutionary Trajectory & Artifacts")
              with gr.Tabs():
-                 with gr.TabItem("🧬 Genesis & Evaluation Matrix"):
-                     output_initial_candidates = gr.Markdown(label="Primary Solution Candidates & Heuristic Analysis")
-                 with gr.TabItem("🌟 Apex Candidate"):
-                     output_apex_candidate = gr.Markdown(label="Selected Evolutionary Path (Input to Refinement Stage)")
-                 with gr.TabItem("💡 Synthesized Advancement"):
-                     output_refined_solution = gr.Markdown(label="Refined Algorithmic Construct")
-                 with gr.TabItem("📜 Architect's Blueprint (LLM Dialogues)"):
-                     output_developer_prompts = gr.Markdown(label="Simulated/Actual Prompts to Cognitive Core")
-                 with gr.TabItem("📋 Cycle Report"):
-                     output_master_log = gr.Markdown(label="Comprehensive Log of the Evolutionary Cycle")
-
-     launch_evolution_btn.click(
-         orchestrate_evolutionary_cycle,
-         inputs=[input_problem_domain, input_problem_statement, input_cognitive_catalysts, input_divergence_factor],
-         outputs=[output_initial_candidates, output_apex_candidate, output_refined_solution, output_developer_prompts, output_master_log]
      )

      gr.Markdown("---")
-     gr.Markdown("**Aelius Kaizen's Disclaimer:** EvoForge Prime is a conceptual instrument designed to illuminate the *potential* of AI in algorithmic innovation. The 'Cognitive Core' (LLM) responses are probabilistic and serve as creative springboards, not definitive solutions. True algorithmic breakthroughs require rigorous mathematical proof, empirical validation, and the irreplaceable spark of human ingenuity complementing AI's prowess. This is but a humble step on a grand journey.")

- # To launch this marvel locally: python your_script_name.py
- # For Hugging Face Spaces, this file is 'app.py'. Include 'requirements.txt'.
  if __name__ == "__main__":
-     evo_forge_interface.launch(share=False)  # Set share=True for a public link if running locally

  import gradio as gr
+ from huggingface_hub import InferenceClient
+ import os
+ import random  # For a bit of mock variety if needed

+ # --- ALGOFORGE PRIME™ CONFIGURATION & SECRETS ---
+ # THE SACRED HF_TOKEN - ENSURE THIS IS IN YOUR SPACE SECRETS
+ HF_TOKEN = os.getenv("HF_TOKEN")

+ if not HF_TOKEN:
+     print("WARNING: HF_TOKEN not found. LLM calls will fail. Please add HF_TOKEN to your Space Secrets!")
+     # You might want to raise an error or display a persistent warning in the UI

+ # Initialize the Inference Client - The Conduit to a Universe of Models!
+ client = InferenceClient(token=HF_TOKEN)
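+ # (Note, added for clarity: with a missing token the client still constructs, but
+ # requests go out unauthenticated, and most hosted models reject or rate-limit them.)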

+ # Curated List of Models for Different Tasks (User Selectable!)
+ # You can expand this list. Ensure they are text-generation or instruct models.
+ AVAILABLE_MODELS = {
+     "General & Logic (Balanced)": "mistralai/Mistral-7B-Instruct-v0.2",
+     "Code Generation (Strong)": "codellama/CodeLlama-34b-Instruct-hf",  # Might be slow; consider a smaller CodeLlama
+     "Creative & Versatile (Fast)": "google/gemma-7b-it",
+     "Compact & Quick (Good for CPU tests)": "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+ }
+ DEFAULT_MODEL = "mistralai/Mistral-7B-Instruct-v0.2"

+ # --- CORE AI ENGINEERING: LLM INTERACTION FUNCTIONS ---

+ def call_llm_via_api(prompt_text, model_id, temperature=0.7, max_new_tokens=350, system_prompt=None):
      """
+     Centralized function to call the Hugging Face Inference API.
+     As an SRE, I like centralized, observable points of failure/success!
      """
+     if not HF_TOKEN:
+         return "ERROR: HF_TOKEN is not configured. Cannot contact the LLM Oracle."
+
+     full_prompt = prompt_text
+     if system_prompt:  # Some models use system prompts differently; this is a basic way
+         full_prompt = f"<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n{prompt_text} [/INST]"
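+         # NOTE, added for clarity: this [INST]/<<SYS>> wrapper is the Llama-2-style
+         # chat template; Mistral-Instruct accepts [INST] ... [/INST] but defines no
+         # <<SYS>> block, so other model families may need their own prompt format.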
+
+     try:
+         response_stream = client.text_generation(
+             full_prompt,
+             model=model_id,
+             max_new_tokens=max_new_tokens,
+             temperature=temperature if temperature > 0 else None,  # API expects None for temp 0
+             stream=False  # Keep it simple for this demo; stream=True for real-time
+         )
+         # The response structure might vary slightly based on the model/client version.
+         # Typically, it's just the generated string.
+         # If it returns a dict: response_stream.get("generated_text", response_stream)
+         return response_stream
+     except Exception as e:
+         print(f"LLM API Call Error ({model_id}): {e}")
+         return f"LLM API Error: Could not connect or process request with {model_id}. Details: {str(e)}"

+ # --- ALGOFORGE PRIME™ - THE GRAND ORCHESTRATOR ---

+ def run_algoforge_simulation(
+     problem_type, problem_description, initial_hints,
+     num_initial_solutions, selected_model_key,
+     gen_temp, gen_max_tokens,
+     eval_temp, eval_max_tokens,
+     evolve_temp, evolve_max_tokens
+ ):
+     if not problem_description:
+         return "ERROR: Problem Description is the lifeblood of innovation! Please provide it.", "", "", ""
+
+     if not HF_TOKEN:
+         # This message will appear in the output fields if the token is missing
+         no_token_msg = "CRITICAL ERROR: HF_TOKEN is missing. AlgoForge Prime™ cannot access its cognitive core. Please configure HF_TOKEN in Space Secrets."
+         return no_token_msg, no_token_msg, no_token_msg, no_token_msg
+
+     model_id = AVAILABLE_MODELS.get(selected_model_key, DEFAULT_MODEL)
+     log_entries = [f"**AlgoForge Prime™ Initializing...**\nSelected Model Core: {model_id} ({selected_model_key})\nProblem Type: {problem_type}"]
+
+     # --- STAGE 1: GENESIS ENGINE - MULTIVERSE SOLUTION GENERATION ---
+     log_entries.append("\n**Stage 1: Genesis Engine - Generating Initial Solution Candidates...**")
+     generated_solutions_raw = []
+     system_prompt_generate = f"You are an expert {problem_type.lower()} designer. Your goal is to brainstorm multiple diverse solutions."
+     for i in range(num_initial_solutions):
+         prompt_generate = (
+             f"Problem Description: \"{problem_description}\"\n"
+             f"Consider these initial thoughts/constraints: \"{initial_hints if initial_hints else 'None'}\"\n"
+             f"Please provide one distinct and complete solution/algorithm for this problem. "
+             f"This is solution attempt #{i+1} of {num_initial_solutions}. Try a different approach if possible."
+         )
+         log_entries.append(f" Sending to Genesis Engine (Attempt {i+1}):\n Model: {model_id}\n Prompt (snippet): {prompt_generate[:150]}...")
+         solution_text = call_llm_via_api(prompt_generate, model_id, gen_temp, gen_max_tokens, system_prompt_generate)
+         generated_solutions_raw.append(solution_text)
+         log_entries.append(f" Genesis Engine Response (Attempt {i+1} - Snippet): {solution_text[:150]}...")
+
+     if not any(sol and not sol.startswith("LLM API Error") and not sol.startswith("ERROR:") for sol in generated_solutions_raw):
+         log_entries.append(" Genesis Engine failed to produce viable candidates.")
+         return "No valid solutions generated by the Genesis Engine.", "", "", "\n".join(log_entries)

+     # --- STAGE 2: CRITIQUE CRUCIBLE - RUTHLESS EVALUATION ---
+     log_entries.append("\n**Stage 2: Critique Crucible - Evaluating Candidates...**")
+     evaluated_solutions_display = []
+     evaluated_sols_data = []
+     system_prompt_evaluate = "You are a highly critical and insightful AI algorithm evaluator. Your task is to assess a given solution based on clarity, potential correctness, and perceived efficiency. Provide a concise critique and a numerical score from 1 (poor) to 10 (excellent)."
+
+     for i, sol_text in enumerate(generated_solutions_raw):
+         if sol_text.startswith("LLM API Error") or sol_text.startswith("ERROR:"):
+             critique = f"Solution {i+1} could not be generated due to an API error."
+             score = 0
+         else:
+             prompt_evaluate = (
+                 f"Problem Reference: \"{problem_description[:200]}...\"\n"
+                 f"Evaluate the following proposed solution:\n```\n{sol_text}\n```\n"
+                 f"Provide your critique and a score (e.g., 'Critique: This is okay. Score: 7/10')."
+             )
+             log_entries.append(f" Sending to Critique Crucible (Solution {i+1}):\n Model: {model_id}\n Prompt (snippet): {prompt_evaluate[:150]}...")
+             evaluation_text = call_llm_via_api(prompt_evaluate, model_id, eval_temp, eval_max_tokens, system_prompt_evaluate)
+             log_entries.append(f" Critique Crucible Response (Solution {i+1} - Snippet): {evaluation_text[:150]}...")
+
+             # Attempt to parse the score (this is a simple parser; can be improved)
+             parsed_score = 0
+             try:
+                 # Look for "Score: X/10" or "Score: X"
+                 score_match = [s for s in evaluation_text.split() if s.endswith("/10")]
+                 if score_match:
+                     parsed_score = int(score_match[0].split('/')[0].split(':')[-1].strip())
+                 else:  # Try a bare number if "X/10" is not found
+                     nums = [int(s) for s in evaluation_text.replace(":", " ").split() if s.isdigit()]
+                     if nums:
+                         parsed_score = max(min(nums[-1], 10), 0)  # Take the last number, capped at 10
+             except ValueError:
+                 parsed_score = random.randint(3, 7)  # Fallback if parsing fails
+
+             critique = evaluation_text
+             score = parsed_score
+
+         evaluated_solutions_display.append(f"**Candidate {i+1}:**\n```text\n{sol_text}\n```\n**Crucible Verdict (Score: {score}/10):**\n{critique}\n---")
+         evaluated_sols_data.append({"id": i+1, "solution": sol_text, "score": score, "critique": critique})
+
+     if not evaluated_sols_data:
+         log_entries.append(" Critique Crucible yielded no evaluations.")
+         return "\n\n".join(evaluated_solutions_display) if evaluated_solutions_display else "Generation OK, but evaluation failed.", "", "", "\n".join(log_entries)
+
+     # --- STAGE 3: SELECTION & ASCENSION PREP ---
+     evaluated_sols_data.sort(key=lambda x: x["score"], reverse=True)
+     best_initial_solution_data = evaluated_sols_data[0]
+     log_entries.append(f"\n**Stage 3: Champion Selected - Candidate {best_initial_solution_data['id']} (Score: {best_initial_solution_data['score']}) chosen for evolution.**")
+
+     # --- STAGE 4: EVOLUTIONARY FORGE - PURSUIT OF PERFECTION ---
+     log_entries.append("\n**Stage 4: Evolutionary Forge - Refining the Champion...**")
+     system_prompt_evolve = f"You are an elite AI algorithm optimizer. Your task is to take a good solution and make it significantly better, focusing on {problem_type.lower()} best practices, efficiency, or clarity."
+     prompt_evolve = (
+         f"Original Problem: \"{problem_description}\"\n"
+         f"The current leading solution (Score: {best_initial_solution_data['score']}/10) is:\n```\n{best_initial_solution_data['solution']}\n```\n"
+         f"Original Critique: \"{best_initial_solution_data['critique']}\"\n"
+         f"Your mission: Evolve this solution. Make it demonstrably superior. Explain the key improvements you've made."
+     )
+     log_entries.append(f" Sending to Evolutionary Forge:\n Model: {model_id}\n Prompt (snippet): {prompt_evolve[:150]}...")
+     evolved_solution_text = call_llm_via_api(prompt_evolve, model_id, evolve_temp, evolve_max_tokens, system_prompt_evolve)
+     log_entries.append(f" Evolutionary Forge Response (Snippet): {evolved_solution_text[:150]}...")
+
+     # --- FINAL OUTPUT ASSEMBLY ---
+     initial_solutions_output_md = "\n\n".join(evaluated_solutions_display)
+     best_solution_output_md = (
+         f"**Champion Candidate {best_initial_solution_data['id']} (Original Score: {best_initial_solution_data['score']}/10):**\n"
+         f"```text\n{best_initial_solution_data['solution']}\n```\n"
+         f"**Original Crucible Verdict:**\n{best_initial_solution_data['critique']}"
+     )
+     evolved_solution_output_md = f"**✨ AlgoForge Prime™ Evolved Artifact ✨:**\n```text\n{evolved_solution_text}\n```"
+
+     log_entries.append("\n**AlgoForge Prime™ Cycle Complete.**")
+     final_log_output = "\n".join(log_entries)
+
+     return initial_solutions_output_md, best_solution_output_md, evolved_solution_output_md, final_log_output

+ # --- GRADIO UI: THE COMMAND DECK OF ALGOFORGE PRIME™ ---
+ intro_markdown = """
+ # AlgoForge Prime ✨: Conceptual Algorithmic Evolution
+ Welcome, Architect of the Future! I am your humble servant, an AI-driven system inspired by the groundbreaking work of pioneers like Google DeepMind's AlphaEvolve.
+ My purpose? To demonstrate a *simplified, conceptual* workflow for AI-assisted algorithm discovery and refinement.
+ **This is NOT AlphaEvolve.** This is a creative exploration using powerful Hugging Face LLMs via your `HF_TOKEN`.
+
+ **The Process, Distilled:**
+ 1. **Genesis Engine:** We command an LLM to generate multiple diverse solutions to your problem.
+ 2. **Critique Crucible:** Another (or the same) LLM instance evaluates these candidates, scoring them.
+ 3. **Evolutionary Forge:** The highest-scoring candidate is fed back to an LLM with the directive: *IMPROVE IT!*
+
+ **Your `HF_TOKEN` must be set in this Space's Secrets for AlgoForge Prime™ to function!**
  """

+ with gr.Blocks(theme=gr.themes.Monochrome(primary_hue="indigo", secondary_hue="blue"), title="AlgoForge Prime") as demo:
+     gr.Markdown(intro_markdown)
+
+     if not HF_TOKEN:
+         gr.Markdown("<h2 style='color:red;'>⚠️ CRITICAL: `HF_TOKEN` is NOT detected in Space Secrets. AlgoForge Prime™ is non-operational. Please add your token.</h2>")

      with gr.Row():
          with gr.Column(scale=1):
+             gr.Markdown("## 💡 1. Define the Challenge")
+             problem_type_dd = gr.Dropdown(
+                 ["Python Algorithm", "Data Structure Logic", "Mathematical Optimization", "Conceptual System Design", "Pseudocode Refinement"],
+                 label="Type of Problem/Algorithm",
+                 value="Python Algorithm"
              )
+             problem_desc_tb = gr.Textbox(
+                 lines=5,
+                 label="Problem Description / Desired Outcome",
+                 placeholder="e.g., 'Develop a Python function to efficiently find all prime factors of a large integer.' OR 'Design a heuristic for optimizing delivery routes in a dense urban area.'"
              )
+             initial_hints_tb = gr.Textbox(
                  lines=3,
+                 label="Initial Thoughts / Constraints / Seed Ideas (Optional)",
+                 placeholder="e.g., 'Consider dynamic programming.' OR 'Avoid brute-force if N > 1000.' OR 'Must be implementable in Verilog later.'"
              )

+ gr.Markdown("## ⚙️ 2. Configure The Forge")
214
+ model_select_dd = gr.Dropdown(
215
+ choices=list(AVAILABLE_MODELS.keys()),
216
+ value=list(AVAILABLE_MODELS.keys())[0], # Default to first model in dict
217
+ label="Select LLM Core Model"
218
+ )
219
+ num_solutions_slider = gr.Slider(1, 5, value=3, step=1, label="Number of Initial Solutions (Genesis Engine)")
220
+
221
+ with gr.Accordion("Advanced LLM Parameters (Tune with Caution!)", open=False):
222
+ gr.Markdown("Higher temperature = more creative/random. Lower = more focused/deterministic.")
223
+ with gr.Row():
224
+ gen_temp_slider = gr.Slider(0.0, 1.5, value=0.7, step=0.1, label="Genesis Temp")
225
+ gen_max_tokens_slider = gr.Slider(100, 1000, value=350, step=50, label="Genesis Max Tokens")
226
+ with gr.Row():
227
+ eval_temp_slider = gr.Slider(0.0, 1.5, value=0.5, step=0.1, label="Crucible Temp")
228
+ eval_max_tokens_slider = gr.Slider(100, 500, value=200, step=50, label="Crucible Max Tokens")
229
+ with gr.Row():
230
+ evolve_temp_slider = gr.Slider(0.0, 1.5, value=0.8, step=0.1, label="Evolution Temp")
231
+ evolve_max_tokens_slider = gr.Slider(100, 1000, value=400, step=50, label="Evolution Max Tokens")
232
+
233
+ submit_btn = gr.Button("🚀 ENGAGE ALGOFORGE PRIME™ 🚀", variant="primary", size="lg")
234
 
235
          with gr.Column(scale=2):
+             gr.Markdown("## 🔥 3. The Forge's Output")
              with gr.Tabs():
+                 with gr.TabItem("📜 Genesis Candidates & Crucible Verdicts"):
+                     output_initial_solutions_md = gr.Markdown(label="LLM-Generated Initial Solutions & Evaluations")
+                 with gr.TabItem("🏆 Champion Candidate (Pre-Evolution)"):
+                     output_best_solution_md = gr.Markdown(label="Evaluator's Top Pick")
+                 with gr.TabItem("🌟 Evolved Artifact"):
+                     output_evolved_solution_md = gr.Markdown(label="Refined Solution from the Evolutionary Forge")
+                 with gr.TabItem("🛠️ LLM Interaction Log (SRE View)"):
+                     output_interaction_log_md = gr.Markdown(label="Detailed Log of LLM Prompts & (Snippets of) Responses")
+
+     submit_btn.click(
+         fn=run_algoforge_simulation,
+         inputs=[
+             problem_type_dd, problem_desc_tb, initial_hints_tb,
+             num_solutions_slider, model_select_dd,
+             gen_temp_slider, gen_max_tokens_slider,
+             eval_temp_slider, eval_max_tokens_slider,
+             evolve_temp_slider, evolve_max_tokens_slider
+         ],
+         outputs=[
+             output_initial_solutions_md, output_best_solution_md,
+             output_evolved_solution_md, output_interaction_log_md
+         ]
      )

  gr.Markdown("---")
263
+ gr.Markdown(
264
+ "**Disclaimer:** As the architect of this marvel, I must remind you: this is a *conceptual demonstration*. Real AI-driven algorithm discovery is vastly more complex and resource-intensive. LLM outputs are probabilistic and require rigorous human oversight and verification. This tool is for inspiration and exploration, not production deployment of unverified algorithms. Handle with brilliance and caution!"
265
+ "\n\n*Powered by Gradio, Hugging Face Inference API, and the boundless spirit of innovation.*"
266
+ )
267
 
268
+ # To launch this magnificent creation:
 
269
  if __name__ == "__main__":
270
+ if not HF_TOKEN:
271
+ print("="*80)
272
+ print("WARNING: HF_TOKEN environment variable not set.")
273
+ print("AlgoForge Prime™ requires this token to communicate with Hugging Face LLMs.")
274
+ print("Please set it in your environment or Space Secrets.")
275
+ print("The UI will load, but LLM functionality will be disabled.")
276
+ print("="*80)
277
+ demo.launch(debug=True) # Debug=True is useful for local development
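
A note on Stage 2: the score extraction is, as its inline comment admits, a simple parser. Below is a sketch of a more robust alternative, assuming the evaluator keeps the "Score: X/10" convention requested in the prompt; the `parse_score` helper and its regex are illustrative and not part of this commit:

```python
import re

def parse_score(evaluation_text: str, default: int = 0) -> int:
    """Extract a 0-10 score from an LLM critique such as '... Score: 7/10'."""
    # Prefer an explicit "Score: X" or "Score: X/10" statement.
    match = re.search(r"[Ss]core\s*[:=]?\s*(\d{1,2})(?:\s*/\s*10)?", evaluation_text)
    if match:
        value = int(match.group(1))
    else:
        # Fall back to the last standalone one- or two-digit number, if any.
        numbers = re.findall(r"\b\d{1,2}\b", evaluation_text)
        if not numbers:
            return default
        value = int(numbers[-1])
    return max(0, min(value, 10))  # Clamp into the 0-10 range
```

A deterministic fallback (here `default=0`) also avoids the `random.randint(3, 7)` path above, which can silently promote an unparseable critique. Note as well that the Space needs a `requirements.txt` listing at least `gradio` and `huggingface_hub`, the only third-party imports in the new file.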