import gradio as gr
from huggingface_hub import InferenceClient # Still needed for HF fallbacks
import google.generativeai as genai # For Google Gemini API
import os
import random
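# Assumed pip requirements for this Space (a sketch, versions not pinned):
#   pip install gradio huggingface_hub google-generativeai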

# --- ALGOFORGE PRIME™ CONFIGURATION & SECRETS ---
# Google API Key - ESSENTIAL for Google Gemini Pro/Flash models via their API
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
GEMINI_API_CONFIGURED = False
if GOOGLE_API_KEY:
try:
genai.configure(api_key=GOOGLE_API_KEY)
GEMINI_API_CONFIGURED = True
print("INFO: Google Gemini API configured successfully.")
except Exception as e:
print(f"ERROR: Failed to configure Google Gemini API with provided key: {e}. Gemini models will be unavailable.")
# GOOGLE_API_KEY = None # Effectively disables it if config fails
else:
print("WARNING: GOOGLE_API_KEY not found in Space Secrets. Google Gemini API models will be disabled.")

# Hugging Face Token - For Hugging Face hosted models (fallbacks or alternatives)
HF_TOKEN = os.getenv("HF_TOKEN")
HF_API_CONFIGURED = False
if not HF_TOKEN:
print("WARNING: HF_TOKEN not found in Space Secrets. Calls to Hugging Face hosted models will be disabled.")
else:
HF_API_CONFIGURED = True
print("INFO: HF_TOKEN detected. Hugging Face hosted models can be used.")

# Initialize Hugging Face Inference Client (conditionally)
hf_inference_client = None
if HF_API_CONFIGURED:
try:
hf_inference_client = InferenceClient(token=HF_TOKEN)
print("INFO: Hugging Face InferenceClient initialized successfully.")
except Exception as e:
print(f"ERROR: Failed to initialize Hugging Face InferenceClient: {e}. HF models will be unavailable.")
HF_API_CONFIGURED = False # Mark as not configured if client init fails

# --- MODEL DEFINITIONS ---
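# Each AVAILABLE_MODELS entry maps a human-readable label to {"id": <api model id>,
# "type": "google_gemini" | "hf" | "none"}; the "type" field drives API routing
# in dispatch_llm_call further below.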
AVAILABLE_MODELS = {}
DEFAULT_MODEL_KEY = None
# Populate with Gemini models if API is configured
if GEMINI_API_CONFIGURED:
AVAILABLE_MODELS.update({
"Google Gemini 1.5 Flash (API - Fast, Recommended)": {"id": "gemini-1.5-flash-latest", "type": "google_gemini"},
"Google Gemini 1.0 Pro (API)": {"id": "gemini-1.0-pro-latest", "type": "google_gemini"},
})
DEFAULT_MODEL_KEY = "Google Gemini 1.5 Flash (API - Fast, Recommended)"
# Populate with Hugging Face models if API is configured (as alternatives/fallbacks)
if HF_API_CONFIGURED:
AVAILABLE_MODELS.update({
"Google Gemma 2B (HF - Quick Test)": {"id": "google/gemma-2b-it", "type": "hf"},
"Mistral 7B Instruct (HF)": {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf"},
"CodeLlama 7B Instruct (HF)": {"id": "codellama/CodeLlama-7b-Instruct-hf", "type": "hf"},
})
if not DEFAULT_MODEL_KEY: # If Gemini isn't configured, default to an HF model
DEFAULT_MODEL_KEY = "Google Gemma 2B (HF - Quick Test)"
# Absolute fallback if no models could be configured
if not AVAILABLE_MODELS:
print("CRITICAL ERROR: No models could be configured. Neither Google API Key nor HF Token seem to be working or present.")
# Add a dummy entry to prevent crashes, though the app will be non-functional
AVAILABLE_MODELS["No Models Available"] = {"id": "dummy", "type": "none"}
DEFAULT_MODEL_KEY = "No Models Available"
elif not DEFAULT_MODEL_KEY: # If somehow DEFAULT_MODEL_KEY is still None but AVAILABLE_MODELS is not empty
DEFAULT_MODEL_KEY = list(AVAILABLE_MODELS.keys())[0]

# --- CORE AI ENGINEERING: LLM INTERACTION FUNCTIONS ---
def call_huggingface_llm_api(prompt_text, model_id, temperature=0.7, max_new_tokens=350, system_prompt=None):
if not HF_API_CONFIGURED or not hf_inference_client:
return "ERROR: Hugging Face API is not configured (HF_TOKEN missing or client init failed)."
if system_prompt:
full_prompt = f"<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n{prompt_text} [/INST]"
else:
full_prompt = prompt_text
try:
use_sample = temperature > 0.0
response_text = hf_inference_client.text_generation(
full_prompt, model=model_id, max_new_tokens=max_new_tokens,
temperature=temperature if use_sample else None,
do_sample=use_sample, stream=False
)
return response_text
except Exception as e:
error_details = f"Error Type: {type(e).__name__}, Message: {str(e)}"
print(f"Hugging Face LLM API Call Error ({model_id}): {error_details}")
return f"LLM API Error (Hugging Face Model: {model_id}). Details: {error_details}. Check Space logs."

def call_google_gemini_api(prompt_text, model_id, temperature=0.7, max_new_tokens=400, system_prompt=None):
if not GEMINI_API_CONFIGURED:
return "ERROR: Google Gemini API is not configured (GOOGLE_API_KEY missing or config failed)."
try:
# For gemini-1.5-flash and newer, system_instruction is the preferred way.
# For older gemini-1.0-pro, you might need to structure the 'contents' array.
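        # A minimal sketch of that 'contents' fallback (hypothetical, not exercised
        # here): fold the system prompt into the user turn instead, e.g.
        #   contents = [{"role": "user", "parts": [f"{system_prompt}\n\n{prompt_text}"]}]
        #   response = model_instance.generate_content(contents, generation_config=generation_config)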
model_instance = genai.GenerativeModel(model_name=model_id, system_instruction=system_prompt if system_prompt else None)
generation_config = genai.types.GenerationConfig(
temperature=temperature,
max_output_tokens=max_new_tokens
)
# Simple user prompt if system_instruction is handled by GenerativeModel
response = model_instance.generate_content(
prompt_text, # Just the user prompt
generation_config=generation_config,
stream=False
)
# Robust check for response content and safety blocks
if response.prompt_feedback and response.prompt_feedback.block_reason:
block_reason_msg = response.prompt_feedback.block_reason_message or response.prompt_feedback.block_reason
print(f"Google Gemini API: Prompt blocked. Reason: {block_reason_msg}")
return f"Google Gemini API Error: Your prompt was blocked. Reason: {block_reason_msg}. Try rephrasing."
        if not response.candidates or not response.candidates[0].content.parts:
            # Fall back to any candidate that actually has content parts
            candidates_with_content = [cand for cand in response.candidates if cand.content and cand.content.parts]
            if not candidates_with_content:
                finish_reason = response.candidates[0].finish_reason if response.candidates else "Unknown"
                # The enum may stringify as "FinishReason.SAFETY", so use a substring check
                if "SAFETY" in str(finish_reason).upper():
                    print(f"Google Gemini API: Response generation stopped due to safety settings. Finish Reason: {finish_reason}")
                    return f"Google Gemini API Error: Response generation stopped due to safety settings. Finish Reason: {finish_reason}. Try a different prompt or adjust safety settings in Google AI Studio if possible."
                print(f"Google Gemini API: Empty response or no content parts. Finish Reason: {finish_reason}")
                return f"Google Gemini API Error: Empty response or no content generated. Finish Reason: {finish_reason}. The request may have been malformed, or the model had nothing to return."
            # Otherwise return text from the first candidate that does have content
            return candidates_with_content[0].content.parts[0].text
        # Primary path: the first candidate carries the response
        return response.candidates[0].content.parts[0].text
except Exception as e:
error_details = f"Error Type: {type(e).__name__}, Message: {str(e)}"
print(f"Google Gemini API Call Error ({model_id}): {error_details}")
# Provide more specific feedback for common errors if possible
if "API key not valid" in str(e) or "PERMISSION_DENIED" in str(e):
return f"LLM API Error (Google Gemini Model: {model_id}). Details: API key invalid or permission denied. Please check your GOOGLE_API_KEY and ensure the Gemini API is enabled for your project. Original error: {error_details}"
elif "Could not find model" in str(e):
return f"LLM API Error (Google Gemini Model: {model_id}). Details: Model ID '{model_id}' not found or not accessible with your key. Original error: {error_details}"
return f"LLM API Error (Google Gemini Model: {model_id}). Details: {error_details}. Check Space logs."

# --- ALGOFORGE PRIME™ - THE GRAND ORCHESTRATOR ---
# run_algoforge_simulation drives the full Genesis -> Critique -> Selection ->
# Evolution pipeline; its inner dispatch_llm_call helper routes each stage's
# prompt to the correct API call function defined above.
def run_algoforge_simulation(
problem_type, problem_description, initial_hints,
num_initial_solutions, selected_model_key,
gen_temp, gen_max_tokens,
eval_temp, eval_max_tokens,
evolve_temp, evolve_max_tokens
):
if not problem_description:
return "ERROR: Problem Description is the lifeblood of innovation! Please provide it.", "", "", ""
model_info = AVAILABLE_MODELS.get(selected_model_key)
if not model_info or model_info["type"] == "none":
return f"ERROR: No valid model selected or available. Please check API key configurations. Selected: '{selected_model_key}'", "", "", ""
model_id = model_info["id"]
model_type = model_info["type"]
log_entries = [f"**AlgoForge Prime™ Initializing...**\nSelected Model Core: {model_id} ({selected_model_key} - Type: {model_type})\nProblem Type: {problem_type}"]
def dispatch_llm_call(prompt, system_p, temp, max_tok, stage_name=""):
log_entries.append(f" Dispatching to {model_type.upper()} API for {stage_name} (Model: {model_id}):\n Prompt (snippet): {prompt[:100]}...")
if system_p: log_entries[-1] += f"\n System Prompt (snippet): {system_p[:100]}..."
if model_type == "hf":
if not HF_API_CONFIGURED: return "ERROR: HF_TOKEN not configured or InferenceClient failed."
result = call_huggingface_llm_api(prompt, model_id, temp, max_tok, system_p)
elif model_type == "google_gemini":
if not GEMINI_API_CONFIGURED: return "ERROR: GOOGLE_API_KEY not configured or Gemini API setup failed."
result = call_google_gemini_api(prompt, model_id, temp, max_tok, system_p)
else:
result = f"ERROR: Unknown model type '{model_type}' for selected model."
log_entries.append(f" {model_type.upper()} API Response ({stage_name} - Snippet): {str(result)[:150]}...")
return result
# STAGE 1: GENESIS
log_entries.append("\n**Stage 1: Genesis Engine - Generating Initial Solution Candidates...**")
generated_solutions_raw = []
    system_prompt_generate = f"You are an expert {problem_type.lower()} designer. Your goal is to brainstorm multiple diverse solutions to the user's problem."
for i in range(num_initial_solutions):
user_prompt_generate = (
f"Problem Description: \"{problem_description}\"\n"
f"Consider these initial thoughts/constraints: \"{initial_hints if initial_hints else 'None'}\"\n"
f"Please provide one distinct and complete solution/algorithm for this problem. "
f"This is solution attempt #{i+1} of {num_initial_solutions}. Try a different approach if possible."
)
solution_text = dispatch_llm_call(user_prompt_generate, system_prompt_generate, gen_temp, gen_max_tokens, f"Genesis Attempt {i+1}")
generated_solutions_raw.append(solution_text)
if not any(sol and not str(sol).startswith("ERROR:") and not str(sol).startswith("LLM API Error") for sol in generated_solutions_raw):
log_entries.append(" Genesis Engine failed to produce viable candidates or all calls resulted in errors.")
initial_sol_output = "No valid solutions generated by the Genesis Engine. All attempts failed or returned errors."
if generated_solutions_raw:
initial_sol_output += "\n\nErrors Encountered:\n" + "\n".join([f"- {str(s)}" for s in generated_solutions_raw if str(s).startswith("ERROR") or str(s).startswith("LLM API Error")])
return initial_sol_output, "", "", "\n".join(log_entries)
# STAGE 2: CRITIQUE
log_entries.append("\n**Stage 2: Critique Crucible - Evaluating Candidates...**")
evaluated_solutions_display = []
evaluated_sols_data = []
system_prompt_evaluate = "You are a highly critical and insightful AI algorithm evaluator. Assess the provided solution based on clarity, potential correctness, and perceived efficiency. Provide a concise critique and a numerical score from 1 (poor) to 10 (excellent). CRITICALLY: You MUST include the score in the format 'Score: X/10' where X is an integer."
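    # The 'Score: X/10' format demanded by this system prompt is the contract the
    # score-parsing logic in the loop below relies on; change them together.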
for i, sol_text_candidate in enumerate(generated_solutions_raw):
sol_text = str(sol_text_candidate)
critique_text = f"Critique for Candidate {i+1}" # Placeholder
score = 0
if sol_text.startswith("ERROR:") or sol_text.startswith("LLM API Error"):
critique_text = f"Candidate {i+1} could not be properly generated due to an earlier API error: {sol_text}"
score = 0
else:
user_prompt_evaluate = (
f"Problem Reference (for context only, do not repeat in output): \"{problem_description[:150]}...\"\n\n"
f"Now, evaluate the following proposed solution:\n```\n{sol_text}\n```\n"
f"Provide your critique and ensure you output a score in the format 'Score: X/10'."
)
evaluation_text = str(dispatch_llm_call(user_prompt_evaluate, system_prompt_evaluate, eval_temp, eval_max_tokens, f"Critique Candidate {i+1}"))
critique_text = evaluation_text # Default to full response
if evaluation_text.startswith("ERROR:") or evaluation_text.startswith("LLM API Error"):
critique_text = f"Error during evaluation of Candidate {i+1}: {evaluation_text}"
score = 0
else:
# Try to parse score
score_match_found = False
if "Score:" in evaluation_text:
try:
# More robust parsing for "Score: X/10" or "Score: X"
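                        # Worked example: "Solid approach. Score: 8/10 overall" ->
                        #   score_part_full = "8/10 overall" -> score_num_str = "8" -> score = 8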
score_part_full = evaluation_text.split("Score:")[1].strip()
score_num_str = score_part_full.split("/")[0].split()[0].strip() # Get number before / or space
parsed_score_val = int(score_num_str)
score = max(1, min(parsed_score_val, 10)) # Clamp score
score_match_found = True
except (ValueError, IndexError, TypeError):
log_entries.append(f" Warning: Could not parse score accurately from: '{evaluation_text}' despite 'Score:' marker.")
if not score_match_found: # Fallback if parsing fails or marker missing
log_entries.append(f" Warning: 'Score:' marker missing or unparsable in evaluation: '{evaluation_text}'. Assigning random score.")
score = random.randint(3, 7)
evaluated_solutions_display.append(f"**Candidate {i+1}:**\n```text\n{sol_text}\n```\n**Crucible Verdict (Score: {score}/10):**\n{critique_text}\n---")
evaluated_sols_data.append({"id": i+1, "solution": sol_text, "score": score, "critique": critique_text})
if not evaluated_sols_data or all(s['score'] == 0 for s in evaluated_sols_data):
log_entries.append(" Critique Crucible yielded no valid evaluations or all solutions had errors.")
current_output = "\n\n".join(evaluated_solutions_display) if evaluated_solutions_display else "Generation might be OK, but evaluation failed for all candidates."
return current_output, "", "", "\n".join(log_entries)
# STAGE 3: SELECTION
evaluated_sols_data.sort(key=lambda x: x["score"], reverse=True)
best_initial_solution_data = evaluated_sols_data[0]
log_entries.append(f"\n**Stage 3: Champion Selected - Candidate {best_initial_solution_data['id']} (Score: {best_initial_solution_data['score']}) chosen for evolution.**")
if best_initial_solution_data['solution'].startswith("ERROR:") or best_initial_solution_data['solution'].startswith("LLM API Error"):
log_entries.append(" ERROR: Selected champion solution itself is an error message. Cannot evolve.")
return "\n\n".join(evaluated_solutions_display), f"Selected champion was an error: {best_initial_solution_data['solution']}", "Cannot evolve an error.", "\n".join(log_entries)
# STAGE 4: EVOLUTION
log_entries.append("\n**Stage 4: Evolutionary Forge - Refining the Champion...**")
system_prompt_evolve = f"You are an elite AI algorithm optimizer and refiner. Your task is to take the provided solution and make it significantly better. Focus on {problem_type.lower()} best practices, improve efficiency or clarity, fix any potential errors, and expand on it if appropriate. Explain the key improvements you've made clearly."
user_prompt_evolve = (
f"Original Problem (for context): \"{problem_description}\"\n\n"
f"The current leading solution (which had a score of {best_initial_solution_data['score']}/10) is:\n```\n{best_initial_solution_data['solution']}\n```\n"
f"The original critique for this solution was: \"{best_initial_solution_data['critique']}\"\n\n"
f"Your mission: Evolve this solution. Make it demonstrably superior. If the original solution was just a sketch, flesh it out. If it had flaws, fix them. If it was good, make it great. Explain the key improvements you've made as part of your response."
)
evolved_solution_text = str(dispatch_llm_call(user_prompt_evolve, system_prompt_evolve, evolve_temp, evolve_max_tokens, "Evolution"))
if evolved_solution_text.startswith("ERROR:") or evolved_solution_text.startswith("LLM API Error"):
log_entries.append(" ERROR: Evolution step resulted in an API error.")
evolved_solution_output_md = f"**Evolution Failed:**\n{evolved_solution_text}"
else:
evolved_solution_output_md = f"**✨ AlgoForge Prime™ Evolved Artifact ✨:**\n```text\n{evolved_solution_text}\n```"
# FINAL OUTPUT ASSEMBLY
initial_solutions_output_md = "\n\n".join(evaluated_solutions_display)
best_solution_output_md = (
f"**Champion Candidate {best_initial_solution_data['id']} (Original Score: {best_initial_solution_data['score']}/10):**\n"
f"```text\n{best_initial_solution_data['solution']}\n```\n"
f"**Original Crucible Verdict:**\n{best_initial_solution_data['critique']}"
)
log_entries.append("\n**AlgoForge Prime™ Cycle Complete.**")
final_log_output = "\n".join(log_entries)
return initial_solutions_output_md, best_solution_output_md, evolved_solution_output_md, final_log_output

# --- GRADIO UI ---
intro_markdown = """
# ✨ AlgoForge Prime™ ✨: Conceptual Algorithmic Evolution (Gemini Focused)
Welcome! This system demonstrates AI-assisted algorithm discovery and refinement, with a primary focus on **Google Gemini API models**.
Hugging Face hosted models are available as alternatives if configured.
**This is a conceptual demo, not AlphaEvolve itself.**
**API Keys Required in Space Secrets:**
- `GOOGLE_API_KEY` (Primary): For Google Gemini API models (e.g., Gemini 1.5 Flash, Gemini 1.0 Pro).
- `HF_TOKEN` (Secondary): For Hugging Face hosted models (e.g., Gemma on HF, Mistral).
If a key is missing, the corresponding models will be unavailable or limited.
"""
token_status_md = ""
if not GEMINI_API_CONFIGURED and not HF_API_CONFIGURED:
token_status_md = "<p style='color:red;'>⚠️ CRITICAL: NEITHER GOOGLE_API_KEY NOR HF_TOKEN are configured or working. The application will not function.</p>"
else:
if GEMINI_API_CONFIGURED:
token_status_md += "<p style='color:green;'>✅ Google Gemini API Key detected and configured.</p>"
else:
token_status_md += "<p style='color:orange;'>⚠️ GOOGLE_API_KEY missing or failed to configure. Gemini API models disabled.</p>"
if HF_API_CONFIGURED:
token_status_md += "<p style='color:green;'>✅ Hugging Face API Token detected and client initialized.</p>"
else:
token_status_md += "<p style='color:orange;'>⚠️ HF_TOKEN missing or client failed to initialize. Hugging Face models disabled.</p>"
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"), title="AlgoForge Prime™ (Gemini)") as demo:
gr.Markdown(intro_markdown)
gr.HTML(token_status_md)
if not AVAILABLE_MODELS or DEFAULT_MODEL_KEY == "No Models Available":
gr.Markdown("<h2 style='color:red;'>No models are available. Please check your API key configurations in Space Secrets and restart the Space.</h2>")
else:
with gr.Row():
with gr.Column(scale=1):
gr.Markdown("## 💡 1. Define the Challenge")
problem_type_dd = gr.Dropdown(
["Python Algorithm", "Data Structure Logic", "Mathematical Optimization", "Conceptual System Design", "Pseudocode Refinement", "Verilog Snippet Idea", "General Brainstorming"],
label="Type of Problem/Algorithm", value="Python Algorithm"
)
problem_desc_tb = gr.Textbox(
lines=5, label="Problem Description / Desired Outcome",
placeholder="e.g., 'Efficient Python function for Fibonacci sequence using memoization.'"
)
initial_hints_tb = gr.Textbox(
lines=3, label="Initial Thoughts / Constraints / Seed Ideas (Optional)",
placeholder="e.g., 'Focus on clarity and correctness.' OR 'Target O(n) complexity.'"
)
gr.Markdown("## ⚙️ 2. Configure The Forge")
model_select_dd = gr.Dropdown(
choices=list(AVAILABLE_MODELS.keys()),
value=DEFAULT_MODEL_KEY if DEFAULT_MODEL_KEY in AVAILABLE_MODELS else (list(AVAILABLE_MODELS.keys())[0] if AVAILABLE_MODELS else None), # Ensure default is valid
label="Select LLM Core Model"
)
num_solutions_slider = gr.Slider(1, 4, value=2, step=1, label="Number of Initial Solutions (Genesis Engine)")
with gr.Accordion("Advanced LLM Parameters", open=False):
with gr.Row():
gen_temp_slider = gr.Slider(0.0, 1.0, value=0.7, step=0.05, label="Genesis Temp") # Gemini often uses 0-1 range
gen_max_tokens_slider = gr.Slider(100, 2048, value=512, step=64, label="Genesis Max Tokens")
with gr.Row():
eval_temp_slider = gr.Slider(0.0, 1.0, value=0.4, step=0.05, label="Crucible Temp")
eval_max_tokens_slider = gr.Slider(100, 1024, value=300, step=64, label="Crucible Max Tokens")
with gr.Row():
evolve_temp_slider = gr.Slider(0.0, 1.0, value=0.75, step=0.05, label="Evolution Temp")
evolve_max_tokens_slider = gr.Slider(100, 2048, value=768, step=64, label="Evolution Max Tokens")
submit_btn = gr.Button("🚀 ENGAGE ALGOFORGE PRIME™ 🚀", variant="primary", size="lg")
with gr.Column(scale=2):
gr.Markdown("## 🔥 3. The Forge's Output")
with gr.Tabs():
with gr.TabItem("📜 Genesis Candidates & Crucible Verdicts"):
output_initial_solutions_md = gr.Markdown(label="LLM-Generated Initial Solutions & Evaluations")
with gr.TabItem("🏆 Champion Candidate (Pre-Evolution)"):
output_best_solution_md = gr.Markdown(label="Evaluator's Top Pick")
with gr.TabItem("🌟 Evolved Artifact"):
output_evolved_solution_md = gr.Markdown(label="Refined Solution from the Evolutionary Forge")
with gr.TabItem("🛠️ Interaction Log (Dev View)"):
output_interaction_log_md = gr.Markdown(label="Detailed Log of LLM Prompts & Responses")
submit_btn.click(
fn=run_algoforge_simulation,
inputs=[
problem_type_dd, problem_desc_tb, initial_hints_tb,
num_solutions_slider, model_select_dd,
gen_temp_slider, gen_max_tokens_slider,
eval_temp_slider, eval_max_tokens_slider,
evolve_temp_slider, evolve_max_tokens_slider
],
outputs=[
output_initial_solutions_md, output_best_solution_md,
output_evolved_solution_md, output_interaction_log_md
]
)
gr.Markdown("---")
gr.Markdown(
"**Disclaimer:** This is a conceptual demo. LLM outputs require rigorous human oversight. Use for inspiration and exploration."
"\n*Powered by Gradio, Google Gemini API, Hugging Face Inference API, and innovation.*"
)

if __name__ == "__main__":
print("="*80)
print("AlgoForge Prime™ (Gemini Focused) Starting...")
if not GEMINI_API_CONFIGURED: print("REMINDER: GOOGLE_API_KEY missing or config failed. Gemini API models disabled.")
if not HF_API_CONFIGURED: print("REMINDER: HF_TOKEN missing or client init failed. Hugging Face models disabled.")
if not GEMINI_API_CONFIGURED and not HF_API_CONFIGURED: print("CRITICAL: NEITHER API IS CONFIGURED. APP WILL NOT FUNCTION.")
print(f"UI will attempt to default to model key: {DEFAULT_MODEL_KEY}")
print(f"Available models for UI: {list(AVAILABLE_MODELS.keys())}")
print("="*80)
    demo.launch(debug=True, server_name="0.0.0.0")
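    # server_name="0.0.0.0" binds all interfaces so the app is reachable from
    # outside the Space's container; debug=True surfaces tracebacks in the logs.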