# algoforge_prime/app.py
import gradio as gr
import os
import time
import json       # For potentially displaying structured data or passing complex states
import traceback  # Used when formatting unexpected runtime errors for the interaction log

# --- Core Logic Imports ---
from core.llm_clients import initialize_all_clients, is_gemini_api_configured, is_hf_api_configured
initialize_all_clients()
GEMINI_API_READY = is_gemini_api_configured()
HF_API_READY = is_hf_api_configured()

from core.generation_engine import generate_initial_solutions
from core.evaluation_engine import evaluate_solution_candidate, EvaluationResultOutput
from core.evolution_engine import evolve_solution
from prompts.system_prompts import get_system_prompt
from prompts.prompt_templates import format_code_test_analysis_user_prompt
from core.safe_executor import execute_python_code_with_tests, ExecutionResult  # For re-evaluating evolved code
# --- Application Configuration ---
# (This section should ideally move to a config file or env vars for production.)
AVAILABLE_MODELS_CONFIG = {}
UI_DEFAULT_MODEL_KEY = None

GEMINI_1_5_PRO_LATEST_ID = "gemini-1.5-pro-latest"      # Ensure this is the correct ID usable via the API
GEMINI_1_5_FLASH_LATEST_ID = "gemini-1.5-flash-latest"

if GEMINI_API_READY:
    AVAILABLE_MODELS_CONFIG.update({
        "✨ Google Gemini 1.5 Pro (API)": {"id": GEMINI_1_5_PRO_LATEST_ID, "type": "google_gemini"},
        "⚡ Google Gemini 1.5 Flash (API)": {"id": GEMINI_1_5_FLASH_LATEST_ID, "type": "google_gemini"},
        "Legacy Gemini 1.0 Pro (API)": {"id": "gemini-1.0-pro-latest", "type": "google_gemini"},
    })
    UI_DEFAULT_MODEL_KEY = "✨ Google Gemini 1.5 Pro (API)"
    if UI_DEFAULT_MODEL_KEY not in AVAILABLE_MODELS_CONFIG:
        UI_DEFAULT_MODEL_KEY = "⚡ Google Gemini 1.5 Flash (API)"
else:
    print("WARNING: app.py - Gemini API not configured.")

if HF_API_READY:
    AVAILABLE_MODELS_CONFIG.update({
        "Gemma 2B (HF Test)": {"id": "google/gemma-2b-it", "type": "hf"},
        "Mistral 7B (HF)": {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf"},
    })
    if not UI_DEFAULT_MODEL_KEY:
        UI_DEFAULT_MODEL_KEY = "Gemma 2B (HF Test)"
else:
    print("WARNING: app.py - HF API not configured.")

if not AVAILABLE_MODELS_CONFIG:
    AVAILABLE_MODELS_CONFIG["No Models Available (Setup API Keys!)"] = {"id": "dummy_error", "type": "none"}
    UI_DEFAULT_MODEL_KEY = "No Models Available (Setup API Keys!)"
elif not UI_DEFAULT_MODEL_KEY and AVAILABLE_MODELS_CONFIG:
    UI_DEFAULT_MODEL_KEY = list(AVAILABLE_MODELS_CONFIG.keys())[0]
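
# Illustrative sketch (assumption, not part of the original design): one way to move toward
# env-var driven configuration is letting an environment variable such as
# ALGOFORGE_DEFAULT_MODEL override the default model key. Behavior is unchanged when the
# variable is unset or names a key that is not in AVAILABLE_MODELS_CONFIG.
_env_default_model_key = os.getenv("ALGOFORGE_DEFAULT_MODEL")
if _env_default_model_key and _env_default_model_key in AVAILABLE_MODELS_CONFIG:
    UI_DEFAULT_MODEL_KEY = _env_default_model_key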
# --- UI Customization (Conceptual - real CSS would live in a separate file) ---
# For a "WOW" UI you would link a custom CSS file; a conceptual placeholder follows.
APP_THEME = gr.themes.Soft(
    primary_hue=gr.themes.colors.blue,
    secondary_hue=gr.themes.colors.sky,
    neutral_hue=gr.themes.colors.slate,
    font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
).set(
    # Example: input_background_fill="rgba(240, 240, 240, 0.5)"  # Slightly transparent inputs
    # button_primary_background_fill="linear-gradient(to bottom right, hsl(210, 80%, 50%), hsl(210, 100%, 30%))"
)
# --- Main Orchestration Logic (detailed progress reporting and error handling for the UI) ---
def run_algoforge_orchestrator_ui_wrapper(
    problem_type_selected: str, problem_description_text: str, initial_hints_text: str,
    user_provided_tests_code: str, num_initial_solutions_to_gen: int, selected_model_ui_key: str,
    genesis_temp: float, genesis_max_tokens: int, critique_temp: float, critique_max_tokens: int,
    evolution_temp: float, evolution_max_tokens: int,
    # Gradio's Request object can provide session info if needed for advanced state
    # request: gr.Request
):
    # This wrapper streams fine-grained UI updates via yield
    # and wraps the whole run in try/except for better error display.
    log_accumulator = [f"**AlgoForge Omega™ Cycle Starting at {time.strftime('%Y-%m-%d %H:%M:%S')}**\n"]

    # Initial state for UI outputs
    yield {
        output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🚀 Initializing AlgoForge Omega™...</p>", visible=True),
        output_initial_solutions_accordion: gr.Accordion(label="⏳ Generating Initial Candidates...", open=False, visible=True),
        output_initial_solutions_markdown: gr.Markdown(value="Working...", visible=True),
        output_champion_accordion: gr.Accordion(label="⏳ Awaiting Champion Selection...", open=False, visible=False),
        output_champion_markdown: gr.Markdown(value="", visible=False),
        output_evolved_accordion: gr.Accordion(label="⏳ Awaiting Evolution...", open=False, visible=False),
        output_evolved_markdown: gr.Markdown(value="", visible=False),
        output_ai_test_analysis_markdown: gr.Markdown(value="", visible=False),
        output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator), visible=True),
        engage_button: gr.Button(interactive=False)  # Disable the button during the run
    }

    try:
        start_time = time.time()
        if not problem_description_text.strip():
            raise ValueError("Problem Description is mandatory.")

        current_model_config = AVAILABLE_MODELS_CONFIG.get(selected_model_ui_key)
        if not current_model_config or current_model_config["type"] == "none":
            raise ValueError(f"No valid LLM selected ('{selected_model_ui_key}'). Check API key configurations.")

        log_accumulator.append(f"Selected Model: {selected_model_ui_key} (Type: {current_model_config['type']}, ID: {current_model_config['id']})")
        log_accumulator.append(f"Problem Type: {problem_type_selected}")
        log_accumulator.append(f"User Tests Provided: {'Yes' if user_provided_tests_code.strip() else 'No'}\n")
        yield {output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator))}

        llm_config_genesis = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": genesis_temp, "max_tokens": genesis_max_tokens}
        llm_config_critique = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": critique_temp, "max_tokens": critique_max_tokens}
        llm_config_evolution = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": evolution_temp, "max_tokens": evolution_max_tokens}
        # --- STAGE 1: GENESIS ---
        yield {output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🧬 Stage 1: Genesis Engine - Generating Solutions...</p>")}
        log_accumulator.append("**------ STAGE 1: GENESIS ENGINE ------**")
        initial_raw_solutions = generate_initial_solutions(problem_description_text, initial_hints_text, problem_type_selected, num_initial_solutions_to_gen, llm_config_genesis)
        log_accumulator.append(f"Genesis Engine produced {len(initial_raw_solutions)} raw candidate(s).")
        for i, sol_text in enumerate(initial_raw_solutions):
            log_accumulator.append(f"  Candidate {i+1} (Raw Snippet): {str(sol_text)[:100]}...")
        yield {output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator))}

        # --- STAGE 2: CRITIQUE & AUTOMATED EVALUATION ---
        yield {
            output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🔬 Stage 2: Critique Crucible - Evaluating Candidates...</p>"),
            output_initial_solutions_accordion: gr.Accordion(label="Initial Candidates & Evaluations (Processing...)", open=True)
        }
        log_accumulator.append("\n**------ STAGE 2: CRITIQUE CRUCIBLE & AUTOMATED EVALUATION ------**")
        evaluated_candidates_list = []
        initial_solutions_md_accumulator = ["**Initial Candidates & Detailed Evaluations:**\n"]
        for i, candidate_solution_text in enumerate(initial_raw_solutions):
            log_accumulator.append(f"\n--- Evaluating Candidate {i+1} ---")
            yield {output_status_bar: gr.HTML(value=f"<p style='color: dodgerblue;'>🔬 Evaluating Candidate {i+1} of {num_initial_solutions_to_gen}...</p>")}
            evaluation_output_obj = evaluate_solution_candidate(str(candidate_solution_text), problem_description_text, problem_type_selected, user_provided_tests_code, llm_config_critique)
            evaluated_candidates_list.append({"id": i + 1, "solution_text": str(candidate_solution_text), "evaluation_obj": evaluation_output_obj})
            log_accumulator.append(f"  Combined Score: {evaluation_output_obj.combined_score}/10")
            # ... (more detailed logging from evaluation_obj could go here, as before)

            # Update the UI with this candidate's evaluation progressively
            current_eval_md = (
                f"**Candidate {i+1} (Score: {evaluation_output_obj.combined_score}/10):**\n"
                f"```python\n{str(candidate_solution_text)}\n```\n\n"
                f"**Evaluation Verdict:**\n{evaluation_output_obj.get_display_critique()}\n---"
            )
            initial_solutions_md_accumulator.append(current_eval_md)
            yield {
                output_initial_solutions_markdown: gr.Markdown(value="\n".join(initial_solutions_md_accumulator)),
                output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator))
            }
        # --- STAGE 3: SELECTION OF CHAMPION ---
        yield {output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🏆 Stage 3: Selecting Champion Candidate...</p>")}
        log_accumulator.append("\n**------ STAGE 3: CHAMPION SELECTION ------**")
        potentially_viable_candidates = [
            c for c in evaluated_candidates_list
            if c["evaluation_obj"] and c["evaluation_obj"].combined_score > 0 and not str(c["solution_text"]).startswith("ERROR")
        ]
        if not potentially_viable_candidates:
            raise ValueError("No viable candidate solutions found after evaluation. All attempts may have failed or scored too low.")
        champion_candidate_data = sorted(potentially_viable_candidates, key=lambda x: x["evaluation_obj"].combined_score, reverse=True)[0]
        log_accumulator.append(f"Champion Selected: Candidate {champion_candidate_data['id']} with score {champion_candidate_data['evaluation_obj'].combined_score}/10.")

        champion_display_markdown = (
            f"**Champion Candidate ID: {champion_candidate_data['id']} "
            f"(Original Score: {champion_candidate_data['evaluation_obj'].combined_score}/10):**\n"
            f"```python\n{champion_candidate_data['solution_text']}\n```\n\n"
            f"**Original Comprehensive Evaluation:**\n{champion_candidate_data['evaluation_obj'].get_display_critique()}"
        )
        yield {
            output_champion_accordion: gr.Accordion(label=f"🏆 Champion: Candidate {champion_candidate_data['id']} (Score: {champion_candidate_data['evaluation_obj'].combined_score}/10)", open=True, visible=True),
            output_champion_markdown: gr.Markdown(value=champion_display_markdown, visible=True),
            output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator))
        }

        # --- STAGE 4: EVOLUTIONARY FORGE ---
        yield {output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🛠️ Stage 4: Evolutionary Forge - Refining Champion...</p>")}
        log_accumulator.append("\n**------ STAGE 4: EVOLUTIONARY FORGE ------**")
        evolved_solution_code = evolve_solution(
            str(champion_candidate_data["solution_text"]), champion_candidate_data["evaluation_obj"],
            problem_description_text, problem_type_selected, llm_config_evolution
        )
log_accumulator.append(f"Raw Evolved Solution (Snippet): {str(evolved_solution_code)[:100]}...") | |
evolved_solution_display_markdown = "" | |
ai_test_analysis_markdown = "" | |
if str(evolved_solution_code).startswith("ERROR"): | |
evolved_solution_display_markdown = f"<p style='color: red;'>**Evolution Stage Failed:**<br>{evolved_solution_code}</p>" | |
else: | |
evolved_solution_display_markdown = f"**✨ AlgoForge Omega™ Evolved Artifact ✨:**\n```python\n{evolved_solution_code}\n```" | |
if "python" in problem_type_selected.lower() and user_provided_tests_code.strip(): | |
yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🧪 Post-Evolution: Re-testing Evolved Code...</p>") } | |
log_accumulator.append("\n--- Post-Evolution Test of Evolved Code ---") | |
from core.safe_executor import execute_python_code_with_tests # Ensure imported | |
evolved_code_exec_result = execute_python_code_with_tests(str(evolved_solution_code), user_provided_tests_code, timeout_seconds=10) | |
evolved_solution_display_markdown += ( | |
f"\n\n**Post-Evolution Automated Test Results (Simulated):**\n" | |
f" Status: {'SUCCESS' if evolved_code_exec_result.success else 'FAILED/ERRORS'}\n" | |
f" Tests Attempted: {evolved_code_exec_result.total_tests}\n" | |
f" Tests Passed: {evolved_code_exec_result.passed_tests}\n" | |
f" Execution Time: {evolved_code_exec_result.execution_time:.4f}s\n" | |
) | |
if evolved_code_exec_result.compilation_error: evolved_solution_display_markdown += f" Compilation Error: {evolved_code_exec_result.compilation_error}\n" | |
elif evolved_code_exec_result.timeout_error: evolved_solution_display_markdown += f" Timeout Error.\n" | |
elif evolved_code_exec_result.error: evolved_solution_display_markdown += f" Execution Error/Output: {evolved_code_exec_result.overall_error_summary}\n" | |
elif evolved_code_exec_result.stdout: evolved_solution_display_markdown += f" Execution Stdout:\n```\n{evolved_code_exec_result.stdout[:300].strip()}\n```\n" | |
log_accumulator.append(f" Evolved Code Test Results: {evolved_code_exec_result}") | |
if evolved_code_exec_result.total_tests > 0 : | |
yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🧠 Post-Evolution: AI Analyzing Test Results...</p>") } | |
log_accumulator.append("\n--- AI Analysis of Evolved Code's Test Results ---") | |
exec_summary_for_analysis = str(evolved_code_exec_result.overall_error_summary or "Tests completed.") | |
analysis_user_prompt = format_code_test_analysis_user_prompt(str(evolved_solution_code), user_provided_tests_code, f"Passed: {evolved_code_exec_result.passed_tests}/{evolved_code_exec_result.total_tests}. Detail: {exec_summary_for_analysis}") | |
analysis_system_prompt = get_system_prompt("code_execution_explainer") | |
llm_analysis_config = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": 0.3, "max_tokens": critique_max_tokens + 200} | |
from core.llm_clients import call_huggingface_api, call_gemini_api | |
explanation_response_obj = None | |
if llm_analysis_config["type"] == "hf": explanation_response_obj = call_huggingface_api(analysis_user_prompt, llm_analysis_config["model_id"], llm_analysis_config["temp"], llm_analysis_config["max_tokens"], analysis_system_prompt) | |
elif llm_analysis_config["type"] == "google_gemini": explanation_response_obj = call_gemini_api(analysis_user_prompt, llm_analysis_config["model_id"], llm_analysis_config["temp"], llm_analysis_config["max_tokens"], analysis_system_prompt) | |
if explanation_response_obj and explanation_response_obj.success: | |
ai_test_analysis_markdown = f"**AI Analysis of Evolved Code's Test Performance:**\n{explanation_response_obj.text}" | |
elif explanation_response_obj: | |
ai_test_analysis_markdown = f"<p style='color: orange;'>**AI Analysis of Test Performance Failed:**<br>{explanation_response_obj.error}</p>" | |
log_accumulator.append(f" AI Test Analysis result logged.") | |
        total_time = time.time() - start_time
        log_accumulator.append(f"\n**AlgoForge Omega™ Cycle Complete. Total time: {total_time:.2f} seconds.**")
        yield {
            output_status_bar: gr.HTML(value=f"<p style='color: green;'>✅ Cycle Complete! ({total_time:.2f}s)</p>"),
            output_evolved_accordion: gr.Accordion(label="🌟 Evolved Artifact & Test Analysis", open=True, visible=True),
            output_evolved_markdown: gr.Markdown(value=evolved_solution_display_markdown, visible=True),
            output_ai_test_analysis_markdown: gr.Markdown(value=ai_test_analysis_markdown, visible=bool(ai_test_analysis_markdown)),
            output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
            engage_button: gr.Button(interactive=True)  # Re-enable the button
        }

    except ValueError as ve:  # Catch our specific input/config errors
        log_accumulator.append(f"\n**INPUT/CONFIG ERROR:** {ve}")
        yield {
            output_status_bar: gr.HTML(value=f"<p style='color: red;'>❌ CONFIGURATION ERROR: {ve}</p>", visible=True),
            output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
            engage_button: gr.Button(interactive=True)
        }
    except Exception as e:
        log_accumulator.append(f"\n**UNEXPECTED RUNTIME ERROR:** {type(e).__name__} - {e}\n{traceback.format_exc()}")
        # For the other outputs, clear them or show a general error message
        yield {
            output_status_bar: gr.HTML(value=f"<p style='color: red;'>❌ UNEXPECTED ERROR: {e}. Check logs.</p>", visible=True),
            output_initial_solutions_markdown: gr.Markdown(value="An unexpected error occurred. Please check the interaction log."),
            output_champion_markdown: gr.Markdown(value="Error state."),
            output_evolved_markdown: gr.Markdown(value="Error state."),
            output_ai_test_analysis_markdown: gr.Markdown(value="Error state."),
            output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
            engage_button: gr.Button(interactive=True)
        }
# --- Gradio UI Definition ---
# (This section is the full UI layout.)
css = """
body { font-family: 'Inter', sans-serif; }
.gradio-container { max-width: 1280px !important; margin: auto !important; }
.gr-button-primary {
    background: linear-gradient(135deg, #007bff 0%, #0056b3 100%) !important;
    color: white !important;
    border: none !important;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1) !important;
    transition: all 0.2s ease-in-out !important;
}
.gr-button-primary:hover {
    transform: translateY(-2px) !important;
    box-shadow: 0 6px 12px rgba(0, 0, 0, 0.15) !important;
}
.status-bar p {
    padding: 8px 12px;
    border-radius: 6px;
    font-weight: 500;
    text-align: center;
    margin-bottom: 10px; /* Add some space below the status bar */
}
.accordion-section .gr-markdown { padding-top: 5px; padding-bottom: 5px; }
.output-tabs .gr-tabitem { min-height: 400px; } /* Ensure tabs have some minimum height */
"""
with gr.Blocks(theme=APP_THEME, css=css, title="✨ AlgoForge Omega™ ✨") as app_demo:
    gr.Markdown("# ✨ AlgoForge Omega™ ✨\n### Conceptual AI-Powered Algorithm & Application Foundry")
    gr.Markdown(
        "Define a challenge, configure the AI forge, and witness the (conceptual) evolution of solutions, "
        "now with (simulated) unit testing and more detailed feedback loops!"
    )

    with gr.Row(equal_height=False):
        # --- INPUT COLUMN ---
        with gr.Column(scale=2, min_width=400):
            gr.Markdown("## 💡 1. Define the Challenge")
            with gr.Group():
                problem_type_dropdown = gr.Dropdown(
                    choices=["Python Algorithm with Tests", "Python Algorithm (Critique Only)", "General Algorithm Idea", "Conceptual System Design", "Pseudocode Refinement"],
                    label="Problem Type", value="Python Algorithm with Tests",
                    info="Select '...with Tests' to enable (simulated) unit testing if you provide tests below."
                )
                problem_description_textbox = gr.Textbox(
                    lines=7, label="Problem Description / Desired Outcome",
                    placeholder="Example for 'Python Algorithm with Tests':\n`def calculate_factorial(n: int) -> int:`\nCalculates the factorial of n. Should handle n=0 (returns 1) and raise ValueError for n<0."
                )
                initial_hints_textbox = gr.Textbox(
                    lines=4, label="Initial Thoughts / Constraints / Seed Ideas (Optional)",
                    placeholder="E.g., 'Prefer an iterative solution over recursive for factorial.' or 'Consider time complexity and edge cases like empty inputs.'"
                )
                user_tests_textbox = gr.Textbox(
                    lines=7, label="Python Unit Tests (Optional, one `assert` per line)",
                    placeholder="assert calculate_factorial(0) == 1\nassert calculate_factorial(5) == 120\n# For expected errors (advanced, simulated):\n# try:\n#     calculate_factorial(-1)\n# except ValueError:\n#     assert True\n# else:\n#     assert False, \"ValueError not raised\"",
                    info="For 'Python Algorithm with Tests'. Ensure function names match your problem description. Basic try-except for error testing is crudely simulated."
                )

            gr.Markdown("## ⚙️ 2. Configure The Forge")
            with gr.Group():
                # Dynamic API status (computed after initialize_all_clients() has run)
                status_messages = []
                if not GEMINI_API_READY and not HF_API_READY:
                    status_messages.append("<p style='color:red; font-weight:bold;'>⚠️ CRITICAL: NO APIs CONFIGURED. App non-functional.</p>")
                else:
                    if GEMINI_API_READY:
                        status_messages.append("<p style='color:green;'>✅ Google Gemini API Ready.</p>")
                    else:
                        status_messages.append("<p style='color:orange;'>⚠️ Google Gemini API NOT Ready (Check GOOGLE_API_KEY).</p>")
                    if HF_API_READY:
                        status_messages.append("<p style='color:green;'>✅ Hugging Face API Ready.</p>")
                    else:
                        status_messages.append("<p style='color:orange;'>⚠️ Hugging Face API NOT Ready (Check HF_TOKEN).</p>")
                api_status_html = gr.HTML(value="".join(status_messages))
                model_selection_dropdown = gr.Dropdown(
                    choices=list(AVAILABLE_MODELS_CONFIG.keys()),
                    value=UI_DEFAULT_MODEL_KEY if UI_DEFAULT_MODEL_KEY in AVAILABLE_MODELS_CONFIG else (list(AVAILABLE_MODELS_CONFIG.keys())[0] if AVAILABLE_MODELS_CONFIG else None),
                    label="LLM Core Model",
                    info="Ensure the corresponding API key is correctly set in Space Secrets."
                )
                num_initial_solutions_slider = gr.Slider(minimum=1, maximum=4, value=2, step=1, label="# Initial Solutions (Genesis Engine)", info="More solutions take longer but provide more diversity.")

                with gr.Accordion("Advanced LLM Parameters (Tune with Caution!)", open=False):
                    with gr.Row():
                        genesis_temp_slider = gr.Slider(minimum=0.0, maximum=1.2, value=0.7, step=0.05, label="Genesis Temp")
                        genesis_max_tokens_slider = gr.Slider(minimum=256, maximum=4096, value=1024, step=128, label="Genesis Max Tokens")
                    with gr.Row():
                        critique_temp_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.4, step=0.05, label="Critique Temp")
                        critique_max_tokens_slider = gr.Slider(minimum=150, maximum=2048, value=768, step=64, label="Critique Max Tokens")
                    with gr.Row():
                        evolution_temp_slider = gr.Slider(minimum=0.0, maximum=1.2, value=0.75, step=0.05, label="Evolution Temp")
                        evolution_max_tokens_slider = gr.Slider(minimum=256, maximum=4096, value=1536, step=128, label="Evolution Max Tokens")

            engage_button = gr.Button("🚀 ENGAGE ALGOFORGE OMEGA™ 🚀", variant="primary", size="lg", elem_id="engage_button_elem")
        # --- OUTPUT COLUMN ---
        with gr.Column(scale=3, min_width=600):
            gr.Markdown("## 🔥 3. The Forge's Output")
            output_status_bar = gr.HTML(value="<p>Idle. Define a challenge and engage!</p>", elem_classes=["status-bar"], visible=True)

            with gr.Tabs(elem_id="output_tabs_elem", elem_classes=["output-tabs"]):
                with gr.TabItem("📜 Initial Candidates & Evaluations", id="tab_initial_evals"):
                    output_initial_solutions_accordion = gr.Accordion(label="Initial Candidates & Evaluations", open=True, visible=False, elem_classes=["accordion-section"])
                    with output_initial_solutions_accordion:
                        output_initial_solutions_markdown = gr.Markdown(visible=True)
                with gr.TabItem("🏆 Champion Candidate", id="tab_champion"):
                    output_champion_accordion = gr.Accordion(label="Champion Candidate (Pre-Evolution)", open=True, visible=False, elem_classes=["accordion-section"])
                    with output_champion_accordion:
                        output_champion_markdown = gr.Markdown(visible=True)
                with gr.TabItem("🌟 Evolved & Tested", id="tab_evolved"):
                    output_evolved_accordion = gr.Accordion(label="Evolved Artifact & Test Analysis", open=True, visible=False, elem_classes=["accordion-section"])
                    with output_evolved_accordion:
                        output_evolved_markdown = gr.Markdown(visible=True)
                        output_ai_test_analysis_markdown = gr.Markdown(visible=True, label="AI Analysis of Evolved Code's Tests")
                with gr.TabItem("🛠️ Interaction Log", id="tab_log"):
                    with gr.Accordion(label="Developer Interaction Log", open=True, elem_classes=["accordion-section"]):  # Log stays open
                        output_interaction_log_markdown = gr.Markdown(value="Log will appear here...", visible=True)
    # Connect the button to the orchestration wrapper; the wrapper streams UI updates via yield.
    engage_button.click(
        fn=run_algoforge_orchestrator_ui_wrapper,
        inputs=[
            problem_type_dropdown, problem_description_textbox, initial_hints_textbox, user_tests_textbox,
            num_initial_solutions_slider, model_selection_dropdown,
            genesis_temp_slider, genesis_max_tokens_slider,
            critique_temp_slider, critique_max_tokens_slider,
            evolution_temp_slider, evolution_max_tokens_slider
        ],
        outputs=[  # These are the components updated by the `yield` statements
            output_status_bar,
            output_initial_solutions_accordion, output_initial_solutions_markdown,
            output_champion_accordion, output_champion_markdown,
            output_evolved_accordion, output_evolved_markdown, output_ai_test_analysis_markdown,
            output_interaction_log_markdown,
            engage_button  # To disable/re-enable it
        ]
    )
gr.Markdown("---") | |
gr.Markdown( | |
"**Disclaimer:** This is a conceptual, educational demonstration. " | |
"The (simulated) unit testing feature is for illustrative purposes. " | |
"**NEVER run LLM-generated code from an untrusted source in an unrestricted environment.** " | |
"Implementing robust and secure code sandboxing is complex and absolutely critical for safety in real-world applications. " | |
"LLM outputs always require careful human review and verification." | |
) | |
gr.HTML("<p style='text-align:center; font-size:0.9em; color:grey;'>AlgoForge Omega™ - Powered by Gradio, Gemini & Hugging Face Models</p>") | |
# --- Entry Point for Running the Gradio App ---
if __name__ == "__main__":
    print("=" * 80)
    print("AlgoForge Omega™ Conceptual Demo (WOW UI Attempt) - Launching...")
    print(f"  Google Gemini API Configured (from app.py check): {GEMINI_API_READY}")
    print(f"  Hugging Face API Configured (from app.py check): {HF_API_READY}")
    if not GEMINI_API_READY and not HF_API_READY:
        print("  CRITICAL WARNING: No API keys appear to be configured correctly. The application will likely be non-functional.")
    print(f"  UI Default Model Key: {UI_DEFAULT_MODEL_KEY}")
    print(f"  Available models for UI: {list(AVAILABLE_MODELS_CONFIG.keys())}")
    print("=" * 80)
    app_demo.launch(debug=True, server_name="0.0.0.0")