# algoforge_prime/app.py
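"""AlgoForge Omega™: a conceptual, Gradio-based AI algorithm foundry.

Pipeline: generate initial candidate solutions (Genesis), critique and
(simulated) unit-test them (Critique Crucible), select a champion, then
refine it (Evolutionary Forge).
"""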
import gradio as gr
import os
import time
import json       # for structured config/state (see the optional model override below)
import traceback  # needed by the top-level error handler (traceback.format_exc())

# --- Core Logic Imports ---
from core.llm_clients import initialize_all_clients, is_gemini_api_configured, is_hf_api_configured, call_gemini_api, call_huggingface_api
initialize_all_clients() 

GEMINI_API_READY = is_gemini_api_configured()
HF_API_READY = is_hf_api_configured()

from core.generation_engine import generate_initial_solutions
from core.evaluation_engine import evaluate_solution_candidate, EvaluationResultOutput
from core.evolution_engine import evolve_solution
from prompts.system_prompts import get_system_prompt
from prompts.prompt_templates import format_code_test_analysis_user_prompt
from core.safe_executor import execute_python_code_with_tests, ExecutionResult # For re-evaluating
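# (Fields of ExecutionResult used below: success, total_tests, passed_tests,
# execution_time, compilation_error, timeout_error, error,
# overall_error_summary, stdout.)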

# --- Application Configuration ---
# (This section should ideally move to a config file or env vars for production)
AVAILABLE_MODELS_CONFIG = {}
UI_DEFAULT_MODEL_KEY = None
GEMINI_1_5_PRO_LATEST_ID = "gemini-1.5-pro-latest" # Ensure this is the correct ID usable via API
GEMINI_1_5_FLASH_LATEST_ID = "gemini-1.5-flash-latest"

if GEMINI_API_READY:
    AVAILABLE_MODELS_CONFIG.update({
        "✨ Google Gemini 1.5 Pro (API)": {"id": GEMINI_1_5_PRO_LATEST_ID, "type": "google_gemini"},
        "⚡ Google Gemini 1.5 Flash (API)": {"id": GEMINI_1_5_FLASH_LATEST_ID, "type": "google_gemini"},
        "Legacy Gemini 1.0 Pro (API)": {"id": "gemini-1.0-pro-latest", "type": "google_gemini"},
    })
    UI_DEFAULT_MODEL_KEY = "✨ Google Gemini 1.5 Pro (API)"
else:
    print("WARNING: app.py - Gemini API not configured.")

if HF_API_READY:
    AVAILABLE_MODELS_CONFIG.update({
        "Gemma 2B (HF Test)": {"id": "google/gemma-2b-it", "type": "hf"},
        "Mistral 7B (HF)": {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf"},
    })
    if not UI_DEFAULT_MODEL_KEY:
        UI_DEFAULT_MODEL_KEY = "Gemma 2B (HF Test)"
else:
    print("WARNING: app.py - HF API not configured.")

if not AVAILABLE_MODELS_CONFIG:
    AVAILABLE_MODELS_CONFIG["No Models Available (Setup API Keys!)"] = {"id": "dummy_error", "type": "none"}
    UI_DEFAULT_MODEL_KEY = "No Models Available (Setup API Keys!)"
elif not UI_DEFAULT_MODEL_KEY:
    UI_DEFAULT_MODEL_KEY = next(iter(AVAILABLE_MODELS_CONFIG))
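
# Optional override -- a minimal, hedged sketch of externalizing the model list,
# as the comment above suggests. ALGOFORGE_MODELS_JSON is a hypothetical env var
# (nothing else in this app sets or requires it); if present it is parsed as
# {"Display Name": {"id": ..., "type": ...}} and merged over the defaults.
_models_override = os.getenv("ALGOFORGE_MODELS_JSON")
if _models_override:
    try:
        AVAILABLE_MODELS_CONFIG.update(json.loads(_models_override))
    except json.JSONDecodeError as e:
        print(f"WARNING: app.py - Ignoring unparsable ALGOFORGE_MODELS_JSON: {e}")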

# --- UI Customization ---
# The heavy styling lives in the `css` string passed to gr.Blocks below; the
# theme object only sets hues and fonts.
APP_THEME = gr.themes.Soft(
    primary_hue=gr.themes.colors.blue,
    secondary_hue=gr.themes.colors.sky,
    neutral_hue=gr.themes.colors.slate,
    font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
)
# Further per-variable overrides could be applied via APP_THEME.set(...), e.g.
# input_background_fill="rgba(240, 240, 240, 0.5)" for slightly transparent
# inputs, or a gradient for button_primary_background_fill.

# --- Main Orchestration Logic (More detailed progress and error handling for UI) ---
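def make_llm_config(model_config: dict, temp: float, max_tokens: int) -> dict:
    """Build the per-stage LLM request dict (helper added for clarity: the
    genesis/critique/evolution stages share the same model and differ only in
    sampling temperature and token budget)."""
    return {"type": model_config["type"], "model_id": model_config["id"], "temp": temp, "max_tokens": max_tokens}
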
def run_algoforge_orchestrator_ui_wrapper(
    problem_type_selected: str, problem_description_text: str, initial_hints_text: str, 
    user_provided_tests_code: str, num_initial_solutions_to_gen: int, selected_model_ui_key: str,
    genesis_temp: float, genesis_max_tokens: int, critique_temp: float, critique_max_tokens: int,
    evolution_temp: float, evolution_max_tokens: int,
    # Gradio's Request object can give session info if needed for advanced state
    # request: gr.Request 
):
    """Run the full forge cycle as a generator.

    Yielding partial output dicts lets the UI update incrementally, and the
    top-level try/except converts failures into readable error displays in the
    status bar and log.
    """

    log_accumulator = [f"**AlgoForge Omega™ Cycle Starting at {time.strftime('%Y-%m-%d %H:%M:%S')}**\n"]
    # Initial state for UI outputs
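    # NOTE: in Gradio, yielding a dict keyed by components updates only those
    # components; everything else in the registered `outputs` list keeps its
    # current value.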
    yield {
        output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🚀 Initializing AlgoForge Omega™...</p>", visible=True),
        output_initial_solutions_accordion: gr.Accordion(label="⏳ Generating Initial Candidates...", open=False, visible=True),
        output_initial_solutions_markdown: gr.Markdown(value="Working...", visible=True),
        output_champion_accordion: gr.Accordion(label="⏳ Awaiting Champion Selection...", open=False, visible=False),
        output_champion_markdown: gr.Markdown(value="", visible=False),
        output_evolved_accordion: gr.Accordion(label="⏳ Awaiting Evolution...", open=False, visible=False),
        output_evolved_markdown: gr.Markdown(value="", visible=False),
        output_ai_test_analysis_markdown: gr.Markdown(value="", visible=False),
        output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator), visible=True),
        engage_button: gr.Button(interactive=False) # Disable button during run
    }

    try:
        start_time = time.time()

        if not problem_description_text.strip():
            raise ValueError("Problem Description is mandatory.")

        current_model_config = AVAILABLE_MODELS_CONFIG.get(selected_model_ui_key)
        if not current_model_config or current_model_config["type"] == "none":
            raise ValueError(f"No valid LLM selected ('{selected_model_ui_key}'). Check API key configurations.")
        
        log_accumulator.append(f"Selected Model: {selected_model_ui_key} (Type: {current_model_config['type']}, ID: {current_model_config['id']})")
        log_accumulator.append(f"Problem Type: {problem_type_selected}")
        log_accumulator.append(f"User Tests Provided: {'Yes' if user_provided_tests_code.strip() else 'No'}\n")
        yield { output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }


        llm_config_genesis = make_llm_config(current_model_config, genesis_temp, genesis_max_tokens)
        llm_config_critique = make_llm_config(current_model_config, critique_temp, critique_max_tokens)
        llm_config_evolution = make_llm_config(current_model_config, evolution_temp, evolution_max_tokens)

        # --- STAGE 1: GENESIS ---
        yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🧬 Stage 1: Genesis Engine - Generating Solutions...</p>") }
        log_accumulator.append("**------ STAGE 1: GENESIS ENGINE ------**")
        initial_raw_solutions = generate_initial_solutions(problem_description_text, initial_hints_text, problem_type_selected, num_initial_solutions_to_gen, llm_config_genesis)
        log_accumulator.append(f"Genesis Engine produced {len(initial_raw_solutions)} raw candidate(s).")
        for i, sol_text in enumerate(initial_raw_solutions):
            log_accumulator.append(f"  Candidate {i+1} (Raw Snippet): {str(sol_text)[:100]}...")
        yield { output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }


        # --- STAGE 2: CRITIQUE & AUTOMATED EVALUATION ---
        yield { 
            output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🔬 Stage 2: Critique Crucible - Evaluating Candidates...</p>"),
            output_initial_solutions_accordion: gr.Accordion(label="Initial Candidates & Evaluations (Processing...)", open=True)
        }
        log_accumulator.append("\n**------ STAGE 2: CRITIQUE CRUCIBLE & AUTOMATED EVALUATION ------**")
        evaluated_candidates_list = []
        initial_solutions_md_accumulator = ["**Initial Candidates & Detailed Evaluations:**\n"]

        for i, candidate_solution_text in enumerate(initial_raw_solutions):
            log_accumulator.append(f"\n--- Evaluating Candidate {i+1} ---")
            yield { output_status_bar: gr.HTML(value=f"<p style='color: dodgerblue;'>🔬 Evaluating Candidate {i+1} of {len(initial_raw_solutions)}...</p>") }
            
            evaluation_output_obj = evaluate_solution_candidate(str(candidate_solution_text), problem_description_text, problem_type_selected, user_provided_tests_code, llm_config_critique)
            evaluated_candidates_list.append({"id": i + 1, "solution_text": str(candidate_solution_text), "evaluation_obj": evaluation_output_obj})
            
            log_accumulator.append(f"  Combined Score: {evaluation_output_obj.combined_score}/10")
            # ... (more detailed logging from evaluation_obj as before)
            
            # Update UI with this candidate's evaluation progressively
            current_eval_md = (
                f"**Candidate {i+1} (Score: {evaluation_output_obj.combined_score}/10):**\n"
                f"```python\n{str(candidate_solution_text)}\n```\n\n"
                f"**Evaluation Verdict:**\n{evaluation_output_obj.get_display_critique()}\n---"
            )
            initial_solutions_md_accumulator.append(current_eval_md)
            yield { 
                output_initial_solutions_markdown: gr.Markdown(value="\n".join(initial_solutions_md_accumulator)),
                output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator))
            }
        
        # --- STAGE 3: SELECTION OF CHAMPION ---
        yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🏆 Stage 3: Selecting Champion Candidate...</p>") }
        log_accumulator.append("\n**------ STAGE 3: CHAMPION SELECTION ------**")
        potentially_viable_candidates = [c for c in evaluated_candidates_list if c["evaluation_obj"] and c["evaluation_obj"].combined_score > 0 and not str(c["solution_text"]).startswith("ERROR")]
        
        if not potentially_viable_candidates:
            raise ValueError("No viable candidate solutions found after evaluation. All attempts may have failed or scored too low.")

        champion_candidate_data = max(potentially_viable_candidates, key=lambda c: c["evaluation_obj"].combined_score)
        log_accumulator.append(f"Champion Selected: Candidate {champion_candidate_data['id']} with score {champion_candidate_data['evaluation_obj'].combined_score}/10.")
        champion_display_markdown = (
            f"**Champion Candidate ID: {champion_candidate_data['id']} "
            f"(Original Score: {champion_candidate_data['evaluation_obj'].combined_score}/10):**\n"
            f"```python\n{champion_candidate_data['solution_text']}\n```\n\n"
            f"**Original Comprehensive Evaluation:**\n{champion_candidate_data['evaluation_obj'].get_display_critique()}"
        )
        yield { 
            output_champion_accordion: gr.Accordion(label=f"🏆 Champion: Candidate {champion_candidate_data['id']} (Score: {champion_candidate_data['evaluation_obj'].combined_score}/10)", open=True, visible=True),
            output_champion_markdown: gr.Markdown(value=champion_display_markdown, visible=True),
            output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator))
        }

        # --- STAGE 4: EVOLUTIONARY FORGE ---
        yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🛠️ Stage 4: Evolutionary Forge - Refining Champion...</p>") }
        log_accumulator.append("\n**------ STAGE 4: EVOLUTIONARY FORGE ------**")
        evolved_solution_code = evolve_solution(
            str(champion_candidate_data["solution_text"]), champion_candidate_data["evaluation_obj"],
            problem_description_text, problem_type_selected, llm_config_evolution
        )
        log_accumulator.append(f"Raw Evolved Solution (Snippet): {str(evolved_solution_code)[:100]}...")
        
        evolved_solution_display_markdown = ""
        ai_test_analysis_markdown = ""

        if str(evolved_solution_code).startswith("ERROR"):
            evolved_solution_display_markdown = f"<p style='color: red;'>**Evolution Stage Failed:**<br>{evolved_solution_code}</p>"
        else:
            evolved_solution_display_markdown = f"**✨ AlgoForge Omega™ Evolved Artifact ✨:**\n```python\n{evolved_solution_code}\n```"
            if "python" in problem_type_selected.lower() and user_provided_tests_code.strip():
                yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🧪 Post-Evolution: Re-testing Evolved Code...</p>") }
                log_accumulator.append("\n--- Post-Evolution Test of Evolved Code ---")
                evolved_code_exec_result = execute_python_code_with_tests(str(evolved_solution_code), user_provided_tests_code, timeout_seconds=10)
                
                evolved_solution_display_markdown += (
                    f"\n\n**Post-Evolution Automated Test Results (Simulated):**\n"
                    f"  Status: {'SUCCESS' if evolved_code_exec_result.success else 'FAILED/ERRORS'}\n"
                    f"  Tests Attempted: {evolved_code_exec_result.total_tests}\n"
                    f"  Tests Passed:    {evolved_code_exec_result.passed_tests}\n"
                    f"  Execution Time:  {evolved_code_exec_result.execution_time:.4f}s\n"
                )
                if evolved_code_exec_result.compilation_error:
                    evolved_solution_display_markdown += f"  Compilation Error: {evolved_code_exec_result.compilation_error}\n"
                elif evolved_code_exec_result.timeout_error:
                    evolved_solution_display_markdown += "  Timeout Error.\n"
                elif evolved_code_exec_result.error:
                    evolved_solution_display_markdown += f"  Execution Error/Output: {evolved_code_exec_result.overall_error_summary}\n"
                elif evolved_code_exec_result.stdout:
                    evolved_solution_display_markdown += f"  Execution Stdout:\n```\n{evolved_code_exec_result.stdout[:300].strip()}\n```\n"
                log_accumulator.append(f"  Evolved Code Test Results: {evolved_code_exec_result}")

                if evolved_code_exec_result.total_tests > 0 :
                    yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🧠 Post-Evolution: AI Analyzing Test Results...</p>") }
                    log_accumulator.append("\n--- AI Analysis of Evolved Code's Test Results ---")
                    exec_summary_for_analysis = str(evolved_code_exec_result.overall_error_summary or "Tests completed.")
                    analysis_user_prompt = format_code_test_analysis_user_prompt(str(evolved_solution_code), user_provided_tests_code, f"Passed: {evolved_code_exec_result.passed_tests}/{evolved_code_exec_result.total_tests}. Detail: {exec_summary_for_analysis}")
                    analysis_system_prompt = get_system_prompt("code_execution_explainer")
                    llm_analysis_config = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": 0.3, "max_tokens": critique_max_tokens + 200}
                    
                    explanation_response_obj = None
                    if llm_analysis_config["type"] == "hf":
                        explanation_response_obj = call_huggingface_api(analysis_user_prompt, llm_analysis_config["model_id"], llm_analysis_config["temp"], llm_analysis_config["max_tokens"], analysis_system_prompt)
                    elif llm_analysis_config["type"] == "google_gemini":
                        explanation_response_obj = call_gemini_api(analysis_user_prompt, llm_analysis_config["model_id"], llm_analysis_config["temp"], llm_analysis_config["max_tokens"], analysis_system_prompt)

                    if explanation_response_obj and explanation_response_obj.success: 
                        ai_test_analysis_markdown = f"**AI Analysis of Evolved Code's Test Performance:**\n{explanation_response_obj.text}"
                    elif explanation_response_obj: 
                        ai_test_analysis_markdown = f"<p style='color: orange;'>**AI Analysis of Test Performance Failed:**<br>{explanation_response_obj.error}</p>"
                    log_accumulator.append(f"  AI Test Analysis result logged.")
        
        total_time = time.time() - start_time
        log_accumulator.append(f"\n**AlgoForge Omega™ Cycle Complete. Total time: {total_time:.2f} seconds.**")
        yield {
            output_status_bar: gr.HTML(value=f"<p style='color: green;'>✅ Cycle Complete! ({total_time:.2f}s)</p>"),
            output_evolved_accordion: gr.Accordion(label="🌟 Evolved Artifact & Test Analysis", open=True, visible=True),
            output_evolved_markdown: gr.Markdown(value=evolved_solution_display_markdown, visible=True),
            output_ai_test_analysis_markdown: gr.Markdown(value=ai_test_analysis_markdown, visible=bool(ai_test_analysis_markdown)),
            output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
            engage_button: gr.Button(interactive=True) # Re-enable button
        }

    except ValueError as ve: # Catch our specific input/config errors
        log_accumulator.append(f"\n**INPUT/CONFIG ERROR:** {ve}")
        yield {
            output_status_bar: gr.HTML(value=f"<p style='color: red;'>❌ CONFIGURATION ERROR: {ve}</p>", visible=True),
            output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
            engage_button: gr.Button(interactive=True)
        }
    except Exception as e:
        log_accumulator.append(f"\n**UNEXPECTED RUNTIME ERROR:** {type(e).__name__} - {e}\n{traceback.format_exc()}")
        # For other outputs, we might want to clear them or show a general error message
        yield {
            output_status_bar: gr.HTML(value=f"<p style='color: red;'>❌ UNEXPECTED ERROR: {e}. Check logs.</p>", visible=True),
            output_initial_solutions_markdown: gr.Markdown(value="An unexpected error occurred. Please check the interaction log."),
            output_champion_markdown: gr.Markdown(value="Error state."),
            output_evolved_markdown: gr.Markdown(value="Error state."),
            output_ai_test_analysis_markdown: gr.Markdown(value="Error state."),
            output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
            engage_button: gr.Button(interactive=True)
        }


# --- Gradio UI Definition ---
# (This section is the full UI layout with improvements)
css = """
body { font-family: 'Inter', sans-serif; }
.gradio-container { max-width: 1280px !important; margin: auto !important; }
.gr-button-primary { 
    background: linear-gradient(135deg, #007bff 0%, #0056b3 100%) !important; 
    color: white !important;
    border: none !important;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1) !important;
    transition: all 0.2s ease-in-out !important;
}
.gr-button-primary:hover { 
    transform: translateY(-2px) !important;
    box-shadow: 0 6px 12px rgba(0, 0, 0, 0.15) !important;
}
.status-bar p {
    padding: 8px 12px;
    border-radius: 6px;
    font-weight: 500;
    text-align: center;
    margin-bottom: 10px; /* Add some space below status bar */
}
.accordion-section .gr-markdown { padding-top: 5px; padding-bottom: 5px; }
.output-tabs .gr-tabitem {min-height: 400px;} /* Ensure tabs have some min height */
"""

with gr.Blocks(theme=APP_THEME, css=css, title="✨ AlgoForge Omega™ ✨") as app_demo:
    gr.Markdown("# ✨ AlgoForge Omega™ ✨\n### Conceptual AI-Powered Algorithm & Application Foundry")
    gr.Markdown(
        "Define a challenge, configure the AI forge, and witness the (conceptual) evolution of solutions, "
        "now with (simulated) unit testing and more detailed feedback loops!"
    )

    with gr.Row(equal_height=False):
        # --- INPUT COLUMN ---
        with gr.Column(scale=2, min_width=400):
            gr.Markdown("## 💡 1. Define the Challenge")
            with gr.Group():
                problem_type_dropdown = gr.Dropdown(
                    choices=["Python Algorithm with Tests", "Python Algorithm (Critique Only)", "General Algorithm Idea", "Conceptual System Design", "Pseudocode Refinement"],
                    label="Problem Type", value="Python Algorithm with Tests",
                    info="Select '...with Tests' to enable (simulated) unit testing if you provide tests below."
                )
                problem_description_textbox = gr.Textbox(
                    lines=7, label="Problem Description / Desired Outcome",
                    placeholder="Example for 'Python Algorithm with Tests':\n`def calculate_factorial(n: int) -> int:`\nCalculates factorial of n. Should handle n=0 (returns 1) and raise ValueError for n<0."
                )
                initial_hints_textbox = gr.Textbox(
                    lines=4, label="Initial Thoughts / Constraints / Seed Ideas (Optional)",
                    placeholder="E.g., 'Prefer an iterative solution over recursive for factorial.' or 'Consider time complexity and edge cases like empty inputs.'"
                )
                user_tests_textbox = gr.Textbox(
                    lines=7, label="Python Unit Tests (Optional, one `assert` per line)",
                    placeholder="assert calculate_factorial(0) == 1\nassert calculate_factorial(5) == 120\n# For expected errors (advanced, simulated):\n# try:\n#   calculate_factorial(-1)\n# except ValueError:\n#   assert True\n# else:\n#   assert False, \"ValueError not raised\"",
                    info="For 'Python Algorithm with Tests'. Ensure function names match your problem description. Basic try-except for error testing is crudely simulated."
                )
            
            gr.Markdown("## ⚙️ 2. Configure The Forge")
            with gr.Group():
                # Build the API status banner first and pass it at construction
                # time, rather than mutating .value after the component exists.
                status_messages = []
                if not GEMINI_API_READY and not HF_API_READY:
                    status_messages.append("<p style='color:red; font-weight:bold;'>⚠️ CRITICAL: NO APIs CONFIGURED. App non-functional.</p>")
                else:
                    if GEMINI_API_READY:
                        status_messages.append("<p style='color:green;'>✅ Google Gemini API Ready.</p>")
                    else:
                        status_messages.append("<p style='color:orange;'>⚠️ Google Gemini API NOT Ready (Check GOOGLE_API_KEY).</p>")
                    if HF_API_READY:
                        status_messages.append("<p style='color:green;'>✅ Hugging Face API Ready.</p>")
                    else:
                        status_messages.append("<p style='color:orange;'>⚠️ Hugging Face API NOT Ready (Check HF_TOKEN).</p>")
                api_status_html = gr.HTML(value="".join(status_messages))


                model_selection_dropdown = gr.Dropdown(
                    choices=list(AVAILABLE_MODELS_CONFIG.keys()),
                    value=UI_DEFAULT_MODEL_KEY if UI_DEFAULT_MODEL_KEY in AVAILABLE_MODELS_CONFIG else (list(AVAILABLE_MODELS_CONFIG.keys())[0] if AVAILABLE_MODELS_CONFIG else None),
                    label="LLM Core Model",
                    info="Ensure the corresponding API key is correctly set in Space Secrets."
                )
                num_initial_solutions_slider = gr.Slider(minimum=1, maximum=4, value=2, step=1, label="# Initial Solutions (Genesis Engine)", info="More solutions take longer but provide more diversity.")

                with gr.Accordion("Advanced LLM Parameters (Tune with Caution!)", open=False):
                    with gr.Row():
                        genesis_temp_slider = gr.Slider(minimum=0.0, maximum=1.2, value=0.7, step=0.05, label="Genesis Temp")
                        genesis_max_tokens_slider = gr.Slider(minimum=256, maximum=4096, value=1024, step=128, label="Genesis Max Tokens")
                    with gr.Row():
                        critique_temp_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.4, step=0.05, label="Critique Temp")
                        critique_max_tokens_slider = gr.Slider(minimum=150, maximum=2048, value=768, step=64, label="Critique Max Tokens")
                    with gr.Row():
                        evolution_temp_slider = gr.Slider(minimum=0.0, maximum=1.2, value=0.75, step=0.05, label="Evolution Temp")
                        evolution_max_tokens_slider = gr.Slider(minimum=256, maximum=4096, value=1536, step=128, label="Evolution Max Tokens")
            
            engage_button = gr.Button("🚀 ENGAGE ALGOFORGE OMEGA™ 🚀", variant="primary", size="lg", elem_id="engage_button_elem")

        # --- OUTPUT COLUMN ---
        with gr.Column(scale=3, min_width=600):
            gr.Markdown("## 🔥 3. The Forge's Output")
            output_status_bar = gr.HTML(value="<p>Idle. Define a challenge and engage!</p>", elem_classes=["status-bar"], visible=True)
            
            with gr.Tabs(elem_id="output_tabs_elem", elem_classes=["output-tabs"]):
                with gr.TabItem("📜 Initial Candidates & Evaluations", id="tab_initial_evals"):
                    output_initial_solutions_accordion = gr.Accordion(label="Initial Candidates & Evaluations", open=True, visible=False, elem_classes=["accordion-section"])
                    with output_initial_solutions_accordion:
                        output_initial_solutions_markdown = gr.Markdown(visible=True)
                
                with gr.TabItem("🏆 Champion Candidate", id="tab_champion"):
                    output_champion_accordion = gr.Accordion(label="Champion Candidate (Pre-Evolution)", open=True, visible=False, elem_classes=["accordion-section"])
                    with output_champion_accordion:
                        output_champion_markdown = gr.Markdown(visible=True)
                
                with gr.TabItem("🌟 Evolved & Tested", id="tab_evolved"):
                    output_evolved_accordion = gr.Accordion(label="Evolved Artifact & Test Analysis", open=True, visible=False, elem_classes=["accordion-section"])
                    with output_evolved_accordion:
                        output_evolved_markdown = gr.Markdown(visible=True)
                        output_ai_test_analysis_markdown = gr.Markdown(visible=True, label="AI Analysis of Evolved Code's Tests")
                
                with gr.TabItem("🛠️ Interaction Log", id="tab_log"):
                    with gr.Accordion(label="Developer Interaction Log", open=True, elem_classes=["accordion-section"]): # Always open log
                        output_interaction_log_markdown = gr.Markdown(value="Log will appear here...", visible=True)
        
        # Connect button to the orchestration function wrapper
        # The wrapper handles UI updates via yield
        engage_button.click(
            fn=run_algoforge_orchestrator_ui_wrapper, # Call the wrapper
            inputs=[
                problem_type_dropdown, problem_description_textbox, initial_hints_textbox, user_tests_textbox,
                num_initial_solutions_slider, model_selection_dropdown,
                genesis_temp_slider, genesis_max_tokens_slider,
                critique_temp_slider, critique_max_tokens_slider,
                evolution_temp_slider, evolution_max_tokens_slider
            ],
            outputs=[ # These are the components updated by the `yield` statements
                output_status_bar,
                output_initial_solutions_accordion, output_initial_solutions_markdown,
                output_champion_accordion, output_champion_markdown,
                output_evolved_accordion, output_evolved_markdown, output_ai_test_analysis_markdown,
                output_interaction_log_markdown,
                engage_button # To disable/re-enable it
            ]
        )
    
    gr.Markdown("---")
    gr.Markdown(
        "**Disclaimer:** This is a conceptual, educational demonstration. "
        "The (simulated) unit testing feature is for illustrative purposes. "
        "**NEVER run LLM-generated code from an untrusted source in an unrestricted environment.** "
        "Implementing robust and secure code sandboxing is complex and absolutely critical for safety in real-world applications. "
        "LLM outputs always require careful human review and verification."
    )
    gr.HTML("<p style='text-align:center; font-size:0.9em; color:grey;'>AlgoForge Omega™ - Powered by Gradio, Gemini & Hugging Face Models</p>")


# --- Entry Point for Running the Gradio App ---
if __name__ == "__main__":
    print("="*80)
    print("AlgoForge Omega™ Conceptual Demo (WOW UI Attempt) - Launching...")
    print(f"  Google Gemini API Configured (from app.py check): {GEMINI_API_READY}")
    print(f"  Hugging Face API Configured (from app.py check): {HF_API_READY}")
    if not GEMINI_API_READY and not HF_API_READY:
        print("  CRITICAL WARNING: No API keys seem to be configured correctly. The application will likely be non-functional.")
    print(f"  UI Default Model Key: {UI_DEFAULT_MODEL_KEY}")
    print(f"  Available models for UI: {list(AVAILABLE_MODELS_CONFIG.keys())}")
    print("="*80)
    app_demo.launch(debug=True, server_name="0.0.0.0")