Update app.py
app.py CHANGED
@@ -1,237 +1,216 @@
 import os
-import re
-from typing import Dict, List, Optional, Tuple
-import base64
-import mimetypes
-import PyPDF2
-import docx
-import cv2
-import numpy as np
-from PIL import Image
-import pytesseract
-import requests
-from urllib.parse import urlparse, urljoin
-from bs4 import BeautifulSoup
 import json
-import
-import io
-import asyncio
-
 import gradio as gr
 from huggingface_hub import InferenceClient
 from tavily import TavilyClient

-# ---
-
-# --- Multi-Mode Prompt Engineering ---

-#
-
-
-
-
-3.
-4. Embed all JavaScript logic within the HTML file.
-5. Provide a complete, functional application, not just a snippet.
-6. For website redesign tasks, use the provided original HTML as a starting point, preserving content and structure while modernizing the design.
-Always output ONLY the raw HTML code inside a ```html ... ``` code block.
-"""

-#
-
-

-# --- Client
 HF_TOKEN = os.getenv('HF_TOKEN')
 TAVILY_API_KEY = os.getenv('TAVILY_API_KEY')
-
 client = InferenceClient(token=HF_TOKEN)
 tavily_client = TavilyClient(api_key=TAVILY_API_KEY) if TAVILY_API_KEY else None

-#
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    return

 def create_zip_file(html_content: str, user_prompt: str) -> Optional[str]:
     if not html_content: return None
     zip_buffer = io.BytesIO()
     with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zf:
-        readme = f"#
         zf.writestr("index.html", html_content)
         zf.writestr("README.md", readme)
-    zip_path = "/tmp/
     with open(zip_path, "wb") as f: f.write(zip_buffer.getvalue())
     return zip_path

-# --- Core
-
-async def run_agent(prompt: str, context: str, model_id: str):
-    """Generic function to run an agent asynchronously."""
     messages = [{"role": "system", "content": prompt}, {"role": "user", "content": context}]
     try:
-
-
-
-            lambda: client.chat_completion(model=model_id, messages=messages, max_tokens=2048)
-        )
-        return response.choices[0].message.content or "Agent returned an empty response."
     except Exception as e:
-        return f"Agent

-async def
-
-
-
-
-
-
-
-
-
-
-
-    try:
-        search_results = tavily_client.search(query=f"Market analysis and technical considerations for: {text_input}", search_depth="advanced", max_results=5)
-        web_context = "\n\n---\n\n".join([f"**Source:** {res['title']}\n{res['content']}" for res in search_results['results']])
-    except Exception as e:
-        web_context = f"Web search failed: {e}"
-
-    full_context_for_agents = f"{context}\n\n--- Web Search Context ---\n{web_context}"
-
-    # Step 3: Run Cognitive Council in Parallel
-    progress(0.5, desc="Convening Cognitive Council...")
-    critic_task = asyncio.create_task(run_agent(CRITIC_AGENT_PROMPT, full_context_for_agents, CRITIC_MODEL_ID))
-    visionary_task = asyncio.create_task(run_agent(VISIONARY_AGENT_PROMPT, full_context_for_agents, VISIONARY_MODEL_ID))
-
-    critic_report = await critic_task
-    visionary_report = await visionary_task
-    progress(1, desc="Analysis Complete.")
-
-    analysis_summary = f"""
-## 🧠 Cognitive Council Analysis
-
-### visionary-thoughts ✨ The Visionary's Report
-{visionary_report}
-
-### critical-analysis 🧐 The Critic's Report
-{critic_report}
-
----
-
-## 🌐 Web Search Context
-{web_context}
-"""
-    return analysis_summary, full_context_for_agents, gr.update(visible=True)
-
-def generate_application_code(context: str, progress=gr.Progress(track_tqdm=True)):
-    """Generates the application code."""
-    progress(0, desc="Engaging Code Generation Engine...")
-
-    # For a single-file generator, we don't need async here, but we could add it.
-    response = client.chat_completion(
-        model=CODE_MODEL_ID,
-        messages=[{"role": "system", "content": CODE_GEN_PROMPT}, {"role": "user", "content": context}],
-        max_tokens=8192,
-        stream=False  # We need the full code to process it
-    )
-    raw_code = response.choices[0].message.content or ""
-    clean_code = remove_code_block(raw_code)
-
-    progress(1, desc="Build Complete.")
-
-    zip_path = create_zip_file(clean_code, context.split("\n\n---")[0])

-

 # --- Gradio UI Definition ---
-with gr.Blocks(theme=gr.themes.Soft(primary_hue="purple"),
-
-    # State to hold the full context after analysis
-    analysis_context = gr.State("")
-
-    gr.Markdown("# 🤖 AnyCoder Ω: The Cognitive-Generative IDE")
-    gr.Markdown("From raw idea, to deep analysis, to fully-built application in one seamless workflow.")

     with gr.Row():
-        with gr.Column(scale=
             gr.Markdown("### 🎛 Control Deck")

-        with gr.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        with gr.Tab("Preview", id="preview_tab"):
-            preview_output = gr.HTML(label="Live Preview")
-            download_button = gr.File(label="Download Project as .zip", visible=False, interactive=False)
-
-        with gr.Tab("Code", id="code_tab"):
-            code_output = gr.Code(language="html", label="Generated Code")
-
-    # --- Event Handlers ---

-#
-
-
-
-
-
-
-
-
-    generate_button.click(
-        on_generate_click,
-        inputs=[objective_selector, prompt_input, file_input],
-        outputs=[analysis_output, analysis_context, build_from_analysis_button, output_tabs, code_output, preview_output, download_button]
     )

-
-
-
-
-
-
-
-
-
     )

-
-if
-

 import os
 import json
+from typing import Dict, List, Optional
 import gradio as gr
+from fastapi import FastAPI
 from huggingface_hub import InferenceClient
+import asyncio
+import plotly.graph_objects as go
+import networkx as nx
+import zipfile
+import io
 from tavily import TavilyClient
+import PyPDF2
+import docx
+from PIL import Image
+import pytesseract

+# --- Synapsera Ω Prime: The Definitive Cognitive-Generative OS ---

+# --- Agent Prompts ---
+SYNTHESIS_AGENT_PROMPT = "You are the **Synthesis Agent**. Analyze the user's input and provided web context. Create a foundational Semantic Thought Graph. Output ONLY a valid JSON object with 'summary', 'nodes', and 'edges'."
+CRITIC_AGENT_PROMPT = "You are **THE CRITIC**. Analyze the provided Thought Graph. Ruthlessly identify every logical fallacy, weak assumption, and market risk. Be concise and direct. Output as Markdown."
+VISIONARY_AGENT_PROMPT = "You are **THE VISIONARY**. Analyze the provided Thought Graph. Identify emergent patterns and high-impact 'what if' scenarios. Extrapolate the idea into its most ambitious form. Output as an inspiring Markdown report."
+ARCHITECT_AGENT_PROMPT = "You are the **Architect Agent**. Given a user request, generate a complete, single-file HTML application using Tailwind CSS via CDN. Your output must be ONLY the raw HTML code."
+WEB_SEARCH_PROMPT = "Based on the user's idea, generate a JSON list of 3-5 concise, factual web search queries. Output ONLY the JSON. Example: {\"queries\": [\"market size for X\"]}"
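
For reference, a sketch of the JSON contract these prompts imply. The `summary`/`nodes`/`edges` keys come from `SYNTHESIS_AGENT_PROMPT`; the per-node fields are the ones `render_3d_graph` and the Focus Panel below actually read. All values here are invented:

```python
# Hypothetical Synthesis Agent payload. 'id', 'label', and 'type' are consumed by
# render_3d_graph; 'importance', 'content', and 'mirror_analysis' by the Focus Panel.
example_graph = {
    "summary": "Decentralized peer review",
    "nodes": [
        {"id": "n1", "label": "Reviewer incentives", "type": "Assumption",
         "importance": "high", "content": "...", "mirror_analysis": "..."},
        {"id": "n2", "label": "Reputation staking", "type": "Mechanism",
         "importance": "medium", "content": "...", "mirror_analysis": "..."},
    ],
    "edges": [{"source": "n1", "target": "n2"}],
}
```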

+# --- Multi-Model Configuration ---
+SYNTHESIS_MODEL = "mistralai/Mixtral-8x22B-Instruct-v0.1"
+CRITIC_MODEL = "mistralai/Mixtral-8x22B-Instruct-v0.1"
+VISIONARY_MODEL = "meta-llama/Meta-Llama-3-70B-Instruct"
+ARCHITECT_MODEL = "deepseek-ai/deepseek-coder-v2-lite-instruct"

+# --- Client Initialization ---
 HF_TOKEN = os.getenv('HF_TOKEN')
 TAVILY_API_KEY = os.getenv('TAVILY_API_KEY')
 client = InferenceClient(token=HF_TOKEN)
 tavily_client = TavilyClient(api_key=TAVILY_API_KEY) if TAVILY_API_KEY else None

+# --- Helper Functions ---
+def render_3d_graph(graph_data: Optional[Dict] = None):
+    if not graph_data or not graph_data.get('nodes'):
+        return go.Figure(layout={"title_text": "Cognitive Nebula Awaiting Synthesis...", "template": "plotly_dark", "height": 600})
+    G = nx.Graph()
+    for node in graph_data['nodes']: G.add_node(node['id'])
+    for edge in graph_data.get('edges', []):
+        if edge['source'] in G.nodes and edge['target'] in G.nodes: G.add_edge(edge['source'], edge['target'])
+    pos = nx.spring_layout(G, dim=3, seed=42, iterations=70)
+    node_ids_in_order = list(G.nodes())
+    pos_data = [pos[node_id] for node_id in node_ids_in_order]
+    node_x, node_y, node_z = zip(*pos_data) if pos_data else ([], [], [])
+    edge_x, edge_y, edge_z = [], [], []
+    for edge in G.edges():
+        x0, y0, z0 = pos[edge[0]]; x1, y1, z1 = pos[edge[1]]
+        edge_x.extend([x0, x1, None]); edge_y.extend([y0, y1, None]); edge_z.extend([z0, z1, None])
+    node_info = {n['id']: n for n in graph_data['nodes']}
+    node_trace = go.Scatter3d(x=node_x, y=node_y, z=node_z, mode='markers+text',
+                              text=[f"<b>{node_info[node_id]['label']}</b>" for node_id in node_ids_in_order],
+                              customdata=[json.dumps(node_info[node_id]) for node_id in node_ids_in_order],
+                              hoverinfo='text', hovertext=[f"<b>{node_info[node_id]['label']}</b><br>Type: {node_info[node_id]['type']}" for node_id in node_ids_in_order],
+                              marker=dict(size=12, line=dict(width=2)))
+    edge_trace = go.Scatter3d(x=edge_x, y=edge_y, z=edge_z, mode='lines', line=dict(color='#888', width=1), hoverinfo='none')
+    fig = go.Figure(data=[edge_trace, node_trace])
+    fig.update_layout(title=f"Cognitive Nebula: {graph_data.get('summary', '')}", showlegend=False, template="plotly_dark", height=600, clickmode='event+select')
+    return fig
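
As a quick sanity check, a payload like the one sketched earlier renders directly, outside of Gradio:

```python
fig = render_3d_graph(example_graph)          # example_graph from the sketch above
fig.write_html("/tmp/nebula_preview.html")    # path is arbitrary; open in a browser to inspect
```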

 def create_zip_file(html_content: str, user_prompt: str) -> Optional[str]:
     if not html_content: return None
     zip_buffer = io.BytesIO()
     with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zf:
+        readme = f"# Synapsera-Built Application\n\n> {user_prompt}"
         zf.writestr("index.html", html_content)
         zf.writestr("README.md", readme)
+    zip_path = "/tmp/synapsera_project.zip"
     with open(zip_path, "wb") as f: f.write(zip_buffer.getvalue())
     return zip_path
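
A quick round-trip check of the archive, with hypothetical content:

```python
path = create_zip_file("<!DOCTYPE html><html></html>", "demo prompt")
if path:
    print(zipfile.ZipFile(path).namelist())  # expect ['index.html', 'README.md']
```

Note that the fixed `/tmp/synapsera_project.zip` path is shared across sessions, so concurrent builds will overwrite each other's archive.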

+# --- Core Asynchronous Multi-Agent Logic ---
+async def run_agent(prompt: str, context: str, model_id: str, json_mode: bool = False):
     messages = [{"role": "system", "content": prompt}, {"role": "user", "content": context}]
+    response_format = {"type": "json_object"} if json_mode else None
     try:
+        response = await asyncio.to_thread(
+            client.chat_completion, model=model_id, messages=messages, max_tokens=8192, response_format=response_format)
+        return response.choices[0].message.content or ""
     except Exception as e:
+        return json.dumps({"error": f"Agent {model_id} failed: {e}"})
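
`asyncio.to_thread` keeps the blocking `InferenceClient` call off the event loop, which is what lets several agents run concurrently under `asyncio.gather` later on. Whether the OpenAI-style `response_format={"type": "json_object"}` is honored depends on the model's serving backend, which is presumably why callers guard the result with `json.loads`. A minimal standalone invocation (the idea text is made up):

```python
async def demo():
    # WEB_SEARCH_PROMPT asks for JSON, so json_mode=True requests structured output.
    raw = await run_agent(WEB_SEARCH_PROMPT, "a carbon-offset marketplace", SYNTHESIS_MODEL, json_mode=True)
    print(json.loads(raw).get("queries", raw))

asyncio.run(demo())
```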

+async def get_web_context(text_input: str, model_id: str):
+    if not tavily_client: return "*Tavily client not configured. Skipping web search.*"
+    try:
+        search_queries_str = await run_agent(WEB_SEARCH_PROMPT, text_input, model_id, json_mode=True)
+        search_queries = json.loads(search_queries_str).get("queries", [])
+        if not search_queries: return "No relevant search queries were generated."
+        search_results = await asyncio.to_thread(tavily_client.search, query=" ".join(search_queries), search_depth="advanced", max_results=3)
+        return "\n\n---\n\n".join([f"**Source:** {res['title']}\n{res['content']}" for res in search_results['results']])
+    except Exception as e: return f"Web search failed: {e}"
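
Note that `get_web_context` concatenates all generated queries into a single Tavily search string. A variant that keeps each query separate and fans them out concurrently might look like this (a sketch, not what the app does):

```python
async def search_each(queries: List[str]):
    # Hypothetical alternative: one Tavily call per generated query, run concurrently.
    results = await asyncio.gather(*[
        asyncio.to_thread(tavily_client.search, query=q, search_depth="advanced", max_results=2)
        for q in queries
    ])
    return results
```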
+
+async def universal_synthesis_flow(synthesis_mode, text_input, progress=gr.Progress(track_tqdm=True)):
+    yield {terminal_output: gr.update(value="`[STATUS] Engaging Synapsera OS...`")}

+    web_context = await get_web_context(text_input, SYNTHESIS_MODEL)
+    yield {terminal_output: gr.update(value=f"`[STATUS] Web context acquired.`\n\n**Web Search Summary:**\n{web_context[:400]}...")}
+    full_context = f"User Idea: {text_input}\n\nWeb Context:\n{web_context}"
+
+    if synthesis_mode == "Analyze Thought":
+        graph_json_str = await run_agent(SYNTHESIS_AGENT_PROMPT, full_context, SYNTHESIS_MODEL, json_mode=True)
+        try: graph_data = json.loads(graph_json_str)
+        except json.JSONDecodeError:
+            yield {terminal_output: gr.update(value=f"`[FATAL] Synthesis Agent failed.`")}
+            return
+        yield {
+            session_state: graph_data,
+            terminal_output: gr.update(value="`[STATUS] Thought Model synthesized. Convening Cognitive Council...`"),
+            cognitive_nebula: render_3d_graph(graph_data)
+        }
+        council_context = f"Thought Graph:\n{graph_json_str}\n\nWeb Context:\n{web_context}"
+        critic_task = run_agent(CRITIC_AGENT_PROMPT, council_context, CRITIC_MODEL)
+        visionary_task = run_agent(VISIONARY_AGENT_PROMPT, council_context, VISIONARY_MODEL)
+        critic_res, visionary_res = await asyncio.gather(critic_task, visionary_task)
+        yield {
+            terminal_output: gr.update(value="`[COMPLETE] Cognitive Council session concluded.`"),
+            critic_output: critic_res, visionary_output: visionary_res,
+            build_from_analysis_btn: gr.update(visible=True)
+        }
+    else:  # Build Application
+        generated_code = await run_agent(ARCHITECT_AGENT_PROMPT, full_context, ARCHITECT_MODEL)
+        zip_path = create_zip_file(generated_code, text_input)
+        yield {
+            terminal_output: gr.update(value="`[COMPLETE] Application build complete.`"),
+            live_preview: generated_code, source_code: generated_code,
+            download_button: gr.update(value=zip_path, visible=True)
+        }
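
Two notes on the UI section that follows. First, `universal_synthesis_flow` is an async generator that yields dicts keyed by component objects; Gradio applies such dict updates only to components listed in the handler's `outputs=[...]`, leaving unmentioned components untouched between yields. Second, the nested `gr.Blocks` containers used below for `analysis_canvas` and `build_canvas` are unusual; Gradio's documented pattern for toggleable regions is `gr.Group` or `gr.Column`. A minimal sketch of the dict-style streaming pattern:

```python
with gr.Blocks() as demo:
    status = gr.Markdown("`idle`")
    btn = gr.Button("Run")

    def stream():
        # Dict updates only touch the components named as keys.
        yield {status: "`working...`"}
        yield {status: "`done`"}

    btn.click(stream, outputs=[status])
```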

 # --- Gradio UI Definition ---
+with gr.Blocks(theme=gr.themes.Soft(primary_hue="purple"), css="footer{display:none !important}") as gradio_app:
+    session_state = gr.State({})

+    gr.Markdown("# 🧠 Synapsera Ω Prime: The Cognitive-Generative OS")
+
     with gr.Row():
+        with gr.Column(scale=1, min_width=450):
             gr.Markdown("### 🎛 Control Deck")
+            synthesis_mode = gr.Radio(["Analyze Thought", "Build Application"], label="Objective", value="Analyze Thought")
+            text_input = gr.Textbox(lines=10, label="Input Your Core Thought or Application Idea", placeholder="A decentralized platform for peer-reviewing scientific research...")
+            synthesize_btn = gr.Button("Engage Synapsera", variant="primary", size="lg")
+            build_from_analysis_btn = gr.Button("Build This Analyzed Concept ➡️", variant="secondary", visible=False)
+
+        with gr.Column(scale=2):
+            terminal_output = gr.Markdown("`[STATUS] Idle`")

+            with gr.Blocks(visible=True) as analysis_canvas:
+                cognitive_nebula = gr.Plot()
+                with gr.Tabs():
+                    with gr.Tab("🔬 Focus Panel"):
+                        focus_panel_md = gr.Markdown("Click a node on the graph to focus.")
+                    with gr.Tab("🧐 The Critic's Report"):
+                        critic_output = gr.Markdown("*Awaiting analysis...*")
+                    with gr.Tab("✨ The Visionary's Report"):
+                        visionary_output = gr.Markdown("*Awaiting analysis...*")
+
+            with gr.Blocks(visible=False) as build_canvas:
+                with gr.Tabs():
+                    with gr.Tab("🚀 Live Preview"):
+                        live_preview = gr.HTML()
+                    with gr.Tab("📄 Source Code"):
+                        source_code = gr.Code(language="html")
+                download_button = gr.File(label="Download Project (.zip)", visible=False, interactive=False)

+    # --- Event Handlers ---
+    def on_mode_change(mode):
+        is_analysis = mode == "Analyze Thought"
+        return gr.update(visible=is_analysis), gr.update(visible=not is_analysis)
+    synthesis_mode.change(on_mode_change, inputs=synthesis_mode, outputs=[analysis_canvas, build_canvas])
+
+    synthesize_btn.click(
+        universal_synthesis_flow,
+        inputs=[synthesis_mode, text_input],
+        outputs=[session_state, terminal_output, cognitive_nebula, critic_output, visionary_output, build_from_analysis_btn, live_preview, source_code, download_button]
     )

+    def on_build_from_analysis(graph_data, progress=gr.Progress(track_tqdm=True)):
+        # This function is now simpler as it just calls the agent
+        full_context = f"User Idea:\n{graph_data.get('summary', '')}\n\nFull Thought Model:\n{json.dumps(graph_data, indent=2)}"
+        # We can't use async here directly, so we call the sync version or adapt
+        # For simplicity, we'll do a blocking call here.
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+        generated_code = loop.run_until_complete(run_agent(ARCHITECT_AGENT_PROMPT, full_context, ARCHITECT_MODEL))
+        zip_path = create_zip_file(generated_code, graph_data.get('summary', ''))
+        return gr.update(visible=False), gr.update(visible=True), generated_code, generated_code, gr.update(value=zip_path, visible=True)
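
The manual `new_event_loop()` / `run_until_complete()` pairing works because Gradio runs sync handlers off the main event loop, but the loop it creates is never closed. Assuming that threading model holds, `asyncio.run` would be the tidier drop-in for those three lines (a sketch):

```python
generated_code = asyncio.run(run_agent(ARCHITECT_AGENT_PROMPT, full_context, ARCHITECT_MODEL))
```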
+
+    build_from_analysis_btn.click(
+        on_build_from_analysis,
+        inputs=[session_state],
+        outputs=[analysis_canvas, build_canvas, live_preview, source_code, download_button]
     )

+    def select_node_from_graph(evt: gr.SelectData):
+        if evt.value:
+            # The customdata is a JSON string of the node object
+            node_data = json.loads(evt.customdata)
+            md_output = f"""
+### Node: {node_data.get('label')}
+**Type:** {node_data.get('type')}
+**Importance:** {node_data.get('importance')}
+**Content:** {node_data.get('content')}
+**Mirror Analysis:** {node_data.get('mirror_analysis')}
+"""
+            return md_output
+        return "Click a node on the graph to focus."
+
+    cognitive_nebula.select(select_node_from_graph, outputs=[focus_panel_md])
+
+# --- FastAPI Application ---
+app = FastAPI()
+app = gr.mount_gradio_app(app, gradio_app, path="/")
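
With the Gradio UI mounted at `/`, the module now exposes a plain FastAPI `app`, so it is served through an ASGI server rather than `demo.launch()`. A minimal local run, assuming this file is `app.py` and uvicorn is installed (7860 is the usual Hugging Face Spaces port):

```python
import uvicorn

if __name__ == "__main__":
    # Serves the FastAPI app, with the Gradio UI mounted at "/".
    uvicorn.run(app, host="0.0.0.0", port=7860)
```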