Leonydis137 committed (verified)
Commit 993870e · 1 Parent(s): c22f36e

Update app.py

Files changed (1):
  1. app.py +93 -178
app.py CHANGED
@@ -1,36 +1,24 @@
 from fastapi import FastAPI
 import gradio as gr
-from orchestrator import run_agents
-from memory import init_memory
-
-app = FastAPI()
-memory = init_memory()
-
-def agent_interface(goal):
-    return run_agents(goal, memory)
-
-gr_interface = gr.Interface(
-    fn=agent_interface,
-    inputs=gr.Textbox(lines=2, placeholder="Describe your task...", label="Your Task"),
-    outputs=gr.Textbox(lines=20, label="Multi-Agent Output"),
-    title="🧠 Multi-Agent Autonomous AI System",
-    description="Planner → Executor → Critic | Contextual Memory | Logs | Expandable AI System"
-)
-
-gr.mount_gradio_app(app, gr_interface, path="/gradio")
-
-@app.get("/")
-def root():
-    return {"message": "Multi-Agent AI system with logging and memory."}
-''',
-
 from src.core.cognitive_engine import CognitiveEngine
 from src.utils.hf_packager import HFSpacePackager
-import gradio as gr
-import time
+from agents.planner import plan_task
+from agents.executor import execute_step
+from agents.critic import review_result
+from memory import init_memory, add_to_memory, search_memory
+import uuid
+from datetime import datetime
 import os
 import psutil
 import json
+import time
+import subprocess
+from sentence_transformers import SentenceTransformer
+import numpy as np
+import faiss
+
+app = FastAPI()
+memory = init_memory()
 
 # Initialize components
 cognitive_engine = CognitiveEngine()
@@ -78,6 +66,40 @@ def run_cycle(task_description):
         ""
     )
 
+# Multi-agent system
+def run_agents(goal, memory, session_id=None):
+    session_id = session_id or str(uuid.uuid4())[:8]
+    timestamp = datetime.now().isoformat(timespec="seconds")
+
+    log = f"[{timestamp}] Session {session_id}: Goal: {goal}\n"
+    history = search_memory(goal, memory)
+    context = "\n".join(history[:3]) if history else "No relevant memory found."
+
+    plan = plan_task(goal, memory)
+    add_to_memory(f"[{session_id}] 🧠 Context: {context}", memory)
+    add_to_memory(f"[{session_id}] 🗂 Plan: {plan}", memory)
+
+    outputs = []
+    for step in plan:
+        result = execute_step(step)
+        add_to_memory(f"[{session_id}] 🔧 Executor ran: {step} -> {result}", memory)
+
+        review = review_result(step, result)
+        add_to_memory(f"[{session_id}] 🔍 Critic: {review}", memory)
+
+        step_log = f"🔹 Step: {step}\n🛠 Result: {result}\n🧠 Review: {review}\n"
+        log += step_log + "\n"
+        outputs.append(step_log)
+
+    with open("log.txt", "a") as f:
+        f.write(log + "\n")
+
+    return "\n".join(outputs)
+
+# Agent interface
+def agent_interface(goal):
+    return run_agents(goal, memory)
+
 # Get system resources
 def get_resource_usage():
     return {
@@ -101,12 +123,40 @@ def submit_manual_code(code):
     except Exception as e:
         return f"⚠️ Error: {str(e)}", code
 
+# Create Gradio interface
 with gr.Blocks(css="static/style.css", theme=gr.themes.Soft()) as demo:
     knowledge = load_knowledge()
 
-    gr.Markdown("# 🤖 Interactive Autonomous AI System")
+    gr.Markdown("# 🤖 Multi-Agent Autonomous AI System")
+
+    with gr.Tab("Multi-Agent Task"):
+        goal_input = gr.Textbox(
+            label="Describe your task",
+            placeholder="What do you want to accomplish?",
+            lines=3
+        )
+        agent_output = gr.Textbox(
+            label="Multi-Agent Process",
+            lines=10,
+            interactive=False
+        )
+        run_agents_btn = gr.Button("🚀 Run Agents", variant="primary")
+
+        run_agents_btn.click(
+            agent_interface,
+            inputs=[goal_input],
+            outputs=[agent_output]
+        )
+
+        gr.Markdown("### Agent Architecture")
+        gr.Markdown("""
+        - **🧠 Planner**: Creates task execution plan
+        - **🛠 Executor**: Carries out each step
+        - **🔍 Critic**: Reviews results and provides feedback
+        - **💾 Memory**: Maintains context-aware knowledge
+        """)
 
-    with gr.Tab("Task-Based Improvement"):
+    with gr.Tab("Cognitive Engine"):
         with gr.Row():
             with gr.Column():
                 task_input = gr.Textbox(
@@ -129,7 +179,7 @@ with gr.Blocks(css="static/style.css", theme=gr.themes.Soft()) as demo:
                     interactive=True
                 )
 
-    with gr.Tab("Manual Code Submission"):
+    with gr.Tab("Manual Code"):
        with gr.Row():
            with gr.Column():
                manual_code = gr.Code(
@@ -155,10 +205,11 @@ with gr.Blocks(css="static/style.css", theme=gr.themes.Soft()) as demo:
                    value=get_task_history(),
                    interactive=False
                )
-               gr.Markdown("### Improvement Database")
-               knowledge_display = gr.JSON(
-                   value={k: v for k, v in knowledge.items() if k != "solutions"},
-                   label="System Knowledge"
+               gr.Markdown("### Memory Log")
+               memory_display = gr.Textbox(
+                   label="Agent Memory",
+                   value="\n".join(memory["texts"][-5:]),
+                   interactive=False
                )
                refresh_knowledge = gr.Button("🔁 Refresh Knowledge")
 
@@ -186,152 +237,16 @@ with gr.Blocks(css="static/style.css", theme=gr.themes.Soft()) as demo:
     refresh_knowledge.click(
         lambda: load_knowledge(),
         inputs=[],
-        outputs=knowledge_display
+        outputs=task_history
     )
 
-if __name__ == "__main__":
-    demo.launch(
-        server_name="0.0.0.0",
-        server_port=7860,
-        show_api=True
-    )
+# Mount Gradio app to FastAPI
+gr.mount_gradio_app(app, demo, path="/")
 
-"orchestrator.py": '''
-from agents.planner import plan_task
-from agents.executor import execute_step
-from agents.critic import review_result
-from memory import add_to_memory, search_memory
-import uuid
-from datetime import datetime
+@app.get("/status")
+def status():
+    return {"status": "active", "agents": ["planner", "executor", "critic"]}
 
-def run_agents(goal, memory, session_id=None):
-    session_id = session_id or str(uuid.uuid4())[:8]
-    timestamp = datetime.now().isoformat(timespec="seconds")
-
-    log = f"[{timestamp}] Session {session_id}: Goal: {goal}\\n"
-    history = search_memory(goal, memory)
-    context = "\\n".join(history[:3]) if history else "No relevant memory found."
-
-    plan = plan_task(goal, memory)
-    add_to_memory(f"[{session_id}] 🧠 Context: {context}", memory)
-    add_to_memory(f"[{session_id}] 🗂 Plan: {plan}", memory)
-
-    outputs = []
-    for step in plan:
-        result = execute_step(step)
-        add_to_memory(f"[{session_id}] 🔧 Executor ran: {step} -> {result}", memory)
-
-        review = review_result(step, result)
-        add_to_memory(f"[{session_id}] 🔍 Critic: {review}", memory)
-
-        step_log = f"🔹 Step: {step}\\n🛠 Result: {result}\\n🧠 Review: {review}\\n"
-        log += step_log + "\\n"
-        outputs.append(step_log)
-
-    with open("log.txt", "a") as f:
-        f.write(log + "\\n")
-
-    return "\\n".join(outputs)
-''',
-
-"memory.py": '''
-from sentence_transformers import SentenceTransformer
-import numpy as np
-import faiss
-import json
-import os
-
-model = SentenceTransformer("all-MiniLM-L6-v2")
-MEMORY_LOG = "memory_log.json"
-
-def init_memory():
-    dim = 384
-    index = faiss.IndexFlatL2(dim)
-    memory = {"index": index, "texts": []}
-    if os.path.exists(MEMORY_LOG):
-        with open(MEMORY_LOG, "r") as f:
-            memory["texts"] = json.load(f)
-        vectors = np.array([model.encode([text])[0] for text in memory["texts"]])
-        if len(vectors) > 0:
-            memory["index"].add(vectors)
-    return memory
-
-def add_to_memory(text, memory):
-    vec = model.encode([text])[0]
-    memory["index"].add(np.array([vec]))
-    memory["texts"].append(text)
-    with open(MEMORY_LOG, "w") as f:
-        json.dump(memory["texts"], f)
-
-def search_memory(query, memory, k=5):
-    vec = model.encode([query])[0]
-    D, I = memory["index"].search(np.array([vec]), k)
-    return [memory["texts"][i] for i in I[0] if i < len(memory["texts"])]
-''',
-
-"agents/planner.py": '''
-def plan_task(goal, memory):
-    plan = []
-    if "website" in goal:
-        plan.extend([
-            "Create basic HTML structure",
-            "Add styling with CSS",
-            "Add interactivity with JavaScript",
-            "Review and test website"
-        ])
-    else:
-        plan.extend([
-            f"Understand the goal: {goal}",
-            "Find the best method to solve it",
-            "Execute and gather result"
-        ])
-    return plan
-''',
-
-"agents/executor.py": '''
-import subprocess
-
-def execute_step(step):
-    step_lower = step.lower()
-    if "html" in step_lower:
-        return "<html><body>Hello Multi-Agent World!</body></html>"
-    elif "calculate" in step_lower:
-        try:
-            expression = step_lower.replace("calculate", "").strip()
-            return str(eval(expression))
-        except:
-            return "Calculation error."
-    elif "python" in step_lower or "code" in step_lower:
-        try:
-            code = step.split("```python")[1].split("```")[0] if "```python" in step else step
-            result = subprocess.check_output(["python3", "-c", code], stderr=subprocess.STDOUT, timeout=5)
-            return result.decode()
-        except Exception as e:
-            return f"Error running Python code: {e}"
-    else:
-        return f"No defined execution path for: {step}"
-''',
-
-"agents/critic.py": '''
-def review_result(step, result):
-    if not result or "error" in result.lower():
-        return f"❌ Step failed or incomplete: {step}. Result: {result[:80]}"
-    if len(result) < 10:
-        return f"⚠️ Result seems too short for step: {step}."
-    return f"✅ Step successful. Result preview: {result[:80]}"
-''',
-
-"requirements.txt": '''
-fastapi
-uvicorn
-gradio
-faiss-cpu
-sentence-transformers
-numpy
-''',
-
-".huggingface.yaml": '''
-sdk: gradio
-sdk_version: 3.50.2
-app_file: app.py
-''',
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=7860)
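
As a quick sanity check of what this commit exposes, the sketch below (not part of the commit) queries the new /status route and the Gradio UI now mounted at "/". Only the route paths come from the diff; the local base URL, the separate test process, and the requests dependency are assumptions.

# Minimal smoke-test sketch (assumed setup): start the app first, e.g. `python app.py`,
# then query the endpoints added in this commit from another process.
import requests  # assumed extra dependency, not listed in the Space's requirements

BASE = "http://localhost:7860"  # assumed local address; adjust for a deployed Space

# /status is the FastAPI route added in this commit
print(requests.get(f"{BASE}/status", timeout=10).json())
# expected shape: {"status": "active", "agents": ["planner", "executor", "critic"]}

# the Gradio Blocks UI is mounted at "/" via gr.mount_gradio_app
print(requests.get(f"{BASE}/", timeout=10).status_code)  # 200 when the UI is served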