Upload 9 files
- .huggingface.yaml +3 -0
- README.md +13 -21
- agents/critic.py +5 -5
- agents/executor.py +13 -11
- agents/planner.py +14 -6
- memory.py +14 -1
- orchestrator.py +23 -7
- requirements.txt +2 -5
.huggingface.yaml
ADDED
@@ -0,0 +1,3 @@
+sdk: gradio
+sdk_version: 3.50.2
+app_file: app.py
README.md
CHANGED
@@ -1,21 +1,13 @@
-### Run on Hugging Face Spaces
-
-Ensure the following structure and that your Space is set to use `app.py`:
-
-To deploy:
-1. Upload the entire folder to a new Hugging Face Space (set to FastAPI + Gradio).
-2. That's it!!
+# 🧠 Multi-Agent Autonomous AI (Advanced)
+
+## Features
+- Planner, Executor, Critic agents
+- Contextual vector memory (FAISS)
+- JSON + log file memory
+- Timestamped, session-aware logging
+- Ready for Hugging Face Spaces
+
+## How to Use
+1. Create a Hugging Face Space (Gradio SDK)
+2. Upload this ZIP folder
+3. Done: `app.py` runs automatically
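The Space config and the README both point at `app.py`, but that file is not part of this upload. A minimal sketch of what it might look like, assuming a plain Gradio 3.50.2 `Interface` wired to `init_memory()` and `run_agents()` from the modules below; the wrapper name and textbox labels are illustrative, not from the repo:

```python
# Hypothetical app.py: wires the orchestrator to a Gradio UI (not in this commit).
import gradio as gr
from memory import init_memory
from orchestrator import run_agents

memory = init_memory()  # builds the FAISS index and reloads memory_log.json if present

def run(goal):
    # run_agents returns the joined per-step logs as one string
    return run_agents(goal, memory)

demo = gr.Interface(
    fn=run,
    inputs=gr.Textbox(label="Goal"),
    outputs=gr.Textbox(label="Agent log"),
    title="🧠 Multi-Agent Autonomous AI",
)

if __name__ == "__main__":
    demo.launch()
```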
agents/critic.py
CHANGED
@@ -1,6 +1,6 @@
 def review_result(step, result):
-    if "
-    return f"Step
-
-    return f"
-    return f"Step
+    if not result or "error" in result.lower():
+        return f"❌ Step failed or incomplete: {step}. Result: {result[:80]}"
+    if len(result) < 10:
+        return f"⚠️ Result seems too short for step: {step}."
+    return f"✅ Step successful. Result preview: {result[:80]}"
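A quick, hypothetical check of the new heuristics; the sample inputs below are made up, not from the repo:

```python
from agents.critic import review_result

print(review_result("Calculate 2+2", "4"))                       # shorter than 10 chars -> ⚠️ warning
print(review_result("Run the script", "error: file not found"))  # contains "error" -> ❌ failure
print(review_result("Create basic HTML structure",
                    "<html><body>Hello Multi-Agent World!</body></html>"))  # -> ✅ success
```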
agents/executor.py
CHANGED
@@ -1,19 +1,21 @@
 import subprocess
 
 def execute_step(step):
-
-
-
+    step_lower = step.lower()
+    if "html" in step_lower:
+        return "<html><body>Hello Multi-Agent World!</body></html>"
+    elif "calculate" in step_lower:
         try:
-            expression =
-            return eval(expression)
+            expression = step_lower.replace("calculate", "").strip()
+            return str(eval(expression))
         except:
-            return "Calculation
+            return "Calculation error."
-    elif "python" in
+    elif "python" in step_lower or "code" in step_lower:
         try:
-            code = step.split("```python")[1].split("```")[0]
-
+            code = step.split("```python")[1].split("```")[0] if "```python" in step else step
+            result = subprocess.check_output(["python3", "-c", code], stderr=subprocess.STDOUT, timeout=5)
+            return result.decode()
         except Exception as e:
-            return f"Python
+            return f"Error running Python code: {e}"
     else:
-        return "
+        return f"No defined execution path for: {step}"
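The `calculate` branch still passes planner text straight into `eval()`, and the `python`/`code` branch runs arbitrary code through `subprocess`. For the arithmetic case, a restricted AST walker is a safer drop-in; the sketch below is an illustration only (the `safe_eval` helper is not part of the repo) and assumes plain `+ - * /` expressions are all that is needed:

```python
import ast
import operator

# Map AST operator nodes to plain arithmetic; anything else is rejected.
_OPS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
    ast.USub: operator.neg,
}

def safe_eval(expression: str):
    """Evaluate a basic arithmetic expression without the risks of eval()."""
    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.operand))
        raise ValueError(f"Unsupported expression: {expression!r}")
    return _eval(ast.parse(expression, mode="eval"))

print(safe_eval("2 + 3 * (4 - 1)"))  # 11
print(safe_eval("-7 / 2"))           # -3.5
```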
agents/planner.py
CHANGED
@@ -1,8 +1,16 @@
 def plan_task(goal, memory):
+    plan = []
     if "website" in goal:
-
-        "Create HTML structure",
-        "Add CSS
-        "
-
-
+        plan.extend([
+            "Create basic HTML structure",
+            "Add styling with CSS",
+            "Add interactivity with JavaScript",
+            "Review and test website"
+        ])
+    else:
+        plan.extend([
+            f"Understand the goal: {goal}",
+            "Find the best method to solve it",
+            "Execute and gather result"
+        ])
+    return plan
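A quick illustration of the two branches; the goals below are made-up examples, and `None` is passed because the current planner never reads its `memory` argument:

```python
from agents.planner import plan_task

print(plan_task("build a website for my portfolio", None))
# ['Create basic HTML structure', 'Add styling with CSS',
#  'Add interactivity with JavaScript', 'Review and test website']

print(plan_task("summarize this article", None))
# ['Understand the goal: summarize this article',
#  'Find the best method to solve it', 'Execute and gather result']
```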
memory.py
CHANGED
@@ -1,17 +1,30 @@
 from sentence_transformers import SentenceTransformer
 import numpy as np
 import faiss
+import json
+import os
 
 model = SentenceTransformer("all-MiniLM-L6-v2")
+MEMORY_LOG = "memory_log.json"
 
 def init_memory():
     dim = 384
-
+    index = faiss.IndexFlatL2(dim)
+    memory = {"index": index, "texts": []}
+    if os.path.exists(MEMORY_LOG):
+        with open(MEMORY_LOG, "r") as f:
+            memory["texts"] = json.load(f)
+        vectors = np.array([model.encode([text])[0] for text in memory["texts"]])
+        if len(vectors) > 0:
+            memory["index"].add(vectors)
+    return memory
 
 def add_to_memory(text, memory):
     vec = model.encode([text])[0]
     memory["index"].add(np.array([vec]))
     memory["texts"].append(text)
+    with open(MEMORY_LOG, "w") as f:
+        json.dump(memory["texts"], f)
 
 def search_memory(query, memory, k=5):
     vec = model.encode([query])[0]
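The diff cuts `search_memory` off after its first line. A sketch of how it presumably finishes, assuming it returns the `k` nearest stored texts; the FAISS lookup shown here is an inference, not copied from the repo:

```python
def search_memory(query, memory, k=5):
    vec = model.encode([query])[0]
    if not memory["texts"]:
        return []
    # FAISS returns (distances, indices); -1 marks slots beyond the index size.
    _, indices = memory["index"].search(np.array([vec]), k)
    return [memory["texts"][i] for i in indices[0] if i != -1]
```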
orchestrator.py
CHANGED
@@ -1,19 +1,35 @@
 from agents.planner import plan_task
 from agents.executor import execute_step
 from agents.critic import review_result
-from memory import add_to_memory
+from memory import add_to_memory, search_memory
+import uuid
+from datetime import datetime
 
-def run_agents(goal, memory):
+def run_agents(goal, memory, session_id=None):
+    session_id = session_id or str(uuid.uuid4())[:8]
+    timestamp = datetime.now().isoformat(timespec="seconds")
+
+    log = f"[{timestamp}] Session {session_id}: Goal: {goal}\n"
+    history = search_memory(goal, memory)
+    context = "\n".join(history[:3]) if history else "No relevant memory found."
+
     plan = plan_task(goal, memory)
-    add_to_memory(f"
+    add_to_memory(f"[{session_id}] 🧠 Context: {context}", memory)
+    add_to_memory(f"[{session_id}] 🗂 Plan: {plan}", memory)
 
     outputs = []
     for step in plan:
         result = execute_step(step)
-        add_to_memory(f"Executor: {step} -> {result}", memory)
+        add_to_memory(f"[{session_id}] 🔧 Executor ran: {step} -> {result}", memory)
 
         review = review_result(step, result)
-        add_to_memory(f"Critic: {review}", memory)
-
+        add_to_memory(f"[{session_id}] 🔍 Critic: {review}", memory)
+
+        step_log = f"🔹 Step: {step}\n🛠 Result: {result}\n🧠 Review: {review}\n"
+        log += step_log + "\n"
+        outputs.append(step_log)
+
+    with open("log.txt", "a") as f:
+        f.write(log + "\n")
 
-    return outputs
+    return "\n".join(outputs)
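Putting the pieces together outside of Gradio; a minimal, hypothetical driver using only the module and function names defined above:

```python
from memory import init_memory
from orchestrator import run_agents

memory = init_memory()
report = run_agents("build a website for a coffee shop", memory)
print(report)  # per-step logs: 🔹 Step / 🛠 Result / 🧠 Review blocks
```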
requirements.txt
CHANGED
@@ -1,9 +1,6 @@
 fastapi
 uvicorn
-gradio
+gradio
 faiss-cpu
 sentence-transformers
-numpy
-transformers
-torch
-accelerate
+numpy