Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,145 +1,71 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
# - Add test result logging to vector memory.
|
4 |
-
# - Add internal test cases for agent_core logic to validate before applying updates.
|
5 |
-
|
6 |
-
# Update app.py to include dry-run simulation before applying changes
|
7 |
-
|
8 |
-
enhanced_app_code = """
|
9 |
import gradio as gr
|
10 |
-
import os
|
11 |
-
import openai
|
12 |
-
import subprocess
|
13 |
-
import json
|
14 |
import time
|
15 |
-
|
16 |
-
|
17 |
-
STATE_FILE = "agent_state.txt"
|
18 |
-
LOG_FILE = "agent_log.txt"
|
19 |
-
AGENT_CODE = "agent_core.py"
|
20 |
-
CONFIG_FILE = "config.json"
|
21 |
-
MEMORY_FILE = "vector_memory.json"
|
22 |
-
TASKS_FILE = "tasks.json"
|
23 |
-
|
24 |
-
def log(message):
|
25 |
-
with open(LOG_FILE, "a") as f:
|
26 |
-
f.write(f"{time.ctime()}: {message}\\n")
|
27 |
-
|
28 |
-
def read_log():
|
29 |
-
if os.path.exists(LOG_FILE):
|
30 |
-
with open(LOG_FILE) as f:
|
31 |
-
return f.read()
|
32 |
-
return "No logs yet."
|
33 |
-
|
34 |
-
def save_backup(code):
|
35 |
-
ts = time.strftime("%Y%m%d-%H%M%S")
|
36 |
-
backup_path = f"backup_{ts}.py"
|
37 |
-
with open(backup_path, "w") as f:
|
38 |
-
f.write(code)
|
39 |
-
return backup_path
|
40 |
-
|
41 |
-
def evaluate_change(old_code, new_code):
|
42 |
-
prompt = f\"\"\"
|
43 |
-
Compare the OLD and NEW version of the agent's core logic.
|
44 |
-
Score how much better the new version is in terms of:
|
45 |
-
- Intelligence
|
46 |
-
- Robustness
|
47 |
-
- Self-Improvement
|
48 |
-
|
49 |
-
Return a score from -10 to +10 and a reason.
|
50 |
-
|
51 |
-
[OLD CODE]
|
52 |
-
{old_code}
|
53 |
-
|
54 |
-
[NEW CODE]
|
55 |
-
{new_code}
|
56 |
-
\"\"\"
|
57 |
-
try:
|
58 |
-
response = openai.ChatCompletion.create(
|
59 |
-
model="gpt-4",
|
60 |
-
messages=[{"role": "user", "content": prompt}],
|
61 |
-
temperature=0.3
|
62 |
-
)
|
63 |
-
return response.choices[0].message["content"].strip()
|
64 |
-
except Exception as e:
|
65 |
-
return f"Evaluation failed: {e}"
|
66 |
-
|
67 |
-
def dry_run_test(code):
|
68 |
-
local_env = {}
|
69 |
-
try:
|
70 |
-
exec(code, {}, local_env)
|
71 |
-
return "Dry-run test succeeded."
|
72 |
-
except Exception as e:
|
73 |
-
return f"Dry-run test failed: {e}"
|
74 |
-
|
75 |
-
def log_memory(entry):
|
76 |
-
memory = json.load(open(MEMORY_FILE))
|
77 |
-
memory["memory"].append({
|
78 |
-
"timestamp": time.ctime(),
|
79 |
-
"thought": entry
|
80 |
-
})
|
81 |
-
with open(MEMORY_FILE, "w") as f:
|
82 |
-
json.dump(memory, f, indent=4)
|
83 |
-
|
84 |
-
def agent_tick():
|
85 |
-
log("Agent tick started.")
|
86 |
-
with open(AGENT_CODE, "r") as f:
|
87 |
-
current_code = f.read()
|
88 |
-
|
89 |
-
prompt = f\"\"\"
|
90 |
-
You are a recursive agent that improves itself.
|
91 |
-
|
92 |
-
Improve the following Python code to make it more intelligent, autonomous, and safe.
|
93 |
-
Return ONLY the improved full Python script.
|
94 |
|
95 |
-
|
96 |
-
|
|
|
97 |
|
98 |
-
|
99 |
try:
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
)
|
105 |
-
|
106 |
-
|
107 |
-
if
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
log(f"Eval: {score_report}")
|
114 |
-
log_memory(f"Eval: {score_report}")
|
115 |
-
save_backup(current_code)
|
116 |
-
with open(AGENT_CODE, "w") as f:
|
117 |
-
f.write(improved_code)
|
118 |
-
subprocess.run(["git", "add", "."], check=False)
|
119 |
-
subprocess.run(["git", "commit", "-m", "Auto-update by EvolvAI"], check=False)
|
120 |
-
log("Applied improved code.")
|
121 |
-
else:
|
122 |
-
log("Dry-run failed. Skipping update.")
|
123 |
-
else:
|
124 |
-
log("Malformed GPT output. Skipping update.")
|
125 |
except Exception as e:
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
-
|
145 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from src.utils.hf_packager import HFSpacePackager
|
2 |
+
from src.core.cognitive_engine import CognitiveEngine
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
import gradio as gr
|
|
|
|
|
|
|
|
|
4 |
import time
|
5 |
+
import os
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
|
7 |
+
# Pre-load models during space initialization
# Both objects are created once at import time and shared by every Gradio
# session; `run_cycle` reads them as module globals.
# NOTE(review): presumably instantiation here front-loads any model download
# cost so the first request is fast — confirm against CognitiveEngine.__init__.
cognitive_engine = CognitiveEngine()  # project-local self-improvement engine
packager = HFSpacePackager()  # project-local HF Space snapshot packager
|
10 |
|
11 |
+
def run_cycle(progress=gr.Progress()):
    """Run one full self-improvement cycle and return a status string.

    Drives the module-level ``cognitive_engine`` through identify -> generate
    -> apply, snapshots the Space via ``packager`` on success, and reports
    progress to the Gradio UI through ``progress``. Never raises: any failure
    is folded into the returned status message.
    """
    try:
        progress(0.1, desc="Initializing self-improvement cycle")
        targets = cognitive_engine.identify_improvements()

        progress(0.3, desc="Generating code enhancements")
        patches = cognitive_engine.generate_enhancements(targets)

        progress(0.6, desc="Validating modifications")
        # Guard clause: bail out early when the engine rejects the patches.
        if not cognitive_engine.apply_enhancements(patches):
            return "❌ Improvement failed - see logs"

        progress(0.8, desc="Creating system snapshot")
        snapshot_url = packager.create_snapshot()
        progress(0.95, desc="Updating knowledge base")
        return f"✅ Cycle complete! Snapshot: {snapshot_url}"
    except Exception as e:
        # Surface the error in the status textbox rather than crashing the UI.
        return f"⚠️ Critical error: {str(e)}"
|
28 |
+
|
29 |
+
def get_resource_usage():
    """Return current CPU / memory / disk utilisation as percent strings.

    Keys are the display labels shown in the UI's JSON panel.
    """
    # Local import keeps psutil off the module-load path.
    import psutil

    raw = {
        "CPU": psutil.cpu_percent(),
        "Memory": psutil.virtual_memory().percent,
        "Disk": psutil.disk_usage('/').percent,
    }
    return {label: f"{value}%" for label, value in raw.items()}
|
37 |
+
|
38 |
+
# Gradio UI: control column (start button, status, token, live resources)
# next to an HTML dashboard pane.
with gr.Blocks(css="static/style.css", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 Autonomous AI System")

    with gr.Row():
        with gr.Column(scale=1):
            start_btn = gr.Button("🚀 Start Improvement Cycle", variant="primary")
            status = gr.Textbox(label="Status", interactive=False)
            hf_token = gr.Textbox(label="HF Token (for private snapshots)", type="password")

            gr.Markdown("### System Resources")
            resource_display = gr.JSON(label="Current Usage")

            # Fix: previously polled `lambda: None`, so the resource panel was
            # refreshed with None every 5 s and never showed data. Poll the
            # real monitor instead.
            demo.load(
                get_resource_usage,
                inputs=[],
                outputs=resource_display,
                every=5
            )

        with gr.Column(scale=2):
            # Context manager closes the template file instead of leaking the
            # handle (was `open(...).read()` inline).
            with open("templates/dashboard.html", "r") as dashboard:
                gr.HTML(dashboard.read())

    # Kick off one improvement cycle; result lands in the status textbox.
    start_btn.click(
        run_cycle,
        inputs=[],
        outputs=status
    )
|
65 |
+
|
66 |
+
if __name__ == "__main__":
    # Serve on all interfaces at 7860, the standard Hugging Face Spaces port,
    # with the auto-generated API docs enabled.
    # NOTE(review): `concurrency_count` was removed from `queue()` in
    # Gradio 4.x — confirm this Space pins a Gradio 3.x release.
    demo.queue(concurrency_count=1).launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_api=True
    )
|