Leonydis137 committed · verified
Commit c84643c · 1 Parent(s): ba8d6c8

Update app.py

Files changed (1)
  1. app.py +65 -139
app.py CHANGED
@@ -1,145 +1,71 @@
- # Final major enhancement wave:
- # - Add dry-run testing of proposed code using `exec()` in isolated namespace.
- # - Add test result logging to vector memory.
- # - Add internal test cases for agent_core logic to validate before applying updates.
-
- # Update app.py to include dry-run simulation before applying changes
-
- enhanced_app_code = """
  import gradio as gr
- import os
- import openai
- import subprocess
- import json
  import time
- from difflib import unified_diff
-
- STATE_FILE = "agent_state.txt"
- LOG_FILE = "agent_log.txt"
- AGENT_CODE = "agent_core.py"
- CONFIG_FILE = "config.json"
- MEMORY_FILE = "vector_memory.json"
- TASKS_FILE = "tasks.json"
-
- def log(message):
-     with open(LOG_FILE, "a") as f:
-         f.write(f"{time.ctime()}: {message}\\n")
-
- def read_log():
-     if os.path.exists(LOG_FILE):
-         with open(LOG_FILE) as f:
-             return f.read()
-     return "No logs yet."
-
- def save_backup(code):
-     ts = time.strftime("%Y%m%d-%H%M%S")
-     backup_path = f"backup_{ts}.py"
-     with open(backup_path, "w") as f:
-         f.write(code)
-     return backup_path
-
- def evaluate_change(old_code, new_code):
-     prompt = f\"\"\"
- Compare the OLD and NEW version of the agent's core logic.
- Score how much better the new version is in terms of:
- Intelligence
- Robustness
- Self-Improvement
-
- Return a score from -10 to +10 and a reason.
-
- [OLD CODE]
- {old_code}
-
- [NEW CODE]
- {new_code}
- \"\"\"
-     try:
-         response = openai.ChatCompletion.create(
-             model="gpt-4",
-             messages=[{"role": "user", "content": prompt}],
-             temperature=0.3
-         )
-         return response.choices[0].message["content"].strip()
-     except Exception as e:
-         return f"Evaluation failed: {e}"
-
- def dry_run_test(code):
-     local_env = {}
-     try:
-         exec(code, {}, local_env)
-         return "Dry-run test succeeded."
-     except Exception as e:
-         return f"Dry-run test failed: {e}"
-
- def log_memory(entry):
-     memory = json.load(open(MEMORY_FILE))
-     memory["memory"].append({
-         "timestamp": time.ctime(),
-         "thought": entry
-     })
-     with open(MEMORY_FILE, "w") as f:
-         json.dump(memory, f, indent=4)
-
- def agent_tick():
-     log("Agent tick started.")
-     with open(AGENT_CODE, "r") as f:
-         current_code = f.read()
-
-     prompt = f\"\"\"
- You are a recursive agent that improves itself.
-
- Improve the following Python code to make it more intelligent, autonomous, and safe.
- Return ONLY the improved full Python script.

- {current_code}
- \"\"\"

-     openai.api_key = os.getenv("OPENAI_API_KEY")
      try:
-         response = openai.ChatCompletion.create(
-             model="gpt-4",
-             messages=[{"role": "user", "content": prompt}],
-             temperature=0.5
-         )
-         improved_code = response.choices[0].message["content"]
-
-         if improved_code.strip().startswith("import"):
-             dry_result = dry_run_test(improved_code)
-             log(f"Dry-run result: {dry_result}")
-             log_memory(f"Dry-run: {dry_result}")
-             if "succeeded" in dry_result:
-                 score_report = evaluate_change(current_code, improved_code)
-                 log(f"Eval: {score_report}")
-                 log_memory(f"Eval: {score_report}")
-                 save_backup(current_code)
-                 with open(AGENT_CODE, "w") as f:
-                     f.write(improved_code)
-                 subprocess.run(["git", "add", "."], check=False)
-                 subprocess.run(["git", "commit", "-m", "Auto-update by EvolvAI"], check=False)
-                 log("Applied improved code.")
-             else:
-                 log("Dry-run failed. Skipping update.")
-         else:
-             log("Malformed GPT output. Skipping update.")
      except Exception as e:
-         log(f"Error during update: {e}")
-         log_memory(f"Update error: {e}")
-
-     return read_log()
-
- def stop_agent():
-     log("Agent manually stopped.")
-     return read_log()
-
- with gr.Blocks() as demo:
-     gr.Markdown("# 🤖 EvolvAI v2: Self-Evolving Agent UI")
-     log_display = gr.Textbox(label="Log Output", lines=20)
-     start_btn = gr.Button("Run Self-Update")
-     stop_btn = gr.Button("Stop Agent")
-
-     start_btn.click(agent_tick, outputs=log_display)
-     stop_btn.click(stop_agent, outputs=log_display)
-
- demo.launch()
- """
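The version being removed here ran its dry-run check by exec()-ing the GPT-generated script inside the agent's own process, so a hang or destructive side effect in the candidate code hits the running app directly. For comparison, a minimal sketch of a more isolated variant is shown below; the temp-file approach, the 30-second timeout, and the helper's exact shape are assumptions for illustration, not part of this commit.

import subprocess
import sys
import tempfile

def dry_run_test(code: str, timeout: int = 30) -> str:
    # Cheap syntax check first, without executing anything.
    try:
        compile(code, "<candidate>", "exec")
    except SyntaxError as e:
        return f"Dry-run test failed: {e}"
    # Run the candidate in a separate interpreter so a crash or hang
    # cannot take down the agent process itself.
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write(code)
        path = tmp.name
    try:
        result = subprocess.run(
            [sys.executable, path],
            capture_output=True, text=True, timeout=timeout,
        )
    except subprocess.TimeoutExpired:
        return f"Dry-run test failed: timed out after {timeout}s"
    if result.returncode == 0:
        return "Dry-run test succeeded."
    return f"Dry-run test failed: {result.stderr.strip()}"

This is still not a sandbox; it only limits blast radius and runtime relative to the in-process exec().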
+ from src.utils.hf_packager import HFSpacePackager
+ from src.core.cognitive_engine import CognitiveEngine
  import gradio as gr
  import time
+ import os
+
+ # Pre-load models during space initialization
+ cognitive_engine = CognitiveEngine()
+ packager = HFSpacePackager()
+
+ def run_cycle(progress=gr.Progress()):
      try:
+         progress(0.1, desc="Initializing self-improvement cycle")
+         improvement_targets = cognitive_engine.identify_improvements()
+
+         progress(0.3, desc="Generating code enhancements")
+         code_updates = cognitive_engine.generate_enhancements(improvement_targets)
+
+         progress(0.6, desc="Validating modifications")
+         if cognitive_engine.apply_enhancements(code_updates):
+             progress(0.8, desc="Creating system snapshot")
+             snapshot_url = packager.create_snapshot()
+             progress(0.95, desc="Updating knowledge base")
+             return f"✅ Cycle complete! Snapshot: {snapshot_url}"
+         return "❌ Improvement failed - see logs"
      except Exception as e:
+         return f"⚠️ Critical error: {str(e)}"
+
+ def get_resource_usage():
+     # Simplified resource monitoring
+     import psutil
+     return {
+         "CPU": f"{psutil.cpu_percent()}%",
+         "Memory": f"{psutil.virtual_memory().percent}%",
+         "Disk": f"{psutil.disk_usage('/').percent}%"
+     }
+
+ with gr.Blocks(css="static/style.css", theme=gr.themes.Soft()) as demo:
+     gr.Markdown("# 🤖 Autonomous AI System")
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             start_btn = gr.Button("🚀 Start Improvement Cycle", variant="primary")
+             status = gr.Textbox(label="Status", interactive=False)
+             hf_token = gr.Textbox(label="HF Token (for private snapshots)", type="password")
+
+             gr.Markdown("### System Resources")
+             resource_display = gr.JSON(label="Current Usage")
+
+             demo.load(
+                 get_resource_usage,
+                 inputs=[],
+                 outputs=resource_display,
+                 every=5
+             )
+
+         with gr.Column(scale=2):
+             gr.HTML(open("templates/dashboard.html", "r").read())
+
+     start_btn.click(
+         run_cycle,
+         inputs=[],
+         outputs=status
+     )
+
+ if __name__ == "__main__":
+     demo.queue(concurrency_count=1).launch(
+         server_name="0.0.0.0",
+         server_port=7860,
+         show_api=True
+     )
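Neither src/core/cognitive_engine.py nor src/utils/hf_packager.py is included in this diff, so the new app.py only pins down their call signatures. A minimal sketch of the interfaces it appears to assume follows; every name, stub body, the default repo_id, and the use of huggingface_hub for create_snapshot() are inferred for illustration rather than taken from the repository.

from typing import Dict, List, Optional

from huggingface_hub import HfApi  # assumed transport for snapshots


class CognitiveEngine:
    """Stub of the engine instantiated at Space startup."""

    def identify_improvements(self) -> List[str]:
        # e.g. scan the codebase for TODOs, failing tests, or hotspots
        return []

    def generate_enhancements(self, targets: List[str]) -> Dict[str, str]:
        # map each target to its proposed replacement source
        return {target: "" for target in targets}

    def apply_enhancements(self, updates: Dict[str, str]) -> bool:
        # validate and write the updates; True signals success to run_cycle()
        return bool(updates)


class HFSpacePackager:
    """Stub packager that pushes the working tree to a Hub repo."""

    def __init__(self, repo_id: str = "Leonydis137/EvolvAI-snapshots",
                 token: Optional[str] = None):
        # assumes the target repo already exists and the token can write to it
        self.api = HfApi(token=token)
        self.repo_id = repo_id

    def create_snapshot(self) -> str:
        self.api.upload_folder(folder_path=".", repo_id=self.repo_id,
                               repo_type="space")
        return f"https://huggingface.co/spaces/{self.repo_id}"

run_cycle() relies only on these four methods; anything else the real modules do is opaque to this commit.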