Miquel Farré committed
Commit 74c93f6 · 1 Parent(s): a4f3b3f

first running version

Files changed (3):
  1. app.py (+118, -20)
  2. e2bqwen.py (+536, -0)
  3. requirements.txt (+4, -1)
app.py CHANGED

@@ -1,13 +1,23 @@
 import gradio as gr
 import os
+import shutil
 from e2b_desktop import Sandbox
+from huggingface_hub import upload_folder
+from smolagents.monitoring import LogLevel
+from textwrap import dedent
+
+from e2bqwen import QwenVLAPIModel, E2BVisionAgent
 
 E2B_API_KEY = os.getenv("E2B_API_KEY")
 SANDBOXES = {}
+WIDTH = 1920
+HEIGHT = 1440
 TMP_DIR = './tmp/'
 if not os.path.exists(TMP_DIR):
     os.makedirs(TMP_DIR)
 
+model = QwenVLAPIModel()
+
 custom_css = """
 /* Your existing CSS */
 .sandbox-outer-wrapper {

@@ -39,11 +49,11 @@ custom_css = """
     position: absolute;
     top: 10%;
     left: 25%;
-    width: 1032px;
-    height: 776px;
+    width: 1928px;
+    height: 1448px;
     border: 4px solid #444444;
     transform-origin: 0 0;
-    transform: scale(0.392);
+    transform: scale(0.207);
 }
 
 /* Status indicator light */

@@ -103,14 +113,44 @@ html_template = """
     </div>
 </div>"""
 
-def update_html(interactive_mode, request: gr.Request):
-    if request.session_hash not in SANDBOXES:  # str necessary to run locally when hash is None
-        print("No sandbox found, creating new one", request.session_hash)
-        desktop = Sandbox(api_key=E2B_API_KEY, resolution=(1024, 768), dpi=96)
-        desktop.stream.start(require_auth=True)
-        SANDBOXES[request.session_hash] = desktop
-
-    desktop = SANDBOXES[request.session_hash]
+def upload_to_hf_and_remove(folder_path):
+
+    repo_id = "open-agents/os-agent-logs"
+    try:
+        folder_name = os.path.basename(os.path.normpath(folder_path))
+
+        # Upload the folder to Huggingface
+        print(f"Uploading {folder_path} to {repo_id}/{folder_name}...")
+        url = upload_folder(
+            folder_path=folder_path,
+            repo_id=repo_id,
+            repo_type="dataset",
+            path_in_repo=folder_name,
+            ignore_patterns=[".git/*", ".gitignore"]
+        )
+
+        # Remove the local folder after successful upload
+        print(f"Upload complete. Removing local folder {folder_path}...")
+        shutil.rmtree(folder_path)
+        print("Local folder removed successfully.")
+
+        return url
+
+    except Exception as e:
+        print(f"Error during upload or cleanup: {str(e)}")
+        raise
+
+
+def get_or_create_sandbox(session_hash):
+    if session_hash not in SANDBOXES:
+        print(f"Creating new sandbox for session {session_hash}")
+        desktop = Sandbox(api_key=E2B_API_KEY, resolution=(WIDTH, HEIGHT), dpi=96)
+        desktop.stream.start(require_auth=True)
+        SANDBOXES[session_hash] = desktop
+    return SANDBOXES[session_hash]
+
+def update_html(interactive_mode, request: gr.Request):
+    desktop = get_or_create_sandbox(request.session_hash)
     auth_key = desktop.stream.get_auth_key()
 
     # Add view_only parameter based on interactive_mode

@@ -128,6 +168,56 @@ def update_html(interactive_mode, request: gr.Request):
     )
     return html_content
 
+def run_agent_task(task_input, interactive_mode, request: gr.Request):
+    session_hash = request.session_hash
+    desktop = get_or_create_sandbox(session_hash)
+
+    # Create data directory for this session
+    data_dir = os.path.join(TMP_DIR, session_hash)
+    if not os.path.exists(data_dir):
+        os.makedirs(data_dir)
+
+
+    # Create the agent
+    agent = E2BVisionAgent(
+        model=model,
+        data_dir=data_dir,
+        desktop=desktop,
+        max_steps=200,
+        verbosity_level=LogLevel.DEBUG,
+        planning_interval=5,
+    )
+
+    # Construct the full task with instructions
+    full_task = task_input + dedent(f"""
+        The desktop has a resolution of {WIDTH}x{HEIGHT}, take it into account to decide clicking coordinates.
+        When clicking an element, always make sure to click THE MIDDLE of that element! Otherwise you risk missing it.
+
+        Always analyze the latest screenshot carefully before performing actions. Make sure to:
+        1. Look at elements on the screen to determine what to click or interact with
+        2. Use precise coordinates for mouse movements and clicks
+        3. Wait for page loads or animations to complete using the wait() tool
+        4. Sometimes you may have missed a click, so never assume that you're on the right page; always make sure that your previous action worked. In the screenshot you can see if the mouse is out of the clickable area. Pay special attention to this.
+
+        When you receive a task, break it down into step-by-step actions. On each step, look at the current screenshot to validate if previous steps worked and decide the next action.
+        We can only execute one action at a time. On each step, answer with only a Python blob containing the action to perform.
+    """)
+
+    try:
+        # Run the agent
+        result = agent.run(full_task)
+
+        interactive_mode = True
+        return f"Task completed: {result}", update_html(interactive_mode, request)
+    except Exception as e:
+        error_message = f"Error running agent: {str(e)}"
+        print(error_message)
+        interactive_mode = True
+        return error_message, update_html(interactive_mode, request)
+    finally:
+        upload_to_hf_and_remove(data_dir)
+
+
 # Create a Gradio app with Blocks
 with gr.Blocks(css=custom_css) as demo:
     gr.HTML("""<h1 style="text-align: center">Personal Computer Assistant</h1>""")

@@ -142,36 +232,44 @@ with gr.Blocks(css=custom_css) as demo:
         label="Output"
     )
 
-    # Text input for placeholder text
-    placeholder_input = gr.Textbox(
+    # Text input for task
+    task_input = gr.Textbox(
         value="Find picture of cute puppies",
         label="Enter your command"
     )
 
     # Interactive mode checkbox
     interactive_mode = gr.Checkbox(
-        value=True,
+        value=False,
         label="Interactive Mode"
     )
 
+    # Results output
+    results_output = gr.Textbox(
+        label="Results",
+        interactive=False
+    )
+
     # Update button
     update_btn = gr.Button("Let's go!")
 
-    # Connect the components
-    update_btn.click(
+    # Connect the components for displaying the sandbox
+    interactive_mode.change(
         fn=update_html,
         inputs=[interactive_mode],
         outputs=[html_output]
     )
 
-    # Also update when interactive mode changes
-    interactive_mode.change(
-        fn=update_html,
-        inputs=[interactive_mode],
-        outputs=[html_output]
+    # Connect the components for running the agent
+    update_btn.click(
+        fn=run_agent_task,
+        inputs=[task_input, interactive_mode],
+        outputs=[results_output]
    )
 
+    # Load the sandbox on app start
     demo.load(update_html, [interactive_mode], html_output)
+
 # Launch the app
 if __name__ == "__main__":
     demo.launch()
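One detail worth spelling out (an inference from the numbers, not stated in the commit): the `.sandbox-outer-wrapper` CSS tracks the sandbox resolution. The wrapper is the desktop size plus the 4px border on each side, and the scale factor keeps roughly the same ~400px-wide on-screen preview as before:

```python
WIDTH, HEIGHT, BORDER = 1920, 1440, 4

wrapper_w = WIDTH + 2 * BORDER   # 1928 -- matches the new CSS width
wrapper_h = HEIGHT + 2 * BORDER  # 1448 -- matches the new CSS height

# Same on-screen footprint before and after the resolution bump:
print(1032 * 0.392)       # ~404.5 px preview width with the old 1024x768 settings
print(wrapper_w * 0.207)  # ~399.1 px preview width with the new 1920x1440 settings
```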
e2bqwen.py ADDED

@@ -0,0 +1,536 @@
import os
import time
import base64
from io import BytesIO
from textwrap import dedent
from typing import Any, Dict, List, Optional, Tuple
import json

# HF API params
from huggingface_hub import InferenceClient

# E2B imports
from e2b_desktop import Sandbox
from PIL import Image

# smolagents imports
from smolagents import CodeAgent, tool, HfApiModel
from smolagents.memory import ActionStep
from smolagents.models import ChatMessage, MessageRole, Model
from smolagents.monitoring import LogLevel


class E2BVisionAgent(CodeAgent):
    """Agent for e2b desktop automation with Qwen2.5VL vision capabilities"""
    def __init__(
        self,
        model: HfApiModel,
        data_dir: str,
        desktop: Sandbox,
        tools: List[tool] = None,
        max_steps: int = 200,
        verbosity_level: LogLevel = 4,
        planning_interval: int = 15,
        **kwargs
    ):
        self.desktop = desktop
        self.data_dir = data_dir
        self.planning_interval = planning_interval

        # Initialize desktop
        self.width, self.height = self.desktop.get_screen_size()
        print(f"Screen size: {self.width}x{self.height}")

        # Set up data directory
        os.makedirs(self.data_dir, exist_ok=True)
        print(f"Screenshots and steps will be saved to: {self.data_dir}")
        print(f"Verbosity level set to {verbosity_level}")

        # Initialize base agent
        super().__init__(
            tools=tools or [],
            model=model,
            max_steps=max_steps,
            verbosity_level=verbosity_level,
            planning_interval=self.planning_interval,
            **kwargs
        )

        # Add screen info to state
        self.state["screen_width"] = self.width
        self.state["screen_height"] = self.height

        # Add default tools
        self._setup_desktop_tools()
        self.step_callbacks.append(self.take_snapshot_callback)

    def initialize_system_prompt(self):
        return """You are a desktop automation assistant that can control a remote desktop environment.
You only have access to the following tools to interact with the desktop, no additional ones:

- click(x, y): Performs a left-click at the specified coordinates
- right_click(x, y): Performs a right-click at the specified coordinates
- double_click(x, y): Performs a double-click at the specified coordinates
- move_mouse(x, y): Moves the mouse cursor to the specified coordinates
- type_text(text): Types the specified text at the current cursor position
- press_key(key): Presses a keyboard key (e.g., "Return", "tab", "ctrl+c")
- scroll(direction, amount): Scrolls a website in a browser or a document (direction can be "up" or "down"; a common amount is 1 or 2, e.g. scroll("down", 1)). DO NOT use scroll to move through Linux desktop menus.
- wait(seconds): Waits for the specified number of seconds. Very useful in case the prior action is still executing (for example when starting very heavy applications like browsers or office apps)
- open_url(url): Directly opens a browser with the specified URL; saves time compared to clicking in a browser and going through the initial setup wizard.
- final_answer("YOUR FINAL ANSWER TEXT"): Announces that the requested task is completed and provides a final text

The desktop has a resolution of {resolution_x}x{resolution_y}.

IMPORTANT:
- Remember the tools that you have, as those can save you time. For example, use open_url to enter a website rather than searching for the browser in the OS.
- Whenever you click, MAKE SURE to click in the middle of the button, text, link or any other clickable element. Not under, not on the side. IN THE MIDDLE. In menus it is always better to click in the middle of the text rather than in the tiny icon. Calculate the coordinates extremely carefully. A mistake here can make the full task fail.
- To navigate the desktop you should open menus and click. Menus usually expand with more options; the tiny triangle next to some text in a menu means that the menu expands. For example, the Office entry in the Applications menu expands to show presentation or writing applications.
- Always analyze the latest screenshot carefully before performing actions. If you clicked somewhere in the previous action and nothing happened in the screenshot, make sure the mouse is where it should be. Otherwise you can see that the coordinates were wrong.

You must proceed step by step:
1. Understand the task thoroughly
2. Break down the task into logical steps
3. For each step:
   a. Analyze the current screenshot to identify UI elements
   b. Plan the appropriate action with precise coordinates
   c. Execute ONE action at a time using the proper tool
   d. Wait for the action to complete before proceeding

After each action, you'll receive an updated screenshot. Review it carefully before your next action.

COMMAND FORMAT:
Always format your actions as Python code blocks. For example:

```python
click(250, 300)
```<end_code>

TASK EXAMPLE:
For a task like "Open a text editor and type 'Hello World'":
1- First, analyze the screenshot to find the Applications menu and click on it, being very precise: click in the middle of the text 'Applications':
```python
click(50, 10)
```<end_code>
2- Remembering that menus are navigated through clicking, after analyzing the screenshot with the Applications menu open we see that a notes application probably fits in the Accessories section (we see it is a section in the menu thanks to the tiny white triangle after the text Accessories). We look for Accessories and click on it, being very precise: click in the middle of the text 'Accessories'. DO NOT try to move through the menus with scroll, it won't work:
```python
click(76, 195)
```<end_code>
3- Remembering that menus are navigated through clicking, after analyzing the screenshot with the Accessories submenu open, look for 'Text Editor' and click on it, being very precise: click in the middle of the text 'Text Editor':
```python
click(241, 441)
```<end_code>
4- Once the text editor is open, type the requested text:
```python
type_text("Hello World")
```<end_code>
5- The task is completed:
```python
final_answer("Done")
```<end_code>

Remember to:
- Always wait for appropriate loading times
- Use precise coordinates based on the current screenshot
- Execute one action at a time
- Verify the result before proceeding to the next step
- Use click to move through menus on the desktop, and scroll for web pages and specific applications
REMEMBER TO ALWAYS CLICK IN THE MIDDLE OF THE TEXT, NOT ON THE SIDE, NOT UNDER.
""".format(resolution_x=self.width, resolution_y=self.height)

    def _setup_desktop_tools(self):
        """Register all desktop tools"""
        @tool
        def click(x: int, y: int) -> str:
            """
            Performs a left-click at the specified coordinates
            Args:
                x: The x coordinate (horizontal position)
                y: The y coordinate (vertical position)
            """
            self.desktop.move_mouse(x, y)
            self.desktop.left_click()
            return f"Clicked at coordinates ({x}, {y})"

        @tool
        def right_click(x: int, y: int) -> str:
            """
            Performs a right-click at the specified coordinates
            Args:
                x: The x coordinate (horizontal position)
                y: The y coordinate (vertical position)
            """
            self.desktop.move_mouse(x, y)
            self.desktop.right_click()
            return f"Right-clicked at coordinates ({x}, {y})"

        @tool
        def double_click(x: int, y: int) -> str:
            """
            Performs a double-click at the specified coordinates
            Args:
                x: The x coordinate (horizontal position)
                y: The y coordinate (vertical position)
            """
            self.desktop.move_mouse(x, y)
            self.desktop.double_click()
            return f"Double-clicked at coordinates ({x}, {y})"

        @tool
        def move_mouse(x: int, y: int) -> str:
            """
            Moves the mouse cursor to the specified coordinates
            Args:
                x: The x coordinate (horizontal position)
                y: The y coordinate (vertical position)
            """
            self.desktop.move_mouse(x, y)
            return f"Moved mouse to coordinates ({x}, {y})"

        @tool
        def type_text(text: str, delay_in_ms: int = 75) -> str:
            """
            Types the specified text at the current cursor position
            Args:
                text: The text to type
                delay_in_ms: Delay between keystrokes in milliseconds
            """
            self.desktop.write(text, delay_in_ms=delay_in_ms)
            return f"Typed text: '{text}'"

        @tool
        def press_key(key: str) -> str:
            """
            Presses a keyboard key
            Args:
                key: The key to press (e.g., "Return", "tab", "ctrl+c")
            """
            if key == "enter":
                key = "Return"
            self.desktop.press(key)
            return f"Pressed key: {key}"

        @tool
        def go_back() -> str:
            """
            Goes back to the previous page in the browser.
            Args:
            """
            self.desktop.press(["alt", "left"])
            return "Went back one page"

        @tool
        def scroll(direction: str = "down", amount: int = 1) -> str:
            """
            Scrolls the page
            Args:
                direction: The direction to scroll ("up" or "down"), defaults to "down"
                amount: The amount to scroll. A good amount is 1 or 2.
            """
            self.desktop.scroll(direction=direction, amount=amount)
            return f"Scrolled {direction} by {amount}"

        @tool
        def wait(seconds: float) -> str:
            """
            Waits for the specified number of seconds
            Args:
                seconds: Number of seconds to wait
            """
            time.sleep(seconds)
            return f"Waited for {seconds} seconds"

        @tool
        def open_url(url: str) -> str:
            """
            Opens the specified URL in the default browser
            Args:
                url: The URL to open
            """
            # Make sure URL has http/https prefix
            if not url.startswith(("http://", "https://")):
                url = "https://" + url

            self.desktop.open(url)
            # Give it time to load
            time.sleep(2)
            return f"Opened URL: {url}"

        # Register the tools
        self.tools["click"] = click
        self.tools["right_click"] = right_click
        self.tools["double_click"] = double_click
        self.tools["move_mouse"] = move_mouse
        self.tools["type_text"] = type_text
        self.tools["press_key"] = press_key
        self.tools["scroll"] = scroll
        self.tools["wait"] = wait
        self.tools["open_url"] = open_url
        self.tools["go_back"] = go_back

    def store_metadata_to_file(self, agent) -> None:
        metadata_path = os.path.join(self.data_dir, "metadata.json")
        output = {}
        output['memory'] = self.write_memory_to_messages()
        output['total_token_counts'] = agent.monitor.get_total_token_counts()
        with open(metadata_path, "w") as f:
            f.write(json.dumps(output))

    def write_memory_to_messages(self) -> List[Dict[str, Any]]:
        """Convert memory to messages for the model"""
        messages = [{"role": MessageRole.SYSTEM, "content": [{"type": "text", "text": self.system_prompt}]}]

        for memory_step in self.memory.steps:
            if hasattr(memory_step, "task") and memory_step.task:
                # Add task message if it exists
                messages.append({
                    "role": MessageRole.USER,
                    "content": [{"type": "text", "text": memory_step.task}]
                })
                continue  # Skip to next step after adding task

            # Process model output message if it exists
            if hasattr(memory_step, "model_output") and memory_step.model_output:
                messages.append({
                    "role": MessageRole.ASSISTANT,
                    "content": [{"type": "text", "text": memory_step.model_output}]
                })

            # Process observations and images
            observation_content = []

            # Add text observations if any
            if hasattr(memory_step, "observations") and memory_step.observations:
                observation_content.append({"type": "text", "text": f"Observation: {memory_step.observations}"})

            # Add error if present and didn't already add observations
            if hasattr(memory_step, "error") and memory_step.error:
                observation_content.append({"type": "text", "text": f"Error: {memory_step.error}"})

            # Add user message with content if we have any
            if observation_content:
                messages.append({
                    "role": MessageRole.USER,
                    "content": observation_content
                })

        return messages

    def write_memory_to_messages(self, summary_mode: Optional[bool] = False) -> List[Dict[str, Any]]:
        """Convert memory to messages for the model"""
        messages = [{"role": MessageRole.SYSTEM, "content": [{"type": "text", "text": self.system_prompt}]}]
        # Get the last memory step
        last_step = self.memory.steps[-1] if self.memory.steps else None

        for memory_step in self.memory.steps:
            if hasattr(memory_step, "task") and memory_step.task:
                # Add task message if it exists
                messages.append({
                    "role": MessageRole.USER,
                    "content": [{"type": "text", "text": memory_step.task}]
                })
                continue  # Skip to next step after adding task

            # Process model output message if it exists
            if hasattr(memory_step, "model_output") and memory_step.model_output:
                messages.append({
                    "role": MessageRole.ASSISTANT,
                    "content": [{"type": "text", "text": memory_step.model_output}]
                })

            # Process observations and images
            observation_content = []

            # Add screenshot image paths if present
            if memory_step is last_step and hasattr(memory_step, "observations_images") and memory_step.observations_images:
                self.logger.log(f"Found {len(memory_step.observations_images)} image paths in step", level=LogLevel.DEBUG)
                for img_path in memory_step.observations_images:
                    if isinstance(img_path, str) and os.path.exists(img_path):
                        observation_content.append({"type": "image", "image": img_path})
                    elif isinstance(img_path, Image.Image):
                        screenshot_path = f"screenshot_{int(time.time() * 1000)}.png"
                        img_path.save(screenshot_path)
                        observation_content.append({"type": "image", "image": screenshot_path})
                    else:
                        self.logger.log(f" - Skipping invalid image: {type(img_path)}", level=LogLevel.ERROR)

            # Add text observations if any
            if hasattr(memory_step, "observations") and memory_step.observations:
                self.logger.log(" - Adding text observation", level=LogLevel.DEBUG)
                observation_content.append({"type": "text", "text": f"Observation: {memory_step.observations}"})

            # Add error if present and didn't already add observations
            if hasattr(memory_step, "error") and memory_step.error:
                self.logger.log(" - Adding error message", level=LogLevel.DEBUG)
                observation_content.append({"type": "text", "text": f"Error: {memory_step.error}"})

            # Add user message with content if we have any
            if observation_content:
                self.logger.log(f" - Adding user message with {len(observation_content)} content items", level=LogLevel.DEBUG)
                messages.append({
                    "role": MessageRole.USER,
                    "content": observation_content
                })

        # # Check for images in final message list
        # image_count = 0
        # for msg in messages:
        #     if isinstance(msg.get("content"), list):
        #         for item in msg["content"]:
        #             if isinstance(item, dict) and item.get("type") == "image":
        #                 image_count += 1

        # print(f"Created {len(messages)} messages with {image_count} image paths")

        return messages

    def take_snapshot_callback(self, memory_step: ActionStep, agent=None) -> None:
        """Callback that takes a screenshot + memory snapshot after a step completes"""
        current_step = memory_step.step_number
        print(f"Taking screenshot for step {current_step}")
        try:
            time.sleep(2.0)  # Let things happen on the desktop
            screenshot_bytes = self.desktop.screenshot()
            image = Image.open(BytesIO(screenshot_bytes))

            # Create a filename with the step number
            screenshot_path = os.path.join(self.data_dir, f"step_{current_step:03d}.png")
            image.save(screenshot_path)
            print(f"Saved screenshot to {screenshot_path}")

            # Remove previous screenshots from logs for lean processing
            for previous_memory_step in agent.memory.steps:
                if isinstance(previous_memory_step, ActionStep) and previous_memory_step.step_number <= current_step - 2:
                    previous_memory_step.observations_images = None

            # Add to the current memory step
            # memory_step.observations_images = [image.copy()]  # This would take the original image directly.
            memory_step.observations_images = [screenshot_path]

            # Store memory and metadata to file
            self.store_metadata_to_file(agent)

        except Exception as e:
            print(f"Error taking screenshot: {e}")

    def close(self):
        """Clean up resources"""
        if self.desktop:
            print("Stopping e2b stream...")
            self.desktop.stream.stop()

            print("Killing e2b sandbox...")
            self.desktop.kill()
            print("E2B sandbox terminated")


class QwenVLAPIModel(Model):
    """Model wrapper for Qwen2.5VL API"""

    def __init__(
        self,
        model_path: str = "Qwen/Qwen2.5-VL-72B-Instruct",
        provider: str = "hyperbolic"
    ):
        super().__init__()
        self.model_path = model_path
        self.model_id = model_path
        self.provider = provider

        self.client = InferenceClient(
            provider=self.provider,
        )

    def __call__(
        self,
        messages: List[Dict[str, Any]],
        stop_sequences: Optional[List[str]] = None,
        **kwargs
    ) -> ChatMessage:
        """Convert a list of messages to an API request and return the response"""
        # # Count images in messages - debug
        # image_count = 0
        # for msg in messages:
        #     if isinstance(msg.get("content"), list):
        #         for item in msg["content"]:
        #             if isinstance(item, dict) and item.get("type") == "image":
        #                 image_count += 1

        # print(f"QwenVLAPIModel received {len(messages)} messages with {image_count} images")

        # Format the messages for the API
        formatted_messages = []

        for msg in messages:
            role = msg["role"]
            if isinstance(msg["content"], list):
                content = []
                for item in msg["content"]:
                    if item["type"] == "text":
                        content.append({"type": "text", "text": item["text"]})
                    elif item["type"] == "image":
                        # Handle image path or direct image object
                        if isinstance(item["image"], str):
                            # Image is a path
                            with open(item["image"], "rb") as image_file:
                                base64_image = base64.b64encode(image_file.read()).decode("utf-8")
                        else:
                            # Image is a PIL image or similar object
                            img_byte_arr = BytesIO()
                            item["image"].save(img_byte_arr, format="PNG")
                            base64_image = base64.b64encode(img_byte_arr.getvalue()).decode("utf-8")

                        content.append({
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/png;base64,{base64_image}"
                            }
                        })
            else:
                content = [{"type": "text", "text": msg["content"]}]

            formatted_messages.append({"role": role, "content": content})

        # Make the API request
        completion = self.client.chat.completions.create(
            model=self.model_path,
            messages=formatted_messages,
            max_tokens=kwargs.get("max_new_tokens", 512),
            temperature=kwargs.get("temperature", 0.7),
            top_p=kwargs.get("top_p", 0.9),
        )

        # Extract the response text
        output_text = completion.choices[0].message.content

        return ChatMessage(role=MessageRole.ASSISTANT, content=output_text)

    def to_dict(self) -> Dict[str, Any]:
        """Convert the model to a dictionary"""
        return {
            "class": self.__class__.__name__,
            "model_path": self.model_path,
            "provider": self.provider,
            # We don't save the API key for security reasons
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "QwenVLAPIModel":
        """Create a model from a dictionary"""
        return cls(
            model_path=data.get("model_path", "Qwen/Qwen2.5-VL-72B-Instruct"),
            provider=data.get("provider", "hyperbolic"),
        )
requirements.txt CHANGED

@@ -1 +1,4 @@
-e2b_desktop
+e2b_desktop
+smolagents
+Pillow
+huggingface_hub
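Beyond these four packages, the app also expects an E2B_API_KEY environment variable for the sandbox, and, since upload_to_hf_and_remove pushes session logs to the open-agents/os-agent-logs dataset, a Hugging Face token with write access (for example via the standard HF_TOKEN environment variable or huggingface-cli login).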