BoyuNLP committed on
Commit
098b73b
1 Parent(s): 2e51443
llava/eval/seeact/__init__.py DELETED
@@ -1,14 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) 2024 OSU Natural Language Processing Group
3
- #
4
- # Licensed under the OpenRAIL-S License;
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # https://www.licenses.ai/ai-pubs-open-rails-vz1
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
llava/eval/seeact/agent.py DELETED
@@ -1,945 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) 2024 OSU Natural Language Processing Group
3
- #
4
- # Licensed under the OpenRAIL-S License;
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # https://www.licenses.ai/ai-pubs-open-rails-vz1
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import logging
17
- import os
18
- import traceback
19
- from datetime import datetime
20
- import json
21
- import toml
22
- import torch
23
- from playwright.async_api import async_playwright, Locator
24
-
25
- from .data_utils.format_prompt_utils import get_index_from_option_name, generate_new_query_prompt, \
26
- generate_new_referring_prompt, format_options
27
- from .demo_utils.browser_helper import normal_launch_async, normal_new_context_async, \
28
- get_interactive_elements_with_playwright, select_option, saveconfig, get_select_elements_with_playwright
29
- from .demo_utils.format_prompt import format_choices, postprocess_action_lmm
30
- from .demo_utils.inference_engine import engine_factory
31
-
32
-
33
-
34
- from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
35
- from llava.conversation import conv_templates
36
- from llava.model.builder import load_pretrained_model
37
- from llava.utils import disable_torch_init
38
- from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
39
-
40
- from PIL import Image, ImageDraw,ImageFont
41
-
42
- import numpy as np
43
-
44
- import asyncio
45
-
46
-
47
- def average_color(image, bbox):
48
- """Compute the average color of the given region of the image."""
49
- region = image.crop(bbox)
50
- numpy_image = np.array(region)
51
- avg_color = np.mean(numpy_image, axis=(0, 1))
52
- return tuple(avg_color)
53
-
54
-
55
- def color_contrast(color1, color2):
56
- """Compute the contrast ratio between two colors."""
57
- l1 = 0.2126 * pow(color1[0]/255, 2.2) + \
58
- 0.7152 * pow(color1[1]/255, 2.2) + \
59
- 0.0722 * pow(color1[2]/255, 2.2)
60
- l2 = 0.2126 * pow(color2[0]/255, 2.2) + \
61
- 0.7152 * pow(color2[1]/255, 2.2) + \
62
- 0.0722 * pow(color2[2]/255, 2.2)
63
- if l1 > l2:
64
- return (l1 + 0.05) / (l2 + 0.05)
65
- else:
66
- return (l2 + 0.05) / (l1 + 0.05)
67
-
68
- def text_color_for_background(background_color):
69
- """Choose the best text color based on the background color."""
70
- red = (255, 0, 0)
71
- blue = (0, 0, 255)
72
- contrast_red = color_contrast(background_color, red)
73
- contrast_blue = color_contrast(background_color, blue)
74
- if contrast_red > contrast_blue:
75
- return red
76
- else:
77
- return blue
78
-
79
-
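For reference, a minimal self-contained sketch (not part of the deleted file) of what color_contrast and text_color_for_background compute: an approximate WCAG contrast ratio based on a 2.2-gamma relative luminance, used to decide whether red or blue text is more readable on a given background. The helper names below are illustrative.

def _rel_luminance(rgb):
    # Approximate relative luminance with a plain 2.2 gamma, as in color_contrast above
    r, g, b = (c / 255 for c in rgb)
    return 0.2126 * r ** 2.2 + 0.7152 * g ** 2.2 + 0.0722 * b ** 2.2

def _contrast(c1, c2):
    # Contrast ratio: (lighter + 0.05) / (darker + 0.05)
    hi, lo = sorted((_rel_luminance(c1), _rel_luminance(c2)), reverse=True)
    return (hi + 0.05) / (lo + 0.05)

white = (255, 255, 255)
print(round(_contrast(white, (255, 0, 0)), 1))  # ~4.0 for red text on white
print(round(_contrast(white, (0, 0, 255)), 1))  # ~8.6 for blue text on white, so blue is chosen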
80
- def draw_text(draw, text, position, font, max_width, image):
81
- """Draw auto-wrapped text on the image, adjusting the text color to the background color."""
82
- x, y = position
83
- words = text.split()
84
- current_line = ""
85
-
86
- # Height of a single line of text (fixed value)
87
- line_height = 40
88
-
89
- for word in words:
90
- test_line = f"{current_line} {word}".strip()
91
- width, _ = 40, 40  # fixed width and height used in place of calling the font object's getsize method
92
- if width <= max_width:
93
- current_line = test_line
94
- else:
95
- bbox = (x, y, x + width, y + line_height)
96
- bg_color = average_color(image, bbox)
97
- color = text_color_for_background(bg_color)
98
- draw.text((x, y), current_line, font=font, fill=color)
99
- y += line_height
100
- current_line = word
101
- if current_line:
102
- bbox = (x, y, x + width, y + line_height)
103
- bg_color = average_color(image, bbox)
104
- color = text_color_for_background(bg_color)
105
- draw.text((x, y), current_line, font=font, fill=color)
106
-
107
-
108
- class SeeActAgent:
109
- def __init__(self,
110
- config_path=None,
111
- save_file_dir="seeact_agent_files",
112
- save_task_id=None,
113
- default_task='Search for the flight status for the flight AA 3942 leaving on Jun. 10',
114
- default_website="https://www.aa.com/homePage.do",
115
- input_info=["screenshot"],
116
- grounding_strategy="text_choice",
117
- max_auto_op=50,
118
- max_continuous_no_op=5,
119
- highlight=False,
120
- headless=False,
121
- args=[],
122
- browser_app="chrome",
123
- persistant=False,
124
- persistant_user_path="",
125
- save_video=False,
126
- viewport={
127
- "width": 1280,
128
- "height": 960
129
- },
130
- tracing=False,
131
- trace={
132
- "screenshots": True,
133
- "snapshots": True,
134
- "sources": True
135
- },
136
- rate_limit=-1,
137
- model="gpt-4-turbo",
138
- temperature=0.9
139
-
140
- ):
141
-
142
- try:
143
- if config_path is not None:
144
- with open(config_path,
145
- 'r') as config:
146
- print(f"Configuration File Loaded - {config_path}")
147
- config = toml.load(config)
148
- else:
149
- config = {
150
- "basic": {
151
- "save_file_dir": save_file_dir,
152
- "default_task": default_task,
153
- "default_website": default_website
154
- },
155
- "agent": {
156
- "input_info": input_info,
157
- "grounding_strategy": grounding_strategy,
158
- "max_auto_op": max_auto_op,
159
- "max_continuous_no_op": max_continuous_no_op,
160
- "highlight": highlight
161
- },
162
- "openai": {
163
- "rate_limit": rate_limit,
164
- "model": model,
165
- "temperature": temperature
166
- }
167
- }
168
- config.update({
169
- "browser": {
170
- "headless": headless,
171
- "args": args,
172
- "browser_app": browser_app,
173
- "persistant": persistant,
174
- "persistant_user_path": persistant_user_path,
175
- "save_video": save_video,
176
- "viewport": viewport,
177
- "tracing": tracing,
178
- "trace": trace
179
- }
180
- })
181
-
182
- except FileNotFoundError:
183
- print(f"Error: File '{os.path.abspath(config_path)}' not found.")
184
- except toml.TomlDecodeError:
185
- print(f"Error: File '{os.path.abspath(config_path)}' is not a valid TOML file.")
186
-
187
- self.config = config
188
- self.complete_flag = False
189
- self.session_control = {
190
- 'active_page': None,
191
- 'context': None,
192
- 'browser': None
193
- }
194
- self.tasks = [self.config["basic"]["default_task"]]
195
- if save_task_id:
196
- self.main_path = os.path.join(self.config["basic"]["save_file_dir"],
197
- save_task_id)
198
-
199
-
200
- else:
201
- self.main_path = os.path.join(self.config["basic"]["save_file_dir"], datetime.now().strftime("%Y%m%d_%H%M%S"))
202
- if os.path.exists(self.main_path):
203
- self.complete_flag=True
204
-
205
- os.makedirs(self.main_path, exist_ok=True)
206
- self.action_space = ["CLICK", "PRESS ENTER", "HOVER", "SCROLL UP", "SCROLL DOWN", "NEW TAB", "CLOSE TAB",
207
- "GO BACK", "GO FORWARD",
208
- "TERMINATE", "SELECT", "TYPE", "GOTO", "MEMORIZE"] # Define the list of actions here
209
-
210
- self.no_value_op = ["CLICK", "PRESS ENTER", "HOVER", "SCROLL UP", "SCROLL DOWN", "NEW TAB", "CLOSE TAB",
211
- "PRESS HOME", "PRESS END", "PRESS PAGEUP", "PRESS PAGEDOWN",
212
- "GO BACK",
213
- "GO FORWARD",
214
- "TERMINATE", "NONE"]
215
-
216
- self.with_value_op = ["SELECT", "TYPE", "GOTO", "MEMORIZE", "SAY"]
217
-
218
- self.no_element_op = ["PRESS ENTER", "SCROLL UP", "SCROLL DOWN", "NEW TAB", "CLOSE TAB", "GO BACK", "GOTO",
219
- "PRESS HOME", "PRESS END", "PRESS PAGEUP", "PRESS PAGEDOWN",
220
- "GO FORWARD",
221
- "TERMINATE", "NONE", "MEMORIZE", "SAY"]
222
-
223
- # Initialize the primary logger and the developer logger
224
- self.logger = self._setup_logger(redirect_to_dev_log=False)
225
- # self.dev_logger = self._setup_dev_logger()
226
-
227
- # # Redirect primary logger messages to dev_logger as well
228
- # for handler in self.logger.handlers:
229
- # self.dev_logger.addHandler(handler)
230
-
231
- self.engine = engine_factory(**self.config['openai'])
232
- self.taken_actions = []
233
- self.prompts = self._initialize_prompts()
234
- self.time_step = 0
235
- self.valid_op = 0
236
- # self.error=0
237
- self.continuous_no_op = 0
238
- self.predictions=[]
239
-
240
- disable_torch_init()
241
- self.pixui_model_path = os.path.expanduser(
242
- "/fs/ess/PAS1576/boyu_gou/train_vlm/ui_llava_fine_tune/checkpoints/only-web/merged-llava-v1.5-vicuna-7b-16k-pad-no-fusion-web-80k")
243
- self.pixui_model_name = get_model_name_from_path(self.pixui_model_path)
244
- self.pixui_tokenizer, self.pixui_model, self.pixui_image_processor, self.pixui_context_len = load_pretrained_model(self.pixui_model_path, None, self.pixui_model_name)
245
-
246
- def _initialize_prompts(self):
247
- """Initialize prompt information including dynamic action space."""
248
- action_format = f"" # Dynamically generate action_format based on self.action_space
249
-
250
- return {
251
- "system_prompt": '''You are assisting humans doing web navigation tasks step by step. At each stage, you can see the webpage by a screenshot and know the previous actions before the current step decided by yourself that have been executed for this task through recorded history. You need to decide on the first following action to take.''',
252
-
253
- "action_space": '''
254
- Here are the descriptions of all allowed actions:
255
-
256
- No Value Operations:
257
- - CLICK: Click on a webpage element using the mouse.
258
- - PRESS ENTER: Press the Enter key, typically to submit a form or confirm an input.
259
- - SCROLL UP: Scroll the webpage upwards by half of the window height.
260
- - SCROLL DOWN: Scroll the webpage downwards by half of the window height.
261
- - PRESS HOME: Scroll to the top of the webpage.
262
- - PRESS END: Scroll to the bottom of the webpage.
263
- - PRESS PAGEUP: Scroll up by one window height.
264
- - PRESS PAGEDOWN: Scroll down by one window height.
265
- - GO BACK: Navigate to the previous page in the browser history.
266
- - GO FORWARD: Navigate to the next page in the browser history.
267
- - TERMINATE: End the current task, typically used when the task is considered complete or requires potentially harmful actions.
268
- - NONE: Indicates that no action is necessary at this stage. Used to skip an action or wait.
269
-
270
- With Value Operations:
271
- - SELECT: Choose an option from a dropdown menu or <select> element. The value indicates the option to select.
272
- - TYPE: Enter text into a text area or text box. The value is the text to be typed.
273
- ''',
274
-
275
- "question_description": '''The screenshot below shows the webpage you see. Think step by step before outlining the next action step at the current stage. Clearly outline which element in the webpage users will operate with as the first next target element, its detailed location, and the corresponding operation.
276
-
277
- To be successful, it is important to follow the following rules:
278
- 1. You should only issue a valid action given the current observation.
279
- 2. You should only issue one action at a time
280
- 3. Unlike humans, since you are using playwright APIs, for typing (e.g., in text areas, text boxes) and selecting (e.g., from dropdown menus or <select> elements), you should try directly typing the input or selecting the choice, bypassing the need for an initial click.
281
- 4. You should not attempt to create accounts, log in or do the final submission.
282
- 5. Terminate when you deem the task complete or if it requires potentially harmful actions.
283
- 6. Details of <select> elements will be provided, to help you figure out the exact choice text to be chosen if the action is a SELECT.
284
- 7. If you find there are one or more failed actions in the most recent actions, you should change the description and make your descriptions more precise and concise (and at least do not repeat the latest description.).
285
-
286
- (Final Answer)
287
- Finally, conclude your answer using the format below. Ensure your answer is strictly adhering to the format provided below. Please do not leave any explanation in your answers of the final standardized format part, and this final part should be clear and certain. The element choice, action, and value should be in three separate lines.
288
-
289
- Format:
290
-
291
- ELEMENT: The description of the target element to locate the element, if the action involves a specific element. Otherwise write "None"
292
-
293
- ACTION: Choose an action from allowed actions.
294
-
295
- VALUE: Provide additional input based on ACTION. (If it doesn't involve a value, write "None")''',
296
-
297
- "referring_description": f"""(Reiteration)
298
- First, reiterate your next target element, its detailed location, and the corresponding operation.""",
299
-
300
- "element_format": '''''',
301
-
302
- "action_format": action_format, # Use the dynamically generated action_format
303
-
304
- "value_format": ''''''
305
- }
306
-
307
- def update_action_space(self, new_actions):
308
- """Update the action space and regenerate the action_format prompt."""
309
- if isinstance(new_actions, list) and all(isinstance(item, str) for item in new_actions):
310
- self.action_space = new_actions
311
- self.prompts["action_format"] = f"ACTION: Choose an action from {{{', '.join(self.action_space)}}}."
312
- else:
313
- print("Invalid action space provided. It must be a list of strings.")
314
-
315
- def _setup_logger(self, redirect_to_dev_log=False):
316
- """Set up a logger to log to both file and console within the main_path."""
317
- logger_name = 'SeeActAgent'
318
- logger = logging.getLogger(logger_name)
319
- logger.setLevel(logging.INFO)
320
- if not logger.handlers: # Avoid adding handlers multiple times
321
- # Create a file handler for writing logs to a file
322
- log_filename = 'agent.log'
323
- f_handler = logging.FileHandler(os.path.join(self.main_path, log_filename))
324
- f_handler.setLevel(logging.INFO)
325
-
326
- # Create a console handler for printing logs to the terminal
327
- c_handler = logging.StreamHandler()
328
- c_handler.setLevel(logging.INFO)
329
-
330
- # Create formatters for file and console handlers
331
- file_formatter = logging.Formatter('%(asctime)s - %(message)s')
332
- console_formatter = logging.Formatter('%(message)s')
333
-
334
- # Set formatters for file and console handlers
335
- f_handler.setFormatter(file_formatter)
336
- c_handler.setFormatter(console_formatter)
337
-
338
- # Add the handlers to the logger
339
- logger.addHandler(f_handler)
340
- if not redirect_to_dev_log: # Only add console handler if not redirecting to dev log
341
- logger.addHandler(c_handler)
342
-
343
- return logger
344
-
345
- # def _setup_dev_logger(self):
346
- # """Set up a developer logger to log only to a file within the main_path."""
347
- # dev_logger_name = 'SeeActAgentDev'
348
- # dev_logger = logging.getLogger(dev_logger_name)
349
- # dev_logger.setLevel(logging.INFO)
350
- # if not dev_logger.handlers: # Avoid adding handlers multiple times
351
- # # Create a file handler for writing logs to a dev log file
352
- # dev_log_filename = 'dev_agent.log'
353
- # f_handler = logging.FileHandler(os.path.join(self.main_path, dev_log_filename))
354
- # f_handler.setLevel(logging.INFO)
355
- #
356
- # # Create a formatter and add it to the handler
357
- # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
358
- # f_handler.setFormatter(formatter)
359
- #
360
- # # Add the file handler to the dev logger
361
- # dev_logger.addHandler(f_handler)
362
- #
363
- # return dev_logger
364
-
365
- async def page_on_close_handler(self):
366
- # Corrected to use 'self' for accessing class attributes
367
- if self.session_control['context']:
368
- try:
369
- await self.session_control['active_page'].title()
370
- except:
371
- self.logger.info(
372
- "The active tab was closed. Will switch to the last page (or open a new default google page)")
373
- if self.session_control['context'].pages:
374
- self.session_control['active_page'] = self.session_control['context'].pages[-1]
375
- await self.session_control['active_page'].bring_to_front()
376
- self.logger.info(f"Switched the active tab to: {self.session_control['active_page'].url}")
377
- else:
378
- self.session_control['active_page'] = await self.session_control['context'].new_page()
379
- try:
380
- await self.session_control['active_page'].goto("https://www.google.com/", wait_until="load")
381
- except Exception as e:
382
- self.logger.info(f"Failed to navigate to Google: {e}")
383
- self.logger.info(f"Switched the active tab to: {self.session_control['active_page'].url}")
384
-
385
- def save_action_history(self, filename="action_history.txt"):
386
- """Save the history of taken actions to a file in the main path."""
387
- history_path = os.path.join(self.main_path, filename)
388
- with open(history_path, 'w') as f:
389
- for action in self.taken_actions:
390
- f.write(action + '\n')
391
- self.logger.info(f"Action history saved to: {history_path}")
392
-
393
- async def page_on_navigation_handler(self, frame):
394
- # Corrected to use 'self' for accessing class attributes
395
- self.session_control['active_page'] = frame.page
396
-
397
- async def page_on_crash_handler(self, page):
398
- # Corrected logging method
399
- self.logger.info(f"Page crashed: {page.url}")
400
- self.logger.info("Try to reload")
401
- await page.reload()
402
-
403
- async def page_on_open_handler(self, page):
404
- # Added 'self' to the handler functions to reference the current instance of the class
405
- page.on("framenavigated", self.page_on_navigation_handler)
406
- page.on("close", self.page_on_close_handler)
407
- page.on("crash", self.page_on_crash_handler)
408
- self.session_control['active_page'] = page
409
- # Additional event listeners can be added here
410
-
411
- async def start(self, headless=None, args=None, website=None):
412
- self.playwright = await async_playwright().start()
413
- self.session_control['browser'] = await normal_launch_async(self.playwright,
414
- headless=self.config['browser'][
415
- 'headless'] if headless is None else headless,
416
- args=self.config['browser'][
417
- 'args'] if args is None else args)
418
- self.session_control['context'] = await normal_new_context_async(self.session_control['browser'],
419
- viewport=self.config['browser'][
420
- 'viewport'])
421
-
422
- self.session_control['context'].on("page", self.page_on_open_handler)
423
- await self.session_control['context'].new_page()
424
-
425
- try:
426
- await self.session_control['active_page'].goto(
427
- self.config['basic']['default_website'] if website is None else website,
428
- wait_until="load")
429
- self.logger.info(f"Loaded website: {self.config['basic']['default_website']}")
430
- except Exception as e:
431
- self.logger.info("Failed to fully load the webpage before timeout")
432
- self.logger.info(e)
433
-
434
- # await asyncio.sleep(2)
435
-
436
- def update_prompt_part(self, part_name, new_text):
437
- """Update the specified part of the prompt information."""
438
- if part_name in self.prompts:
439
- self.prompts[part_name] = new_text
440
- return True
441
- else:
442
- print(f"Prompt part '{part_name}' not found.")
443
- return False
444
-
445
- def generate_prompt(self, task=None, previous=None, choices=None):
446
-
447
- """Generate a prompt based on the current task, previous actions, and choices."""
448
- # assert task is not None, "Please input the task."
449
-
450
- prompt_list = []
451
-
452
- system_prompt_input = self.prompts["system_prompt"]
453
- action_space_input = self.prompts["action_space"]
454
- question_description_input = self.prompts["question_description"]
455
- referring_input = self.prompts["referring_description"]
456
- element_format_input = self.prompts["element_format"]
457
- action_format_input = self.prompts["action_format"]
458
- value_format_input = self.prompts["value_format"]
459
-
460
- # print(previous)
461
-
462
- previous_ = self.taken_actions if self.taken_actions else None
463
-
464
- # print(previous_)
465
-
466
- prompt_list.extend(
467
- generate_new_query_prompt(system_prompt=system_prompt_input + "\n" + action_space_input,
468
- task=self.tasks[-1], previous_actions=previous_,
469
- question_description=question_description_input,select_elements=choices))
470
- prompt_list.append(
471
- generate_new_referring_prompt(referring_description=referring_input, element_format=element_format_input,
472
- action_format=action_format_input, value_format=value_format_input,
473
- choices=None))
474
-
475
- return prompt_list
476
-
477
- async def perform_action(self, target_element=None, action_name=None, value=None, element_repr=""):
478
- if target_element is not None:
479
- selector = target_element['selector']
480
- element_repr =target_element['description']
481
- else:
482
- selector = None
483
-
484
- page = self.session_control['active_page']
485
-
486
-
487
-
488
- if action_name == "CLICK" and selector:
489
- # await selector.click(timeout=2000)
490
- await self.session_control['active_page'].mouse.click(x=selector[0], y=selector[1])
491
- self.logger.info(f"Clicked on element: {element_repr}")
492
- elif action_name == "HOVER" and selector:
493
- # await selector.hover(timeout=2000)
494
- await self.session_control['active_page'].mouse.move(x=selector[0], y=selector[1])
495
- self.logger.info(f"Hovered over element: {element_repr}")
496
- elif action_name == "TYPE" and selector:
497
- # await selector.fill(value)
498
- # await selector.fill(value)
499
- await self.session_control['active_page'].mouse.click(x=selector[0], y=selector[1])
500
- await page.keyboard.type(value)
501
-
502
- self.logger.info(f"Typed '{value}' into element: {element_repr}")
503
- elif action_name == "SCROLL UP":
504
- await page.evaluate(f"window.scrollBy(0, -{self.config['browser']['viewport']['height'] // 2});")
505
- self.logger.info("Scrolled up")
506
- elif action_name == "SCROLL DOWN":
507
- await page.evaluate(f"window.scrollBy(0, {self.config['browser']['viewport']['height'] // 2});")
508
- self.logger.info("Scrolled down")
509
- elif action_name == "PRESS HOME":
510
- await page.keyboard.press('Home')
511
- self.logger.info("Pressed Home key")
512
- elif action_name == "PRESS END":
513
- await page.keyboard.press('End')
514
- self.logger.info("Pressed End key")
515
- elif action_name == "PRESS PAGEUP":
516
- await page.keyboard.press('PageUp')
517
- self.logger.info("Pressed PageUp key")
518
- elif action_name == "PRESS PAGEDOWN":
519
- await page.keyboard.press('PageDown')
520
- self.logger.info("Pressed PageDown key")
521
- elif action_name == "NEW TAB":
522
- new_page = await self.session_control['context'].new_page()
523
- # self.session_control['pages'].append(new_page)
524
- self.logger.info("Opened a new tab")
525
- elif action_name == "CLOSE TAB":
526
- await page.close()
527
- self.logger.info("Closed the current tab")
528
- elif action_name == "GO BACK":
529
- await page.go_back()
530
- self.logger.info("Navigated back")
531
- elif action_name == "GO FORWARD":
532
- await page.go_forward()
533
- self.logger.info("Navigated forward")
534
- elif action_name == "GOTO" and value:
535
- await page.goto(value, wait_until="load")
536
- self.logger.info(f"Navigated to {value}")
537
- # elif action_name == "PRESS ENTER" and selector:
538
- # await selector.press('Enter')
539
- # self.logger.info(f"Pressed Enter on element: {element_repr}")
540
- elif action_name == "PRESS ENTER":
541
- await page.keyboard.press('Enter')
542
- self.logger.info(f"Pressed Enter on element: {element_repr}")
543
- elif action_name == "SELECT" and selector:
544
- await select_option(selector, value)
545
- self.logger.info(f"Selected option '{value}' from element: {element_repr}")
546
- elif action_name == "TERMINATE":
547
- self.complete_flag = True
548
- self.logger.info("Task has been marked as complete. Terminating...")
549
- elif action_name in ["NONE"]:
550
- self.logger.info("No action necessary at this stage. Skipped")
551
- elif action_name in ["SAY"]:
552
- self.logger.info(f"Say {value} to the user")
553
- elif action_name in ["MEMORIZE"]:
554
- self.logger.info(f"Keep {value} to the action history.")
555
- else:
556
- raise Exception(f"Unsupported or improperly specified action: {action_name}")
557
- if action_name in self.no_element_op and target_element is None:
558
- new_action = action_name
559
- else:
560
- # new_action = "[" + target_element['tag_with_role'] + "]" + " "
561
- new_action = target_element['description'] + " -> " + action_name
562
- if action_name in self.with_value_op:
563
- new_action += ": " + value
564
-
565
- # self.dev_logger.info(new_action)
566
- return new_action
567
-
568
- async def predict(self):
569
-
570
- """
571
- Generate a prediction for the next action based on the webpage elements and previous actions.
572
- """
573
-
574
- self.time_step += 1
575
-
576
- try:
577
- await self.session_control["active_page"].wait_for_load_state('load')
578
- except Exception as e:
579
- pass
580
-
581
- # elements = await get_interactive_elements_with_playwright(self.session_control['active_page'],
582
- # self.config['browser']['viewport'])
583
- elements = None
584
-
585
- '''
586
- 0: center_point =(x,y)
587
- 1: description
588
- 2: tag_with_role: tag_head with role and type # TODO: Consider adding more
589
- 3. box
590
- 4. selector
591
- 5. tag
592
- '''
593
-
594
- # elements = sorted(elements, key=lambda el: (
595
- # el["center_point"][1], el["center_point"][0])) # Sorting by y and then x coordinate
596
-
597
- # Generate choices for the prompt
598
-
599
- # , self.config['basic']['default_task'], self.taken_actions
600
- # choices = format_choices(elements)
601
-
602
- select_elements = await get_select_elements_with_playwright(self.session_control['active_page'],
603
- self.config['browser']['viewport'],
604
- )
605
-
606
- select_elements_formated=format_choices(select_elements)
607
-
608
- # print("\n\n",choices)
609
- prompt = self.generate_prompt(task=self.tasks[-1], previous=self.taken_actions, choices=select_elements_formated)
610
- # print("\n\n",prompt)
611
-
612
- # Logging prompt for debugging
613
-
614
- # Capture a screenshot for the current state of the webpage, if required by the model
615
- screenshot_path = os.path.join(self.main_path, 'screenshots', f'screen_{self.time_step}.png')
616
- try:
617
- await self.session_control['active_page'].screenshot(path=screenshot_path)
618
- except Exception as e:
619
- self.logger.info(f"Failed to take screenshot: {e}")
620
-
621
- terminal_width = 10
622
- self.logger.info(f"Step - {self.time_step}\n{'-'*terminal_width}\nAction Generation ➡️")
623
- # for prompt_part in prompt:
624
- self.logger.info("TASK: "+self.tasks[-1])
625
- self.logger.info("Previous:")
626
- for action in self.taken_actions:
627
- self.logger.info(action)
628
-
629
- output0 = self.engine.generate(prompt=prompt, image_path=screenshot_path, turn_number=0)
630
-
631
- terminal_width = 10
632
- self.logger.info("-" * terminal_width)
633
- self.logger.info("🤖 Action Generation Output 🤖")
634
-
635
- for line in output0.split('\n'):
636
- self.logger.info(line)
637
-
638
- terminal_width = 10
639
- self.logger.info("-" * (terminal_width))
640
-
641
- # choice_text = f"Action Grounding ➡️" + "\n" + format_options(
642
- # choices)
643
- # choice_text = choice_text.replace("\n\n", "")
644
- #
645
- # for line in choice_text.split('\n'):
646
- # self.logger.info(line)
647
-
648
- # output = self.engine.generate(prompt=prompt, image_path=screenshot_path, turn_number=1,
649
- # ouput_0=output0)
650
-
651
- output=""
652
- self.logger.info("🤖 Action Grounding Output 🤖")
653
- for line in output.split('\n'):
654
- self.logger.info(line)
655
-
656
- pred_element_label, pred_action, pred_value = postprocess_action_lmm(output0)
657
-
658
- # print(pred_element_label)
659
- # print(pred_action)
660
- # print(pred_value)
661
- # exit()
662
-
663
- # if len(pred_element_label) in [1, 2]:
664
- # element_id = get_index_from_option_name(pred_element_label)
665
- # else:
666
- # element_id = None
667
- pred_element = pred_element_label
668
-
669
- def get_scale_factor(original_size):
670
- original_width, original_height = original_size
671
- new_width = min(nearest_multiple_of_224_at_least_224(original_width, ceiling=False), 1344)
672
- scale_factor = new_width / original_width
673
- return scale_factor
674
-
675
- def nearest_multiple_of_224_at_least_224(num, ceiling=False):
676
- if num <= 224:
677
- return 224
678
- division, remainder = divmod(num, 224)
679
- if ceiling and remainder > 0:
680
- return (division + 1) * 224
681
- if remainder < 112:
682
- return division * 224
683
- else:
684
- return (division + 1) * 224
685
-
686
-
687
-
688
- image_file = screenshot_path
689
-
690
- qs = f"In the screenshot, where are the pixel coordinates (x, y) of the element corresponding to \"{pred_element}\"?"
691
- cur_prompt = qs
692
- if self.pixui_model.config.mm_use_im_start_end:
693
- qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
694
- else:
695
- qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
696
-
697
- conv = conv_templates["llava_v1"].copy()
698
- conv.append_message(conv.roles[0], qs)
699
- conv.append_message(conv.roles[1], None)
700
- prompt = conv.get_prompt()
701
-
702
- input_ids = tokenizer_image_token(prompt, self.pixui_tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
703
-
704
- image = Image.open(os.path.join(image_file)).convert('RGB')
705
- # print("DEBUG",model.config)
706
- image_tensor, image_new_size = process_images([image], self.pixui_image_processor, self.pixui_model.config)
707
- # image_tensor,image_new_size = process_images([image], image_processor, model.config)[0]
708
-
709
- with torch.inference_mode():
710
- output_ids = self.pixui_model.generate(
711
- input_ids,
712
- # images=image_tensor.unsqueeze(0).half().cuda(),
713
- images=image_tensor.half().cuda(),
714
- image_sizes=[image_new_size],
715
- do_sample=False,
716
- temperature=0,
717
- top_p=None,
718
- num_beams=1,
719
- # no_repeat_ngram_size=3,
720
- max_new_tokens=16384,
721
- use_cache=True)
722
-
723
- outputs = self.pixui_tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
724
-
725
-
726
- # print("predicted coordinate: ",outputs)
727
-
728
-
729
- grounding_image=Image.open(screenshot_path)
730
-
731
-
732
-
733
-
734
-
735
-
736
-
737
- scale=get_scale_factor(grounding_image.size)
738
-
739
- coord = eval(outputs)
740
- coord = tuple((i / scale for i in coord))
741
-
742
- elements_at_point= await get_select_elements_with_playwright(self.session_control['active_page'],self.config['browser']['viewport'])
743
-
744
-
745
- print(elements_at_point)
746
-
747
- if pred_action=="SELECT":
748
- import math
749
- def euclidean_distance(coord1, coord2):
750
- return math.sqrt((coord1[0] - coord2[0]) ** 2 + (coord1[1] - coord2[1]) ** 2)
751
-
752
- min_distance = float('inf')
753
- closest_element = None
754
-
755
- # Iterate over all elements and find the closest one
756
- for element in elements_at_point:
757
- distance = euclidean_distance(coord, element['center_point'])
758
- if distance < min_distance:
759
- min_distance = distance
760
- closest_element = element
761
- pred_element=closest_element
762
- pred_element['description']=pred_element_label
763
-
764
-
765
-
766
- # print(scale)
767
-
768
- # print(coord)
769
-
770
- annotated_screenshot_path = os.path.join(self.main_path, 'screenshots', f'screen_{self.time_step}_grounding.png')
771
- draw = ImageDraw.Draw(image)
772
-
773
- # prompt = self.generate_prompt(task=self.tasks[-1], previous=self.taken_actions,
774
- # choices=select_elements_formated)
775
- #
776
- # output = self.engine.generate(prompt=prompt, image_path=screenshot_path, turn_number=1,
777
- # ouput_0=output0)
778
-
779
-
780
- # i=pred_element
781
- # print(i["description"])
782
- # print()
783
- # box=i['box']
784
- # left = box[0]
785
- # top = box[1]
786
- # right = box[0] + box[2]
787
- # bottom = box[1] + box[3]
788
- # # draw = ImageDraw.Draw(image2)
789
- # # Draw a red bounding box
790
- # draw.rectangle([left, top, right, bottom], outline="red", width=4)
791
- x, y = coord
792
- # Radius of the dot
793
- radius = 7
794
- # Draw a dot at the predicted coordinate
795
- draw.ellipse((x - radius, y - radius, x + radius, y + radius), fill="blue")
796
- # Show the image
797
- # image2.show()
798
-
799
- text = pred_element_label
800
-
801
- # Choose the font and font size
802
- font = ImageFont.truetype("/fs/ess/PAS1576/boyu_gou/train_vlm/ui_llava_fine_tune/llava/eval/Roboto-Medium.ttf", 36)
803
-
804
- # Compute the text size
805
- # text_width, text_height = draw.textsize(text, font=font)
806
-
807
- # Set the text position (top-left corner)
808
- x = 0
809
- y = 0
810
-
811
- # Write the text onto the image
812
- max_width = image.width
813
-
814
- draw_text(draw, text+str(coord), (0, 0), font, max_width, image)
815
-
816
- image.save(fp=annotated_screenshot_path)
817
- image.close()
818
-
819
-
820
- # exit()
821
-
822
- # Log the prediction result
823
- self.logger.debug(f"Retrieved Answer")
824
- self.logger.debug(f"Predicted Element: {pred_element}")
825
- self.logger.debug(f"Action: {pred_action}")
826
- self.logger.debug(f"Value: {pred_value}")
827
-
828
- prediction={"action_generation": output0, "action_grounding": None, "element": {"center_point":coord,"description":pred_element_label,"tag_with_role":None,"box":None,"selector":coord,"tag":None},
829
- "action": pred_action, "value": pred_value}
830
-
831
- self.predictions.append(prediction)
832
-
833
- return {"action_generation": output0, "action_grounding": None, "element": {"center_point":coord,"description":pred_element_label,"tag_with_role":None,"box":None,"selector":coord,"tag":None} if pred_action!="SELECT" else pred_element,
834
- "action": pred_action, "value": pred_value}
835
-
836
-
837
-
838
- # return output0,output,pred_element, pred_action, pred_value
839
-
840
- async def execute(self, prediction_dict):
841
- """
842
- Execute the predicted action on the webpage.
843
- """
844
-
845
- pred_element = prediction_dict["element"]
846
- pred_action = prediction_dict["action"]
847
- pred_value = prediction_dict["value"]
848
- try:
849
- if (pred_action not in self.no_element_op) and pred_element == None:
850
- # self.dev_logger.info
851
- self.logger.info("DEBUG: WHAT IS PRED ACTION???:" + pred_action)
852
- # self.dev_logger.info("DEBUG WHAT IS self.no_element_op???:"+ self.no_element_op)
853
- pred_action = "NONE"
854
- new_action = await self.perform_action(pred_element, pred_action, pred_value)
855
- self.taken_actions.append(new_action)
856
- if pred_action != "NONE":
857
- self.valid_op += 1
858
- self.continuous_no_op = 0
859
- else:
860
- self.continuous_no_op += 1
861
- await asyncio.sleep(3)
862
- return 0
863
- except Exception as e:
864
-
865
- new_action = f"Failed to perform {pred_action} on {pred_element['description']} with value '{pred_value}': {e}"
866
-
867
-
868
- traceback_info = traceback.format_exc()
869
- error_message = f"Error executing action {pred_action}: {str(e)}"
870
- print(traceback_info)
871
- # exit()
872
- error_message_with_traceback = f"{error_message}\n\nTraceback:\n{traceback_info}"
873
-
874
- self.logger.info(new_action)
875
- self.taken_actions.append(new_action)
876
- self.continuous_no_op += 1
877
- await asyncio.sleep(3)
878
- return 1
879
-
880
- async def stop(self):
881
-
882
- try:
883
- close_context = self.session_control['context']
884
- self.session_control['context'] = None
885
- await close_context.close()
886
- self.logger.info("Browser context closed.")
887
- except Exception as e:
888
- self.logger.info(e)
889
-
890
- final_json = {"task": self.tasks, "website": self.config["basic"]["default_website"],
891
- "num_step": len(self.taken_actions), "action_history": self.taken_actions}
892
-
893
- def locator_serializer(obj):
894
- """Convert non-serializable objects to a serializable format."""
895
- if isinstance(obj, Locator):
896
- # Assuming Locator has attributes 'frame' and 'selector' you want to serialize
897
- return str(obj)
898
- raise TypeError(f"Object of type {obj.__class__.__name__} is not JSON serializable")
899
-
900
- # Using the custom default function in json.dump
901
- with open(os.path.join(self.main_path, 'all_predictions.json'), 'w', encoding='utf-8') as f:
902
- json.dump(self.predictions, f, default=locator_serializer, indent=4)
903
-
904
-
905
- with open(os.path.join(self.main_path, 'result.json'), 'w', encoding='utf-8') as file:
906
- json.dump(final_json, file, indent=4)
907
- self.logger.info("Agent stopped.")
908
-
909
- saveconfig(self.config, os.path.join(self.main_path, 'config.toml'))
910
-
911
- def clear_action_history(self):
912
- """
913
- Clears the history of actions taken by the agent.
914
- """
915
- self.taken_actions.clear()
916
- self.logger.info("Cleared action history.")
917
-
918
- def reset_complete_flag(self, flag=False):
919
- self.complete_flag = flag
920
-
921
- def change_task(self, new_task, clear_history=False):
922
- """
923
- Changes the task requirement for the agent.
924
-
925
- Parameters:
926
- - new_task: The new task requirement as a string.
927
- """
928
- if new_task and isinstance(new_task, str):
929
-
930
- self.logger.info(f"Changed task from {self.tasks[-1]} to: {new_task}")
931
- self.tasks.append(new_task)
932
- # Optionally clear action history when changing task
933
- if clear_history:
934
- self.clear_action_history()
935
- else:
936
- self.taken_actions.append(f"Changed task from {self.tasks[-2]} to: {new_task}")
937
-
938
- else:
939
- self.logger.info("Invalid new task. It must be a non-empty string.")
940
-
941
- # Optionally, you can save the taken_actions to a file or database for record-keeping
942
-
943
- # ADD no op count and op count, add limit to op
944
-
945
- # decompose run to predict and execute.
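For reference, a minimal self-contained sketch (not part of the deleted file) of the coordinate mapping used in predict above: the screenshot width is snapped to a multiple of 224 (at least 224, capped at 1344), and the model-predicted pixel coordinates are divided by the resulting scale factor to map them back onto the original screenshot. The wrapper name rescale_point is illustrative, not from the file.

def nearest_multiple_of_224_at_least_224(num, ceiling=False):
    # Same rounding rule as in predict(): snap to a multiple of 224, rounding up from a remainder of 112
    if num <= 224:
        return 224
    division, remainder = divmod(num, 224)
    if ceiling and remainder > 0:
        return (division + 1) * 224
    return division * 224 if remainder < 112 else (division + 1) * 224

def rescale_point(pred_xy, original_size):
    # Map a point predicted on the resized screenshot back to original pixel coordinates
    original_width, _ = original_size
    new_width = min(nearest_multiple_of_224_at_least_224(original_width), 1344)
    scale = new_width / original_width
    return tuple(c / scale for c in pred_xy)

# A 1920x1080 screenshot is resized to width 1344 (scale = 0.7), so a predicted
# point (672, 540) maps back to (960.0, ~771.4) on the original image.
print(rescale_point((672, 540), (1920, 1080)))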
llava/eval/seeact/data_utils/__init__.py DELETED
@@ -1,14 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) 2024 OSU Natural Language Processing Group
3
- #
4
- # Licensed under the OpenRAIL-S License;
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # https://www.licenses.ai/ai-pubs-open-rails-vz1
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
llava/eval/seeact/data_utils/format_prompt_utils.py DELETED
@@ -1,129 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) 2024 OSU Natural Language Processing Group
3
- #
4
- # Licensed under the OpenRAIL-S License;
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # https://www.licenses.ai/ai-pubs-open-rails-vz1
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import string
17
-
18
- def generate_new_query_prompt(system_prompt="", task="", previous_actions=None, question_description="",select_elements=None):
19
- """
20
- Generate the first phase prompt to ask the model to generate general descriptions about {environment, high-level plans, next step action}
21
- Each experiment will have a similar prompt in this phase
22
- This prompt is used to generate the model's thoughts without disruption from the formatting/referring prompts
23
- """
24
- sys_role=""+system_prompt
25
- query_text = ""
26
-
27
- # System Prompt
28
- query_text += "You are asked to complete the following task: "
29
-
30
- # Task Description
31
- query_text += task
32
- query_text += "\n\n"
33
-
34
- # Previous Actions
35
- previous_action_text = "Previous Actions:\n"
36
- if previous_actions is None:
37
- previous_actions = []
38
- for action_text in previous_actions:
39
- previous_action_text += action_text
40
- previous_action_text += "\n"
41
- query_text += previous_action_text
42
- query_text += "\n"
43
-
44
- # Question Description
45
- query_text += question_description
46
-
47
- if select_elements:
48
- query_text += "\n"
49
- for element in select_elements:
50
- query_text+=element+'\n'
51
- return [sys_role,query_text]
52
-
53
-
54
-
55
- def generate_new_referring_prompt(referring_description="", element_format="", action_format="", value_format="",
56
- choices=None,split="4"):
57
- referring_prompt = ""
58
-
59
- # Add description about how to format output
60
- if referring_description != "":
61
- referring_prompt += referring_description
62
- referring_prompt += "\n\n"
63
-
64
- # Add element prediction format and choices
65
-
66
-
67
- # Prepare Option texts
68
- # For exp {1, 2, 4}, generate option
69
- # For element_attribute, set the options field to None
70
- # if choices:
71
- # choice_text = format_options(choices)
72
- # referring_prompt += choice_text
73
-
74
- if element_format != "":
75
- referring_prompt += element_format
76
- referring_prompt += "\n\n"
77
-
78
- # Format Action Prediction
79
- if action_format != "":
80
- referring_prompt += action_format
81
- referring_prompt += "\n\n"
82
-
83
- # Format Value Prediction
84
- if value_format != "":
85
- referring_prompt += value_format
86
- referring_prompt += ""
87
-
88
- return referring_prompt
89
-
90
- def format_options(choices):
91
- option_text = ""
92
- abcd = ''
93
- non_abcd = ''
94
-
95
- multi_choice = ''
96
- for multichoice_idx, choice in enumerate(choices):
97
- multi_choice += f"{generate_option_name(multichoice_idx)}. {choice}\n"
98
- abcd += f"{generate_option_name(multichoice_idx)}, "
99
-
100
- non_abcd = generate_option_name(multichoice_idx + 1)
101
-
102
- multi_choice += f"{non_abcd}. None of the other options match the correct element or the action doesn't involve an element."
103
- # option_text += abcd
104
- option_text += f"If none of these elements match your target element or your target action doesn't involve an element, please select {non_abcd}.\n"
105
- option_text += (multi_choice + '\n\n')
106
- return option_text
107
-
108
-
109
- def generate_option_name(index):
110
- if index < 26:
111
- return string.ascii_uppercase[index]
112
- else:
113
- first_letter_index = (index - 26) // 26
114
- second_letter_index = (index - 26) % 26
115
- first_letter = string.ascii_uppercase[first_letter_index]
116
- second_letter = string.ascii_uppercase[second_letter_index]
117
- return f"{first_letter}{second_letter}"
118
-
119
- def get_index_from_option_name(name):
120
- if len(name) == 1:
121
- return string.ascii_uppercase.index(name)
122
- elif len(name) == 2:
123
- first_letter_index = string.ascii_uppercase.index(name[0])
124
- second_letter_index = string.ascii_uppercase.index(name[1])
125
- return 26 + first_letter_index * 26 + second_letter_index
126
- else:
127
- raise Exception("The string should be either 1 or 2 characters long")
128
-
129
-
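For reference, a minimal self-contained round-trip check (not part of the deleted file) of the option-labeling scheme defined above: indices 0-25 map to A-Z, indices from 26 onward map to two-letter labels AA, AB, ..., and get_index_from_option_name inverts that mapping.

import string

def generate_option_name(index):
    if index < 26:
        return string.ascii_uppercase[index]
    first = string.ascii_uppercase[(index - 26) // 26]
    second = string.ascii_uppercase[(index - 26) % 26]
    return f"{first}{second}"

def get_index_from_option_name(name):
    if len(name) == 1:
        return string.ascii_uppercase.index(name)
    return 26 + string.ascii_uppercase.index(name[0]) * 26 + string.ascii_uppercase.index(name[1])

# 0 -> 'A', 25 -> 'Z', 26 -> 'AA', 51 -> 'AZ', 52 -> 'BA', and back again
for i in (0, 25, 26, 51, 52):
    assert get_index_from_option_name(generate_option_name(i)) == i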
llava/eval/seeact/data_utils/prompts.py DELETED
@@ -1,95 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) 2024 OSU Natural Language Processing Group
3
- #
4
- # Licensed under the OpenRAIL-S License;
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # https://www.licenses.ai/ai-pubs-open-rails-vz1
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- from .format_prompt_utils import generate_new_referring_prompt, generate_new_query_prompt
17
-
18
- ##### SeeAct Online Prompts
19
-
20
- seeact_online_sys_prompt = '''Imagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click on an element with the mouse, select an option, type text or press Enter with the keyboard. (For your understanding, they are like the click(), select_option() type() and keyboard.press('Enter') functions in playwright respectively) One next step means one operation within the four. Unlike humans, for typing (e.g., in text areas, text boxes) and selecting (e.g., from dropdown menus or <select> elements), you should try directly typing the input or selecting the choice, bypassing the need for an initial click. You should not attempt to create accounts, log in or do the final submission. Terminate when you deem the task complete or if it requires potentially harmful actions.'''
21
-
22
- seeact_online_question_description_new_exp4 = '''The screenshot below shows the webpage you see. Follow the following guidance to think step by step before outlining the next action step at the current stage:
23
-
24
- (Current Webpage Identification)
25
- Firstly, think about what the current webpage is.
26
-
27
- (Previous Action Analysis)
28
- Secondly, combined with the screenshot, analyze each step of the previous action history and their intention one by one. Particularly, pay more attention to the last step, which may be more related to what you should do now as the next step. Specifically, if the last action involved a TYPE, always evaluate whether it necessitates a confirmation step, because typically a single TYPE action does not make effect. (often, simply pressing 'Enter', assuming the default element involved in the last action, unless other clear elements are present for operation).
29
-
30
- (Screenshot Details Analysis)
31
- Closely examine the screenshot to check the status of every part of the webpage to understand what you can operate with and what has been set or completed. You should closely examine the screenshot details to see what steps have been completed by previous actions even though you are given the textual previous actions. Because the textual history may not clearly and sufficiently record some effects of previous actions, you should closely evaluate the status of every part of the webpage to understand what you have done.
32
-
33
- (Next Action Based on Webpage and Analysis)
34
- Then, based on your analysis, in conjunction with human web browsing habits and the logic of web design, decide on the following action. And clearly outline which element in the webpage users will operate with as the first next target element, its detailed location, and the corresponding operation.
35
-
36
- To be successful, it is important to follow the following rules:
37
- 1. You should only issue a valid action given the current observation.
38
- 2. You should only issue one action at a time
39
- 3. For handling the select dropdown elements on the webpage, it's not necessary for you to provide completely accurate options right now. The full list of options for these elements will be supplied later.'''
40
-
41
- seeact_online_action_format = "ACTION: Choose an action from {CLICK, SELECT, TYPE, PRESS ENTER, TERMINATE, NONE}."
42
-
43
- seeact_online_value_format = "VALUE: Provide additional input based on ACTION.\n\nThe VALUE means:\nIf ACTION == TYPE, specify the " \
44
- "text to be typed.\nIf ACTION == SELECT, indicate the option to be chosen. Revise the selection value to align with the available options within the element.\nIf ACTION == CLICK, PRESS ENTER, TERMINATE or NONE, " \
45
- "write \"None\"."
46
-
47
- seeact_choice_prompt_dict = {
48
- "system_prompt": seeact_online_sys_prompt,
49
-
50
- "question_description": seeact_online_question_description_new_exp4,
51
-
52
- "referring_description": f"""(Reiteration)
53
- First, reiterate your next target element, its detailed location, and the corresponding operation.
54
-
55
- (Multichoice Question)
56
- Below is a multi-choice question, where the choices are elements in the webpage. All elements are arranged in the order based on their height on the webpage, from top to bottom (and from left to right). This arrangement can be used to locate them. From the screenshot, find out where and what each one is on the webpage, taking into account both their text content and HTML details. Then, determine whether one matches your target element. Please examine the choices one by one. Choose the matching one. If multiple options match your answer, choose the most likely one by re-examining the screenshot, the choices, and your further reasoning.""",
57
-
58
- "element_format": """(Final Answer)
59
- Finally, conclude your answer using the format below. Ensure your answer is strictly adhering to the format provided below. Please do not leave any explanation in your answers of the final standardized format part, and this final part should be clear and certain. The element choice, action, and value should be in three separate lines.
60
-
61
- Format:
62
-
63
- ELEMENT: The uppercase letter of your choice. (No need for PRESS ENTER)""",
64
-
65
- "action_format": f"{seeact_online_action_format}",
66
-
67
- "value_format": f"{seeact_online_value_format}"
68
- }
69
-
70
-
71
- def generate_prompt(experiment_split, task=None, previous=None, choices=None):
72
- assert experiment_split != None, "Please specify the experiment split."
73
- assert task != None, "Please input the task."
74
- assert previous != None, "Please input the previous actions."
75
-
76
- if False:
77
- raise Exception("Please change experiment type to seeact")
78
- elif experiment_split in ["seeact_online","online","seeact","SeeAct"]:
79
- system_prompt_input = seeact_choice_prompt_dict["system_prompt"]
80
- question_description_input = seeact_choice_prompt_dict["question_description"]
81
- referring_input = seeact_choice_prompt_dict["referring_description"]
82
- element_format_input = seeact_choice_prompt_dict["element_format"]
83
- action_format_input = seeact_choice_prompt_dict["action_format"]
84
- value_format_input = seeact_choice_prompt_dict["value_format"]
85
- prompt_list = []
86
-
87
- prompt_list.extend(
88
- generate_new_query_prompt(system_prompt=system_prompt_input, task=task, previous_actions=previous,
89
- question_description=question_description_input))
90
- prompt_list.append(
91
- generate_new_referring_prompt(referring_description=referring_input, element_format=element_format_input,
92
- action_format=action_format_input, value_format=value_format_input,
93
- choices=choices))
94
- return prompt_list
95
-
llava/eval/seeact/demo_utils/__init__.py DELETED
File without changes
llava/eval/seeact/demo_utils/browser_helper.py DELETED
@@ -1,457 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) 2024 OSU Natural Language Processing Group
3
- #
4
- # Licensed under the OpenRAIL-S License;
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # https://www.licenses.ai/ai-pubs-open-rails-vz1
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import re
17
- import asyncio
18
- from difflib import SequenceMatcher
19
- from playwright.sync_api import Playwright, expect, sync_playwright
20
- # from playwright.async_api import async_playwright
21
- from pathlib import Path
22
- import toml
23
- import os
24
- import traceback
25
-
26
- async def normal_launch_async(playwright: Playwright,headless=False,args=None):
27
- browser = await playwright.chromium.launch(
28
- traces_dir=None,
29
- headless=headless,
30
- args=args,
31
- # ignore_default_args=ignore_args,
32
- # chromium_sandbox=False,
33
- )
34
- return browser
35
-
36
-
37
-
38
- async def normal_new_context_async(
39
- browser,
40
- storage_state=None,
41
- har_path=None,
42
- video_path=None,
43
- tracing=False,
44
- trace_screenshots=False,
45
- trace_snapshots=False,
46
- trace_sources=False,
47
- locale=None,
48
- geolocation=None,
49
- user_agent: str = "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
50
- viewport: dict = {"width": 1280, "height": 720},
51
- ):
52
- context = await browser.new_context(
53
- storage_state=storage_state,
54
- user_agent=user_agent,
55
- viewport=viewport,
56
- locale=locale,
57
- record_har_path=har_path,
58
- record_video_dir=video_path,
59
- geolocation=geolocation,
60
- )
61
-
62
- if tracing:
63
- await context.tracing.start(screenshots=trace_screenshots, snapshots=trace_snapshots, sources=trace_sources)
64
- return context
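For reference, a hedged usage sketch (not part of the deleted file) of how the two launch helpers above fit together with Playwright's async API, mirroring the flow in SeeActAgent.start; the target URL and arguments are illustrative, and the sketch assumes the helpers above are importable.

import asyncio
from playwright.async_api import async_playwright

async def main():
    playwright = await async_playwright().start()
    browser = await normal_launch_async(playwright, headless=True, args=[])
    context = await normal_new_context_async(browser, viewport={"width": 1280, "height": 960})
    page = await context.new_page()
    await page.goto("https://www.example.com", wait_until="load")
    await context.close()
    await browser.close()
    await playwright.stop()

asyncio.run(main())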
65
-
66
- #
67
- # def persistent_launch(playwright: Playwright, user_data_dir: str = ""):
68
- # context = playwright.chromium.launch_persistent_context(
69
- # user_data_dir=user_data_dir,
70
- # headless=False,
71
- # args=["--no-default-browser-check",
72
- # "--no_sandbox",
73
- # "--disable-blink-features=AutomationControlled",
74
- # ],
75
- # ignore_default_args=ignore_args,
76
- # user_agent="Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
77
- # viewport={"width": 1280, "height": 720},
78
- # bypass_csp=True,
79
- # slow_mo=1000,
80
- # chromium_sandbox=True,
81
- # channel="chrome-dev"
82
- # )
83
- # return context
84
-
85
- #
86
- # async def persistent_launch_async(playwright: Playwright, user_data_dir: str = "", record_video_dir="video"):
87
- # context = await playwright.chromium.launch_persistent_context(
88
- # user_data_dir=user_data_dir,
89
- # headless=False,
90
- # args=[
91
- # "--disable-blink-features=AutomationControlled",
92
- # ],
93
- # ignore_default_args=ignore_args,
94
- # user_agent="Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
95
- # # viewport={"width": 1280, "height": 720},
96
- # record_video_dir=record_video_dir,
97
- # channel="chrome-dev"
98
- # # slow_mo=1000,
99
- # )
100
- # return context
101
-
102
-
103
-
104
- def remove_extra_eol(text):
105
- # Replace EOL symbols
106
- text = text.replace('\n', ' ')
107
- return re.sub(r'\s{2,}', ' ', text)
108
-
109
-
110
- def get_first_line(s):
111
- first_line = s.split('\n')[0]
112
- tokens = first_line.split()
113
- if len(tokens) > 8:
114
- return ' '.join(tokens[:8]) + '...'
115
- else:
116
- return first_line
117
-
118
- async def get_element_description(element, tag_name, role_value, type_value):
119
- '''
120
- Asynchronously generates a descriptive text for a web element based on its tag type.
121
- Handles various HTML elements like 'select', 'input', and 'textarea', extracting attributes and content relevant to accessibility and interaction.
122
- '''
123
- # text_content = await element.inner_text(timeout=0)
124
- # text = (text_content or '').strip()
125
- #
126
- # print(text)
127
- salient_attributes = [
128
- "alt",
129
- "aria-describedby",
130
- "aria-label",
131
- "aria-role",
132
- "input-checked",
133
- # "input-value",
134
- "label",
135
- "name",
136
- "option_selected",
137
- "placeholder",
138
- "readonly",
139
- "text-value",
140
- "title",
141
- "value",
142
- ]
143
-
144
- parent_value = "parent_node: "
145
- parent_locator = element.locator('xpath=..')
146
- num_parents = await parent_locator.count()
147
- if num_parents > 0:
148
-         # there will be at most one parent node
149
- parent_text = (await parent_locator.inner_text(timeout=0) or "").strip()
150
- if parent_text:
151
- parent_value += parent_text
152
- parent_value = remove_extra_eol(get_first_line(parent_value)).strip()
153
- if parent_value == "parent_node:":
154
- parent_value = ""
155
- else:
156
- parent_value += " "
157
-
158
- if tag_name == "select":
159
- text1 = "Selected Options: "
160
- text3 = " - Options: "
161
-
162
- text2 = await element.evaluate(
163
- "select => select.options[select.selectedIndex].textContent", timeout=0
164
- )
165
-
166
- if text2:
167
- options = await element.evaluate("select => Array.from(select.options).map(option => option.text)",
168
- timeout=0)
169
- text4 = " | ".join(options)
170
-
171
- if not text4:
172
- text4 = await element.text_content(timeout=0)
173
- if not text4:
174
- text4 = await element.inner_text(timeout=0)
175
-
176
-
177
- return parent_value+text1 + remove_extra_eol(text2.strip()) + text3 + text4
178
-
179
- input_value = ""
180
-
181
- none_input_type = ["submit", "reset", "checkbox", "radio", "button", "file"]
182
-
183
- if tag_name == "input" or tag_name == "textarea":
184
- if role_value not in none_input_type and type_value not in none_input_type:
185
- text1 = "input value="
186
- text2 = await element.input_value(timeout=0)
187
- if text2:
188
- input_value = text1 + "\"" + text2 + "\"" + " "
189
-
190
- text_content = await element.text_content(timeout=0)
191
- text = (text_content or '').strip()
192
-
193
- # print(text)
194
- if text:
195
- text = remove_extra_eol(text)
196
- if len(text) > 80:
197
- text_content_in = await element.inner_text(timeout=0)
198
- text_in = (text_content_in or '').strip()
199
- if text_in:
200
- return input_value + remove_extra_eol(text_in)
201
- else:
202
- return input_value + text
203
-
204
- # get salient_attributes
205
- text1 = ""
206
- for attr in salient_attributes:
207
- attribute_value = await element.get_attribute(attr, timeout=0)
208
- if attribute_value:
209
- text1 += f"{attr}=" + "\"" + attribute_value.strip() + "\"" + " "
210
-
211
- text = (parent_value + text1).strip()
212
- if text:
213
- return input_value + remove_extra_eol(text.strip())
214
-
215
-
216
- # try to get from the first child node
217
- first_child_locator = element.locator('xpath=./child::*[1]')
218
-
219
- num_childs = await first_child_locator.count()
220
- if num_childs>0:
221
- for attr in salient_attributes:
222
- attribute_value = await first_child_locator.get_attribute(attr, timeout=0)
223
- if attribute_value:
224
- text1 += f"{attr}=" + "\"" + attribute_value.strip() + "\"" + " "
225
-
226
- text = (parent_value + text1).strip()
227
- if text:
228
- return input_value + remove_extra_eol(text.strip())
229
-
230
- return None
231
-
232
-
233
- async def get_element_data(element, tag_name,viewport_size,seen_elements=[],coordinates=None):
234
- try:
235
- tag_name_list = ['a', 'button',
236
- 'input',
237
- 'select', 'textarea', 'adc-tab']
238
-
239
-
240
-
241
-
242
-
243
-
244
- rect = await element.bounding_box() or {'x': -1, 'y': -1, 'width': 0, 'height': 0}
245
-
246
- if rect['x']<0 or rect['y']<0 or rect['width']<=4 or rect['height']<=4 or rect['y']+rect['height']>viewport_size["height"] or rect['x']+ rect['width']>viewport_size["width"]:
247
- return None
248
-
249
-
250
-
251
- if coordinates is not None:
252
- if coordinates[0]>=rect['x'] and coordinates[0]<=rect['x']+rect['width'] and coordinates[1]>=rect['y'] and coordinates[1]<=rect['y']+rect['height']:
253
- print(coordinates)
254
- print(rect)
255
- else:
256
- return None
257
-
258
-
259
- box_model = [rect['x'], rect['y'], rect['x'] + rect['width'], rect['y'] + rect['height']]
260
- center_point = (round((box_model[0] + box_model[2]) / 2 , 3),
261
- round((box_model[1] + box_model[3]) / 2 , 3))
262
-
263
-
264
-
265
-
266
-
267
- if await element.is_hidden(timeout=0) or await element.is_disabled(timeout=0):
268
- return None
269
-
270
- if center_point in seen_elements:
271
- return None
272
-
273
- # await aprint(element,tag_name)
274
-
275
- if tag_name in tag_name_list:
276
- tag_head = tag_name
277
- real_tag_name = tag_name
278
- else:
279
- real_tag_name = await element.evaluate("element => element.tagName.toLowerCase()", timeout=0)
280
- if real_tag_name in tag_name_list:
281
- # already detected
282
- return None
283
- else:
284
- tag_head = real_tag_name
285
-
286
- text_element = ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'td', "div","em","center","strong","b","i","small","mark","abbr","cite","q","blockquote","span","nobr"]
287
-
288
- if real_tag_name in text_element:
289
- return None
290
-
291
- role_value = await element.get_attribute('role', timeout=0)
292
- type_value = await element.get_attribute('type', timeout=0)
293
- # await aprint("start to get element description",element,tag_name )
294
- description = await get_element_description(element, real_tag_name, role_value, type_value)
295
- # print(description)
296
- if not description:
297
- return None
298
-
299
- if role_value:
300
- tag_head += " role=" + "\"" + role_value + "\""
301
- if type_value:
302
- tag_head += " type=" + "\"" + type_value + "\""
303
-
304
- '''
305
- 0: center_point =(x,y)
306
- 1: description
307
- 2: tag_with_role: tag_head with role and type # TODO: Consider adding more
308
- 3. box
309
- 4. selector
310
- 5. tag
311
- '''
312
- selector = element
313
-
314
-
315
- if coordinates is not None:
316
- if coordinates[0]>=rect['x'] and coordinates[0]<=rect['x']+rect['width'] and coordinates[1]>=rect['y'] and coordinates[1]<=rect['y']+rect['height']:
317
- print(tag_head)
318
- print(description)
319
- print(box_model)
320
- else:
321
-
322
-
323
- return None
324
-
325
- return {"center_point":center_point,"description":description,"tag_with_role":tag_head,"box":box_model,"selector":selector,"tag":real_tag_name}
326
- # return [center_point, description, tag_head, box_model, selector, real_tag_name]
327
- except Exception as e:
328
- print(traceback.format_exc())
329
- print(e)
330
- return None
331
-
332
-
333
- async def get_interactive_elements_with_playwright(page,viewport_size,coordinates=None):
334
-
335
- print("Get Interactive elements around: ", coordinates)
336
- interactive_elements_selectors = [
337
- 'a', 'button',
338
- 'input',
339
- 'select', 'textarea',
340
- ]
341
-
342
- seen_elements = set()
343
- tasks = []
344
-
345
-
346
- for selector in interactive_elements_selectors:
347
- locator = page.locator(selector)
348
- element_count = await locator.count()
349
- for index in range(element_count):
350
- element = locator.nth(index)
351
- tag_name = selector
352
- task = get_element_data(element, tag_name,viewport_size,seen_elements=[],coordinates=coordinates)
353
-
354
- tasks.append(task)
355
-
356
- results = await asyncio.gather(*tasks)
357
-
358
- interactive_elements = []
359
- for i in results:
360
- if i:
361
- if i["center_point"] in seen_elements:
362
- continue
363
- else:
364
- seen_elements.add(i["center_point"])
365
- interactive_elements.append(i)
366
-
367
- # interactive_elements_selectors = [
368
- # '*'
369
- # ]
370
- # tasks = []
371
- #
372
- # for selector in interactive_elements_selectors:
373
- # locator = page.locator(selector)
374
- # element_count = await locator.count()
375
- # for index in range(element_count):
376
- # element = locator.nth(index)
377
- # tag_name = selector
378
- # task = get_element_data(element, tag_name, viewport_size,seen_elements,coordinates)
379
- #
380
- # tasks.append(task)
381
- #
382
- # results = await asyncio.gather(*tasks)
383
- #
384
- #
385
- # for i in results:
386
- # if i:
387
- # if i["center_point"] in seen_elements:
388
- # continue
389
- # else:
390
- # seen_elements.add(i["center_point"])
391
- # interactive_elements.append(i)
392
-
393
- return interactive_elements
394
-
395
-
396
-
397
- async def get_select_elements_with_playwright(page,viewport_size):
398
-
399
- interactive_elements_selectors = [
400
- 'select'
401
- ]
402
-
403
- seen_elements = set()
404
- tasks = []
405
-
406
-
407
- for selector in interactive_elements_selectors:
408
- locator = page.locator(selector)
409
- element_count = await locator.count()
410
- for index in range(element_count):
411
- element = locator.nth(index)
412
- tag_name = selector
413
- task = get_element_data(element, tag_name,viewport_size,seen_elements=[],coordinates=None)
414
-
415
- tasks.append(task)
416
-
417
- results = await asyncio.gather(*tasks)
418
-
419
- interactive_elements = []
420
- for i in results:
421
- if i:
422
- if i["center_point"] in seen_elements:
423
- continue
424
- else:
425
- seen_elements.add(i["center_point"])
426
- interactive_elements.append(i)
427
-
428
- return interactive_elements
429
-
430
-
431
- async def select_option(selector, value):
432
- best_option = [-1, "", -1]
433
- for i in range(await selector.locator("option").count()):
434
- option = await selector.locator("option").nth(i).inner_text()
435
- similarity = SequenceMatcher(None, option, value).ratio()
436
- if similarity > best_option[2]:
437
- best_option = [i, option, similarity]
438
- await selector.select_option(index=best_option[0], timeout=10000)
439
- return remove_extra_eol(best_option[1]).strip()
440
-
441
-
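select_option above resolves a requested value against a <select> element by fuzzy matching: difflib.SequenceMatcher scores every <option> text and the highest-scoring option is picked. A standalone sketch of that scoring step (the option texts here are made up for illustration):

    from difflib import SequenceMatcher

    options = ["United States", "United Kingdom", "Uruguay"]
    value = "UK"
    # ratio() is 2*M/T, where M counts matched characters and T is the combined length;
    # the option whose text is closest to the requested value wins.
    scores = {opt: SequenceMatcher(None, opt, value).ratio() for opt in options}
    best = max(scores, key=scores.get)
    print(best, round(scores[best], 3))  # United Kingdom 0.25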
442
- def saveconfig(config, save_file):
443
- """
444
- config is a dictionary.
445
-     save_file: destination path, including the file name.
446
- """
447
-
448
-
449
- if isinstance(save_file, str):
450
- save_file = Path(save_file)
451
- if isinstance(config, dict):
452
- with open(save_file, 'w') as f:
453
- config_without_key = config
454
- config_without_key["openai"]["api_key"] = "Your API key here"
455
- toml.dump(config_without_key, f)
456
- else:
457
- os.system(" ".join(["cp", str(config), str(save_file)]))
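Taken together, the helpers in this deleted browser_helper module form a small pipeline: launch Chromium, open a context with a fixed viewport, then enumerate the interactive elements that are actually visible on the page. A minimal usage sketch, assuming the pre-deletion import path llava.eval.seeact.demo_utils.browser_helper and an illustrative URL:

    import asyncio
    from playwright.async_api import async_playwright

    from llava.eval.seeact.demo_utils.browser_helper import (
        normal_launch_async,
        normal_new_context_async,
        get_interactive_elements_with_playwright,
    )

    async def list_clickables(url="https://example.com"):
        viewport = {"width": 1280, "height": 720}
        async with async_playwright() as playwright:
            browser = await normal_launch_async(playwright)
            context = await normal_new_context_async(browser, viewport=viewport)
            page = await context.new_page()
            await page.goto(url)
            # Each result dict carries: center_point, description, tag_with_role, box, selector, tag.
            elements = await get_interactive_elements_with_playwright(page, viewport)
            for el in elements:
                print(el["tag_with_role"], "->", el["description"])
            await browser.close()

    asyncio.run(list_clickables())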
 
llava/eval/seeact/demo_utils/format_prompt.py DELETED
@@ -1,133 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) 2024 OSU Natural Language Processing Group
3
- #
4
- # Licensed under the OpenRAIL-S License;
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # https://www.licenses.ai/ai-pubs-open-rails-vz1
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- import re
16
-
17
-
18
- def format_choices(elements):
19
-
20
- converted_elements = [
21
- f'<{element["tag_with_role"]}>'
22
- + (
23
- element["description"]
24
- )
25
- + f"</{element['tag']}>"
26
- for i, element in enumerate(elements)
27
- ]
28
-
29
- return converted_elements
30
-
31
- def postprocess_action_lmm(text):
32
- text = text.strip()
33
- text = text.replace(
34
- "The uppercase letter of your choice. Choose one of the following elements if it matches the target element based on your analysis:\n\n",
35
- "")
36
- text = text.replace(
37
- "The uppercase letter of your choice. Choose one of the following elements if it matches the target element based on your analysis:\n",
38
- "")
39
- text = text.replace(
40
- "The uppercase letter of your choice. Choose one of the following elements if it matches the target element based on your analysis:",
41
- "")
42
- text = text.replace("The uppercase letter of your choice based on your analysis is:\n\n", "")
43
- text = text.replace("The uppercase letter of your choice based on your analysis is:\n", "")
44
- text = text.replace("The uppercase letter of your choice based on your analysis is:", "")
45
- text = text.replace("The uppercase letter of my choice is \n\n", "")
46
- text = text.replace("The uppercase letter of my choice is \n", "")
47
- text = text.replace("The uppercase letter of my choice is ", "")
48
- text = text.replace("The uppercase letter of your choice is \n\n", "")
49
- text = text.replace("The uppercase letter of your choice is \n", "")
50
- text = text.replace("The uppercase letter of your choice is ", "")
51
- text = text.replace("The uppercase letter of your choice.\n\n", "")
52
- text = text.replace("The uppercase letter of your choice.\n", "")
53
- text = text.replace("The uppercase letter of your choice.", "")
54
- text = text.replace("The uppercase letter of your choice based on my analysis is:\n\n", "")
55
- text = text.replace("The uppercase letter of your choice based on my analysis is:\n", "")
56
- text = text.replace("The uppercase letter of your choice based on my analysis is:", "")
57
- text = text.replace("The correct choice based on the analysis would be:\n\n", "")
58
- text = text.replace("The correct choice based on the analysis would be:\n", "")
59
- text = text.replace("The correct choice based on the analysis would be :", "")
60
- text = text.replace("The correct choice based on the analysis would be ", "")
61
- text = text.replace(
62
- "The uppercase letter of your choice. Choose one of the following elements if it matches the target element based on your analysis:\n\n",
63
- "")
64
- text = text.replace(
65
- "The uppercase letter of your choice. Choose one of the following elements if it matches the target element based on your analysis:\n",
66
- "")
67
- text = text.replace(
68
- "The uppercase letter of your choice. Choose one of the following elements if it matches the target element based on your analysis:",
69
- "")
70
- text = text.replace("The uppercase letter of your choice.\n\n", "")
71
- text = text.replace("The uppercase letter of your choice.\n", "")
72
- text = text.replace("The uppercase letter of your choice based on the analysis is:\n\n", "")
73
- text = text.replace("The uppercase letter of your choice based on the analysis is:\n", "")
74
- text = text.replace("The uppercase letter of your choice based on the analysis is:", "")
75
- text = text.replace("The uppercase letter of your choice based on the analysis is ", "")
76
- text = text.replace("The uppercase letter of my choice based on the analysis is:\n\n", "")
77
- text = text.replace("The uppercase letter of my choice based on the analysis is:\n", "")
78
- text = text.replace("The uppercase letter of my choice based on the analysis is:", "")
79
- text = text.replace("The uppercase letter of my choice based on the analysis is ", "")
80
- text = text.replace("The correct element to select would be:\n\n", "")
81
- text = text.replace("The correct element to select would be:\n", "")
82
- text = text.replace("The correct element to select would be:", "")
83
- text = text.replace("The correct element to select would be ", "")
84
- text = text.replace("The uppercase letter of my choice is:\n\n", "")
85
- text = text.replace("The uppercase letter of my choice is:\n", "")
86
- text = text.replace("The uppercase letter of my choice is:", "")
87
- text = text.replace("The uppercase letter of my choice is ", "")
88
- text = text.replace("Choose an action from {CLICK, TYPE, SELECT}.\n\n", "")
89
- text = text.replace("Choose an action from {CLICK, TYPE, SELECT}.\n", "")
90
- text = text.replace("Choose an action from {CLICK, TYPE, SELECT}.", "")
91
- text = text.replace("Provide additional input based on ACTION.\n\n", "")
92
- text = text.replace("Provide additional input based on ACTION.\n", "")
93
- text = text.replace("Provide additional input based on ACTION.", "")
94
-
95
- def extract_element_description(text):
96
- pattern = r'ELEMENT:\s*(.*?)\s*ACTION:'
97
- match = re.search(pattern, text)
98
- if match:
99
- return match.group(1)
100
- else:
101
- return None
102
-
103
- description = extract_element_description(text)
104
- action = re.search(
105
- r"ACTION: (CLICK|SELECT|TYPE|HOVER|PRESS ENTER|SCROLL UP|SCROLL DOWN|PRESS HOME|PRESS END|PRESS PAGEUP|PRESS PAGEDOWN|NEW TAB|CLOSE TAB|GO BACK|GO FORWARD|TERMINATE|NONE|GOTO|SAY|MEMORIZE)",
106
- text
107
- )
108
-
109
-
110
- if action:
111
- action = action.group(1)
112
- else:
113
- action = "None"
114
-
115
- value = re.search(r"VALUE: (.*)$", text, re.MULTILINE)
116
- value = value.group(1) if value is not None else ""
117
- return description, action.strip(), process_string(process_string(value.strip()))
118
-
119
- def process_string(input_string):
120
- if input_string.startswith('"') and input_string.endswith('"'):
121
- input_string = input_string[1:-1]
122
- if input_string.endswith('.'):
123
- input_string = input_string[:-1]
124
- return input_string
125
-
126
-
127
-
128
-
129
-
130
-
131
-
132
-
133
-
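postprocess_action_lmm strips the boilerplate phrasings listed above and then extracts three fields by regular expression: the ELEMENT description, a single ACTION keyword, and the VALUE line, with process_string removing surrounding quotes and a trailing period. A small illustration with a made-up model response (assumes the pre-deletion import path):

    from llava.eval.seeact.demo_utils.format_prompt import postprocess_action_lmm

    response = (
        "ELEMENT: B. input 'Search' textbox\n"
        "ACTION: TYPE\n"
        'VALUE: "running shoes".'
    )
    description, action, value = postprocess_action_lmm(response)
    print(description)  # B. input 'Search' textbox
    print(action)       # TYPE
    print(value)        # running shoes  (quotes and trailing period stripped)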
 
llava/eval/seeact/demo_utils/inference_engine.py DELETED
@@ -1,314 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) 2024 OSU Natural Language Processing Group
3
- #
4
- # Licensed under the OpenRAIL-S License;
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # https://www.licenses.ai/ai-pubs-open-rails-vz1
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- import os
16
- import time
17
-
18
- import backoff
19
- import openai
20
- from openai import (
21
- APIConnectionError,
22
- APIError,
23
- RateLimitError,
24
- )
25
- import requests
26
- from dotenv import load_dotenv
27
- import litellm
28
- import base64
29
-
30
- EMPTY_API_KEY="Your API KEY Here"
31
-
32
- def load_openai_api_key():
33
- load_dotenv()
34
- assert (
35
- os.getenv("OPENAI_API_KEY") is not None and
36
- os.getenv("OPENAI_API_KEY") != EMPTY_API_KEY
37
- ), "must pass on the api_key or set OPENAI_API_KEY in the environment"
38
- return os.getenv("OPENAI_API_KEY")
39
-
40
-
41
- def load_gemini_api_key():
42
- load_dotenv()
43
- assert (
44
- os.getenv("GEMINI_API_KEY") is not None and
45
- os.getenv("GEMINI_API_KEY") != EMPTY_API_KEY
46
- ), "must pass on the api_key or set GEMINI_API_KEY in the environment"
47
- return os.getenv("GEMINI_API_KEY")
48
-
49
- def encode_image(image_path):
50
- with open(image_path, "rb") as image_file:
51
- return base64.b64encode(image_file.read()).decode('utf-8')
52
-
53
-
54
- def engine_factory(api_key=None, model=None, **kwargs):
55
- model = model.lower()
56
- if model in ["gpt-4-vision-preview", "gpt-4-turbo", "gpt-4o"]:
57
- if api_key and api_key != EMPTY_API_KEY:
58
- os.environ["OPENAI_API_KEY"] = api_key
59
- else:
60
- load_openai_api_key()
61
- return OpenAIEngine(model=model, **kwargs)
62
- elif model in ["gemini-1.5-pro-latest", "gemini-1.5-flash"]:
63
- if api_key and api_key != EMPTY_API_KEY:
64
- os.environ["GEMINI_API_KEY"] = api_key
65
- else:
66
- load_gemini_api_key()
67
- model=f"gemini/{model}"
68
- return GeminiEngine(model=model, **kwargs)
69
- elif model == "llava":
70
- model="llava"
71
- return OllamaEngine(model=model, **kwargs)
72
- raise Exception(f"Unsupported model: {model}, currently supported models: \
73
-                     gpt-4-vision-preview, gpt-4-turbo, gpt-4o, gemini-1.5-pro-latest, gemini-1.5-flash, llava")
74
-
75
- class Engine:
76
- def __init__(
77
- self,
78
- stop=["\n\n"],
79
- rate_limit=-1,
80
- model=None,
81
- temperature=0,
82
- **kwargs,
83
- ) -> None:
84
- """
85
- Base class to init an engine
86
-
87
- Args:
88
- api_key (_type_, optional): Auth key from OpenAI. Defaults to None.
89
-             stop (list, optional): Tokens that mark the end of a sequence. Defaults to ["\n\n"].
90
- rate_limit (int, optional): Max number of requests per minute. Defaults to -1.
91
- model (_type_, optional): Model family. Defaults to None.
92
- """
93
- self.time_slots = [0]
94
- self.stop = stop
95
- self.temperature = temperature
96
- self.model = model
97
-         # convert rate limit to minimum request interval
98
- self.request_interval = 0 if rate_limit == -1 else 60.0 / rate_limit
99
- self.next_avil_time = [0] * len(self.time_slots)
100
- self.current_key_idx = 0
101
- print(f"Initializing model {self.model}")
102
-
103
- def tokenize(self, input):
104
- return self.tokenizer(input)
105
-
106
-
107
- class OllamaEngine(Engine):
108
- def __init__(self, **kwargs) -> None:
109
- """
110
- Init an Ollama engine
111
-         To use Ollama, download and install Ollama from https://ollama.com/
112
-         After Ollama starts, pull llava with the command: ollama pull llava
113
- """
114
- super().__init__(**kwargs)
115
- self.api_url = "http://localhost:11434/api/chat"
116
-
117
-
118
- def generate(self, prompt: list = None, max_new_tokens=4096, temperature=None, model=None, image_path=None,
119
- ouput_0=None, turn_number=0, **kwargs):
120
- self.current_key_idx = (self.current_key_idx + 1) % len(self.time_slots)
121
- start_time = time.time()
122
- if (
123
- self.request_interval > 0
124
- and start_time < self.next_avil_time[self.current_key_idx]
125
- ):
126
- wait_time = self.next_avil_time[self.current_key_idx] - start_time
127
-             print(f"Waiting {wait_time} seconds for rate limiting")
128
- time.sleep(wait_time)
129
- prompt0, prompt1, prompt2 = prompt
130
-
131
- base64_image = encode_image(image_path)
132
- if turn_number == 0:
133
- # Assume one turn dialogue
134
- prompt_input = [
135
- {"role": "assistant", "content": prompt0},
136
- {"role": "user", "content": prompt1, "images": [f"{base64_image}"]},
137
- ]
138
- elif turn_number == 1:
139
- prompt_input = [
140
- {"role": "assistant", "content": prompt0},
141
- {"role": "user", "content": prompt1, "images": [f"{base64_image}"]},
142
- {"role": "assistant", "content": f"\n\n{ouput_0}"},
143
- {"role": "user", "content": prompt2},
144
- ]
145
-
146
- options = {"temperature": self.temperature, "num_predict": max_new_tokens}
147
- data = {
148
- "model": self.model,
149
- "messages": prompt_input,
150
- "options": options,
151
- "stream": False,
152
- }
153
- _request = {
154
- "url": f"{self.api_url}",
155
- "json": data,
156
- }
157
- response = requests.post(**_request) # type: ignore
158
- if response.status_code != 200:
159
- raise Exception(f"Ollama API Error: {response.status_code}, {response.text}")
160
- response_json = response.json()
161
- return response_json["message"]["content"]
162
-
163
-
164
- class GeminiEngine(Engine):
165
- def __init__(self, **kwargs) -> None:
166
- """
167
- Init a Gemini engine
168
- To use this engine, please provide the GEMINI_API_KEY in the environment
169
- Supported Model Rate Limit
170
- gemini-1.5-pro-latest 2 queries per minute, 1000 queries per day
171
- """
172
- super().__init__(**kwargs)
173
-
174
-
175
- def generate(self, prompt: list = None, max_new_tokens=4096, temperature=None, model=None, image_path=None,
176
- ouput_0=None, turn_number=0, **kwargs):
177
- self.current_key_idx = (self.current_key_idx + 1) % len(self.time_slots)
178
- start_time = time.time()
179
- if (
180
- self.request_interval > 0
181
- and start_time < self.next_avil_time[self.current_key_idx]
182
- ):
183
- wait_time = self.next_avil_time[self.current_key_idx] - start_time
184
-             print(f"Waiting {wait_time} seconds for rate limiting")
-             time.sleep(wait_time)
185
- prompt0, prompt1, prompt2 = prompt
186
- litellm.set_verbose=True
187
-
188
- base64_image = encode_image(image_path)
189
- if turn_number == 0:
190
- # Assume one turn dialogue
191
- prompt_input = [
192
- {"role": "system", "content": prompt0},
193
- {"role": "user",
194
- "content": [{"type": "text", "text": prompt1}, {"type": "image_url", "image_url": {"url": image_path,
195
- "detail": "high"},
196
- }]},
197
- ]
198
- elif turn_number == 1:
199
- prompt_input = [
200
- {"role": "system", "content": prompt0},
201
- {"role": "user",
202
- "content": [{"type": "text", "text": prompt1}, {"type": "image_url", "image_url": {"url": image_path,
203
- "detail": "high"},
204
- }]},
205
- {"role": "assistant", "content": [{"type": "text", "text": f"\n\n{ouput_0}"}]},
206
- {"role": "user", "content": [{"type": "text", "text": prompt2}]},
207
- ]
208
- response = litellm.completion(
209
- model=model if model else self.model,
210
- messages=prompt_input,
211
- max_tokens=max_new_tokens if max_new_tokens else 4096,
212
- temperature=temperature if temperature else self.temperature,
213
- **kwargs,
214
- )
215
- return [choice["message"]["content"] for choice in response.choices][0]
216
-
217
-
218
- class OpenAIEngine(Engine):
219
- def __init__(self, **kwargs) -> None:
220
- """
221
- Init an OpenAI GPT/Codex engine
222
- To find your OpenAI API key, visit https://platform.openai.com/api-keys
223
- """
224
- super().__init__(**kwargs)
225
-
226
- @backoff.on_exception(
227
- backoff.expo,
228
- (APIError, RateLimitError, APIConnectionError),
229
- )
230
- def generate(self, prompt: list = None, max_new_tokens=4096, temperature=None, model=None, image_path=None,
231
- ouput_0=None, turn_number=0, **kwargs):
232
- self.current_key_idx = (self.current_key_idx + 1) % len(self.time_slots)
233
- start_time = time.time()
234
- if (
235
- self.request_interval > 0
236
- and start_time < self.next_avil_time[self.current_key_idx]
237
- ):
238
- time.sleep(self.next_avil_time[self.current_key_idx] - start_time)
239
- prompt0, prompt1, prompt2 = prompt
240
- # litellm.set_verbose=True
241
-
242
- base64_image = encode_image(image_path)
243
- if turn_number == 0:
244
- # Assume one turn dialogue
245
- prompt_input = [
246
- {"role": "system", "content": [{"type": "text", "text": prompt0}]},
247
- {"role": "user",
248
- "content": [{"type": "text", "text": prompt1}, {"type": "image_url", "image_url": {"url":
249
- f"data:image/jpeg;base64,{base64_image}",
250
- "detail": "high"},
251
- }]},
252
- ]
253
- elif turn_number == 1:
254
- prompt_input = [
255
- {"role": "system", "content": [{"type": "text", "text": prompt0}]},
256
- {"role": "user",
257
- "content": [{"type": "text", "text": prompt1}, {"type": "image_url", "image_url": {"url":
258
- f"data:image/jpeg;base64,{base64_image}",
259
- "detail": "high"}, }]},
260
- {"role": "assistant", "content": [{"type": "text", "text": f"\n\n{ouput_0}"}]},
261
- {"role": "user", "content": [{"type": "text", "text": prompt2}]},
262
- ]
263
- response = litellm.completion(
264
- model=model if model else self.model,
265
- messages=prompt_input,
266
- max_tokens=max_new_tokens if max_new_tokens else 4096,
267
- temperature=temperature if temperature else self.temperature,
268
- **kwargs,
269
- )
270
- return [choice["message"]["content"] for choice in response.choices][0]
271
-
272
-
273
- class OpenaiEngine_MindAct(Engine):
274
- def __init__(self, **kwargs) -> None:
275
- """Init an OpenAI GPT/Codex engine
276
-
277
- Args:
278
- api_key (_type_, optional): Auth key from OpenAI. Defaults to None.
279
-             stop (list, optional): Tokens that mark the end of a sequence. Defaults to ["\n\n"].
280
- rate_limit (int, optional): Max number of requests per minute. Defaults to -1.
281
- model (_type_, optional): Model family. Defaults to None.
282
- """
283
- super().__init__(**kwargs)
284
- #
285
- @backoff.on_exception(
286
- backoff.expo,
287
- (APIError, RateLimitError, APIConnectionError),
288
- )
289
- def generate(self, prompt, max_new_tokens=50, temperature=0, model=None, **kwargs):
290
- self.current_key_idx = (self.current_key_idx + 1) % len(self.time_slots)
291
- start_time = time.time()
292
- if (
293
- self.request_interval > 0
294
- and start_time < self.next_avil_time[self.current_key_idx]
295
- ):
296
- time.sleep(self.next_avil_time[self.current_key_idx] - start_time)
297
- if isinstance(prompt, str):
298
- # Assume one turn dialogue
299
- prompt = [
300
- {"role": "user", "content": prompt},
301
- ]
302
- response = litellm.completion(
303
- model=model if model else self.model,
304
- messages=prompt,
305
- max_tokens=max_new_tokens,
306
- temperature=temperature,
307
- **kwargs,
308
- )
309
- if self.request_interval > 0:
310
- self.next_avil_time[self.current_key_idx] = (
311
- max(start_time, self.next_avil_time[self.current_key_idx])
312
- + self.request_interval
313
- )
314
- return [choice["message"]["content"] for choice in response["choices"]]
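These engine classes share a two-turn calling pattern: turn 0 sends the system prompt, the query prompt, and a screenshot and asks the model to describe the next action; turn 1 feeds that answer back together with the referring prompt to ground the action to a concrete element. A hedged sketch of that flow (prompt texts and the screenshot path are placeholders, and the generate signature spells the previous-output argument ouput_0):

    import os
    from llava.eval.seeact.demo_utils.inference_engine import engine_factory

    # Assumes OPENAI_API_KEY is set in the environment and screenshot.jpg exists on disk.
    engine = engine_factory(api_key=os.getenv("OPENAI_API_KEY"), model="gpt-4o",
                            rate_limit=-1, temperature=0)

    prompt = [
        "You are assisting a web navigation agent.",                       # system prompt
        "Task: find the pricing page. Describe the next action to take.",  # turn-0 query prompt
        "Now choose the matching element and action from the options.",    # turn-1 referring prompt
    ]

    output_0 = engine.generate(prompt=prompt, image_path="screenshot.jpg", turn_number=0)
    output_1 = engine.generate(prompt=prompt, image_path="screenshot.jpg",
                               turn_number=1, ouput_0=output_0)
    print(output_1)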
 
llava/eval/seeact_run.py DELETED
@@ -1,118 +0,0 @@
1
- import asyncio
2
- import os
3
- import random
4
-
5
- from seeact.agent import SeeActAgent
6
-
7
- # Set up your API key here, or pass it through the environment
8
- os.environ["OPENAI_API_KEY"] = "Your API KEY Here"
9
- # os.environ["GEMINI_API_KEY"] = "Your API KEY Here"
10
-
11
-
12
- async def run_agent():
13
- agent = SeeActAgent(model="gpt-4o")
14
- await agent.start()
15
- while not agent.complete_flag:
16
- prediction_dict = await agent.predict()
17
- await agent.execute(prediction_dict)
18
- await agent.stop()
19
-
20
- if __name__ == "__main__":
21
- asyncio.run(run_agent())
22
- #
23
- # async def run_agent():
24
- #
25
- # import json
26
- #
27
- # # Define the input file path
28
- # input_file = "tasks.json"
29
- #
30
- # # Load the data from the input file
31
- # with open(input_file, 'r', encoding='utf-8') as infile:
32
- # data = json.load(infile)
33
- #
34
- # # Iterate through each data point and print the required fields
35
- #
36
- # data=[i for i in data]
37
- # random.shuffle(data)
38
- # for item in data:
39
- #
40
- #
41
- # website = item.get("website", "N/A")
42
- # task_id = item.get("task_id", "N/A")
43
- # confirmed_task = item.get("confirmed_task", "N/A")
44
- #
45
- # agent = SeeActAgent(model="gpt-4o",default_task=confirmed_task,default_website=website,save_task_id=task_id)
46
- # await agent.start()
47
- # count = 0
48
- # try:
49
- # while not agent.complete_flag and count < 20:
50
- # prediction_dict = await agent.predict()
51
- # await agent.execute(prediction_dict)
52
- # count += 1
53
- # except Exception as e:
54
- # print(e)
55
- # try:
56
- # await agent.stop()
57
- # except Exception as e:
58
- # print(e)
59
- #
60
- #
61
- #
62
- #
63
- # if __name__ == "__main__":
64
- # asyncio.run(run_agent())
65
-
66
-
67
-
68
-
69
-
70
-
71
-
72
- # import asyncio
73
- # import os
74
- # from seeact.agent import SeeActAgent
75
- #
76
- # # Setup your API Key here, or pass through environment
77
- # os.environ["OPENAI_API_KEY"] = "Your API KEY Here"
78
- # # os.environ["GEMINI_API_KEY"] = "Your API KEY Here"
79
- #
80
- # async def process_task(item):
81
- # website = item.get("website", "N/A")
82
- # task_id = item.get("task_id", "N/A")
83
- # confirmed_task = item.get("confirmed_task", "N/A")
84
- #
85
- # agent = SeeActAgent(model="gpt-4o", default_task=confirmed_task, default_website=website, save_task_id=task_id,save_file_dir="mind2web-online90")
86
- # await agent.start()
87
- # count = 0
88
- # while not agent.complete_flag and count < 20:
89
- # prediction_dict = await agent.predict()
90
- # await agent.execute(prediction_dict)
91
- # count += 1
92
- # await agent.stop()
93
- #
94
- # async def run_agent():
95
- # import json
96
- #
97
- # # Define the input file path
98
- # input_file = "tasks.json"
99
- #
100
- # # Load the data from the input file
101
- # with open(input_file, 'r', encoding='utf-8') as infile:
102
- # data = json.load(infile)
103
- #
104
- # # Use a semaphore to limit the number of concurrent tasks
105
- # semaphore = asyncio.Semaphore(1)
106
- #
107
- # async def sem_task(item):
108
- # async with semaphore:
109
- # await process_task(item)
110
- #
111
- # # Schedule all the tasks
112
- # tasks = [sem_task(item) for item in data]
113
- # await asyncio.gather(*tasks)
114
- #
115
- # if __name__ == "__main__":
116
- # asyncio.run(run_agent())
117
-
118
-