bowenchen118 committed
Commit 6b8dbdd
Parent(s): f326b43

Change to user-provided API keys
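This commit stops reading the OpenAI key from the OPENAI_API_KEY environment variable and instead collects it from the user in the Gradio UI, threading it through Initializer, Planner, and Executor down to ChatOpenAI and the tools. A minimal sketch of the resulting flow, using the constructors shown in the diffs below (the key and model values are illustrative placeholders, not from the repo):

# Hedged sketch of the new key flow; literal values are placeholders.
from opentools.models.initializer import Initializer
from opentools.models.planner import Planner
from opentools.models.executor import Executor

api_key = "sk-..."  # supplied per request via the Gradio password textbox

initializer = Initializer(
    enabled_tools=["Generalist_Solution_Generator_Tool"],
    model_string="gpt-4o-mini",
    api_key=api_key,
)
planner = Planner(
    llm_engine_name="gpt-4o-mini",
    toolbox_metadata=initializer.toolbox_metadata,
    available_tools=initializer.available_tools,
    api_key=api_key,
)
executor = Executor(
    llm_engine_name="gpt-4o-mini",
    enable_signal=False,
    api_key=api_key,
)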
app.py CHANGED
@@ -18,9 +18,7 @@ from opentools.models.initializer import Initializer
 from opentools.models.planner import Planner
 from opentools.models.memory import Memory
 from opentools.models.executor import Executor
-from opentools.models.utlis import make_json_serializable
-
-solver = None
+from opentools.models.utils import make_json_serializable
 
 class ChatMessage:
     def __init__(self, role: str, content: str, metadata: dict = None):
@@ -63,7 +61,7 @@ class Solver:
 
 
 
-    def stream_solve_user_problem(self, user_query: str, user_image: Image.Image, messages: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
+    def stream_solve_user_problem(self, user_query: str, user_image: Image.Image, api_key: str, messages: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
         """
         Streams intermediate thoughts and final responses for the problem-solving process based on user input.
 
@@ -198,39 +196,28 @@ def parse_arguments():
     return parser.parse_args()
 
 
-def solve_problem_gradio(user_query, user_image):
+def solve_problem_gradio(user_query, user_image, api_key):
     """
     Wrapper function to connect the solver to Gradio.
     Streams responses from `solver.stream_solve_user_problem` for real-time UI updates.
     """
-    global solver # Ensure we're using the globally defined solver
-
-    if solver is None:
-        return [["assistant", "⚠️ Error: Solver is not initialized. Please restart the application."]]
-
-    messages = [] # Initialize message list
-    for message_batch in solver.stream_solve_user_problem(user_query, user_image, messages):
-        yield [[msg.role, msg.content] for msg in message_batch] # Ensure correct format for Gradio Chatbot
-
-
-
-def main(args):
-    global solver
+
     # Initialize Tools
     enabled_tools = args.enabled_tools.split(",") if args.enabled_tools else []
 
-
     # Instantiate Initializer
     initializer = Initializer(
         enabled_tools=enabled_tools,
-        model_string=args.llm_engine_name
+        model_string=args.llm_engine_name,
+        api_key=api_key
     )
 
     # Instantiate Planner
     planner = Planner(
         llm_engine_name=args.llm_engine_name,
         toolbox_metadata=initializer.toolbox_metadata,
-        available_tools=initializer.available_tools
+        available_tools=initializer.available_tools,
+        api_key=api_key
     )
 
     # Instantiate Memory
@@ -240,7 +227,8 @@ def main(args):
     executor = Executor(
         llm_engine_name=args.llm_engine_name,
         root_cache_dir=args.root_cache_dir,
-        enable_signal=False
+        enable_signal=False,
+        api_key=api_key
     )
 
     # Instantiate Solver
@@ -258,44 +246,31 @@ def main(args):
         root_cache_dir=args.root_cache_dir
     )
 
-    # Test Inputs
-    # user_query = "How many balls are there in the image?"
-    # user_image_path = "/home/sheng/toolbox-agent/mathvista_113.png" # Replace with your actual image path
-
-    # # Load the image as a PIL object
-    # user_image = Image.open(user_image_path).convert("RGB") # Ensure it's in RGB mode
-
-    # print("\n=== Starting Problem Solving ===\n")
-    # messages = []
-    # for message_batch in solver.stream_solve_user_problem(user_query, user_image, messages):
-    #     for message in message_batch:
-    #         print(f"{message.role}: {message.content}")
-
-    # messages = []
-    # solver.stream_solve_user_problem(user_query, user_image, messages)
+    if solver is None:
+        return [["assistant", "⚠️ Error: Solver is not initialized. Please restart the application."]]
 
-    # def solve_problem_stream(user_query, user_image):
-    #     messages = [] # Ensure it's a list of [role, content] pairs
+    messages = [] # Initialize message list
+    for message_batch in solver.stream_solve_user_problem(user_query, user_image, api_key, messages):
+        yield [[msg.role, msg.content] for msg in message_batch] # Ensure correct format for Gradio Chatbot
 
-    #     for message_batch in solver.stream_solve_user_problem(user_query, user_image, messages):
-    #         yield message_batch # Stream messages correctly in tuple format
 
-    # solve_problem_stream(user_query, user_image)
 
+def main(args):
     # ========== Gradio Interface ==========
     with gr.Blocks() as demo:
         gr.Markdown("# 🧠 OctoTools AI Solver") # Title
 
         with gr.Row():
-            user_query = gr.Textbox(label="Enter your query", placeholder="Type your question here...")
-            user_image = gr.Image(type="pil", label="Upload an image") # Accepts multiple formats
-
-            run_button = gr.Button("Run") # Run button
-            chatbot_output = gr.Chatbot(label="Problem-Solving Output")
+            with gr.Column(scale=1, min_width=300):
+                user_query = gr.Textbox(label="Enter your query", placeholder="Type your question here...")
+                api_key = gr.Textbox(label="API Key", placeholder="Your API key will not be stored in any way.", type="password")
+                user_image = gr.Image(type="pil", label="Upload an image") # Accepts multiple formats
+                run_button = gr.Button("Run") # Run button
+            with gr.Column(scale=3, min_width=300):
+                chatbot_output = gr.Chatbot(label="Problem-Solving Output")
 
         # Link button click to function
-        run_button.click(fn=solve_problem_gradio, inputs=[user_query, user_image], outputs=chatbot_output)
+        run_button.click(fn=solve_problem_gradio, inputs=[user_query, user_image, api_key], outputs=chatbot_output)
 
         # Launch the Gradio app
         demo.launch()
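The commented-out smoke test deleted above can be reproduced against the new signature roughly as follows (a sketch assuming `solver` has already been built as in `solve_problem_gradio`; the image path and key are illustrative):

from PIL import Image

user_query = "How many balls are there in the image?"
user_image = Image.open("example.png").convert("RGB")  # illustrative path
api_key = "sk-..."  # user-provided key, now passed through to the solver

messages = []
for message_batch in solver.stream_solve_user_problem(user_query, user_image, api_key, messages):
    for message in message_batch:
        print(f"{message.role}: {message.content}")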
opentools/engine/openai.py CHANGED
@@ -43,6 +43,7 @@ class ChatOpenAI(EngineLM, CachedEngine):
         is_multimodal: bool=False,
         # enable_cache: bool=True,
         enable_cache: bool=False, # NOTE: disable cache for now
+        api_key: str=None,
         **kwargs):
         """
         :param model_string:
@@ -61,11 +62,11 @@ class ChatOpenAI(EngineLM, CachedEngine):
         super().__init__(cache_path=cache_path)
 
         self.system_prompt = system_prompt
-        if os.getenv("OPENAI_API_KEY") is None:
+        if api_key is None:
             raise ValueError("Please set the OPENAI_API_KEY environment variable if you'd like to use OpenAI models.")
 
         self.client = OpenAI(
-            api_key=os.getenv("OPENAI_API_KEY"),
+            api_key=api_key,
         )
         self.model_string = model_string
         self.is_multimodal = is_multimodal
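ChatOpenAI now takes the key as an explicit constructor argument and no longer consults the environment, even though the unchanged error message still mentions OPENAI_API_KEY. A minimal usage sketch (placeholder key and prompt; the callable interface matches the usage in executor.py below):

from opentools.engine.openai import ChatOpenAI

# Passing api_key=None now raises ValueError, so callers must supply a key.
llm = ChatOpenAI(model_string="gpt-4o-mini", is_multimodal=False, api_key="sk-...")
response = llm("Summarize the change in one sentence.")  # engines are callable, as in executor.py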
opentools/models/executor.py CHANGED
@@ -18,13 +18,14 @@ def timeout_handler(signum, frame):
     raise TimeoutError("Function execution timed out")
 
 class Executor:
-    def __init__(self, llm_engine_name: str, root_cache_dir: str = "solver_cache", num_threads: int = 1, max_time: int = 120, max_output_length: int = 100000, enable_signal: bool = True):
+    def __init__(self, llm_engine_name: str, root_cache_dir: str = "solver_cache", num_threads: int = 1, max_time: int = 120, max_output_length: int = 100000, enable_signal: bool = True, api_key: str = None):
         self.llm_engine_name = llm_engine_name
         self.root_cache_dir = root_cache_dir
         self.num_threads = num_threads
         self.max_time = max_time
         self.max_output_length = max_output_length
         self.enable_signal = enable_signal
+        self.api_key = api_key
 
     def set_query_cache_dir(self, query_cache_dir):
         if query_cache_dir:
@@ -130,7 +131,7 @@ Reason: The command should process multiple items in a single execution, not sep
 Remember: Your <command> field MUST be valid Python code including any necessary data preparation steps and one or more `execution = tool.execute(` calls, without any additional explanatory text. The format `execution = tool.execute` must be strictly followed, and the last line must begin with `execution = tool.execute` to capture the final output.
 """
 
-        llm_generate_tool_command = ChatOpenAI(model_string=self.llm_engine_name, is_multimodal=False)
+        llm_generate_tool_command = ChatOpenAI(model_string=self.llm_engine_name, is_multimodal=False, api_key=self.api_key)
         tool_command = llm_generate_tool_command(prompt_generate_tool_command, response_format=ToolCommand)
 
         return tool_command
@@ -207,12 +208,14 @@ Remember: Your <command> field MUST be valid Python code including any necessary
 
         # Check if the tool requires an LLM engine
         # NOTE FIXME may need to refine base.py and tool.py to handle this better
+        inputs = {}
         if getattr(tool_class, 'require_llm_engine', False):
             # Instantiate the tool with the model_string
-            tool = tool_class(model_string=self.llm_engine_name)
-        else:
-            # Instantiate the tool without model_string for tools that don't require it
-            tool = tool_class()
+            inputs['model_string'] = self.llm_engine_name
+        if getattr(tool_class, 'require_api_key', False):
+            # Instantiate the tool with the api_key
+            inputs['api_key'] = self.api_key
+        tool = tool_class(**inputs)
 
         # Set the custom output directory
         # NOTE FIXME: May have a better way to handle this
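The two instantiation branches are replaced by a kwargs dict driven by the class-level require_llm_engine and require_api_key flags, so a tool opts into each argument independently. The same pattern in isolation (the dummy tool classes are for illustration, not from the repo):

# Flag-driven instantiation, as in Executor above; dummy classes for illustration.
class PlainTool:
    def execute(self, prompt):
        return prompt

class LLMTool:
    require_llm_engine = True
    require_api_key = True

    def __init__(self, model_string=None, api_key=None):
        self.model_string = model_string
        self.api_key = api_key

def build_tool(tool_class, llm_engine_name, api_key):
    inputs = {}
    if getattr(tool_class, 'require_llm_engine', False):
        inputs['model_string'] = llm_engine_name
    if getattr(tool_class, 'require_api_key', False):
        inputs['api_key'] = api_key
    return tool_class(**inputs)  # PlainTool gets no kwargs; LLMTool gets both

plain = build_tool(PlainTool, "gpt-4o-mini", "sk-...")
llm_tool = build_tool(LLMTool, "gpt-4o-mini", "sk-...")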
opentools/models/initializer.py CHANGED
@@ -7,11 +7,12 @@ from typing import Dict, Any, List, Tuple
 
 
 class Initializer:
-    def __init__(self, enabled_tools: List[str] = [], model_string: str = None):
+    def __init__(self, enabled_tools: List[str] = [], model_string: str = None, api_key: str = None):
         self.toolbox_metadata = {}
         self.available_tools = []
         self.enabled_tools = enabled_tools
         self.model_string = model_string # llm model string
+        self.api_key = api_key
 
         print("\nInitializing OpenTools...")
         print(f"Enabled tools: {self.enabled_tools}")
@@ -64,10 +65,14 @@ class Initializer:
             # print(f"Class __dict__: {obj.__dict__}")
             try:
                 # Check if the tool requires an LLM engine
+                inputs = {}
                 if hasattr(obj, 'require_llm_engine') and obj.require_llm_engine:
-                    tool_instance = obj(model_string=self.model_string)
-                else:
-                    tool_instance = obj()
+                    inputs['model_string'] = self.model_string
+
+                if hasattr(obj, 'require_api_key') and obj.require_api_key:
+                    inputs['api_key'] = self.api_key
+
+                tool_instance = obj(**inputs)
 
                 # print(f"\nInstance attributes: {dir(tool_instance)}")
                 # print(f"\nInstance __dict__: {tool_instance.__dict__}")
opentools/models/planner.py CHANGED
@@ -9,10 +9,10 @@ from opentools.models.memory import Memory
 from opentools.models.formatters import QueryAnalysis, NextStep, MemoryVerification
 
 class Planner:
-    def __init__(self, llm_engine_name: str, toolbox_metadata: dict = None, available_tools: List = None):
+    def __init__(self, llm_engine_name: str, toolbox_metadata: dict = None, available_tools: List = None, api_key: str = None):
         self.llm_engine_name = llm_engine_name
-        self.llm_engine_mm = ChatOpenAI(model_string=llm_engine_name, is_multimodal=True)
-        self.llm_engine = ChatOpenAI(model_string=llm_engine_name, is_multimodal=False)
+        self.llm_engine_mm = ChatOpenAI(model_string=llm_engine_name, is_multimodal=True, api_key=api_key)
+        self.llm_engine = ChatOpenAI(model_string=llm_engine_name, is_multimodal=False, api_key=api_key)
         self.toolbox_metadata = toolbox_metadata if toolbox_metadata is not None else {}
         self.available_tools = available_tools if available_tools is not None else []
 
@@ -47,13 +47,10 @@ class Planner:
         return image_info
 
     def generate_base_response(self, question: str, image: str, max_tokens: str = 4000, bytes_mode: bool = False) -> str:
-        if bytes_mode:
-            image_info = self.get_image_info_bytes(image)
-        else:
-            image_info = self.get_image_info(image)
+        image_info = self.get_image_info(image)
 
         input_data = [question]
-        if image_info and "image_path" in image_info and not bytes_mode:
+        if image_info and "image_path" in image_info:
             try:
                 with open(image_info["image_path"], 'rb') as file:
                     image_bytes = file.read()
@@ -66,10 +63,7 @@ class Planner:
         return self.base_response
 
     def analyze_query(self, question: str, image: str, bytes_mode: bool = False) -> str:
-        if bytes_mode:
-            image_info = self.get_image_info_bytes(image)
-        else:
-            image_info = self.get_image_info(image)
+        image_info = self.get_image_info(image)
         print("image_info: ", image_info)
 
         query_prompt = f"""
@@ -100,9 +94,7 @@ Please present your analysis in a clear, structured format.
 """
 
         input_data = [query_prompt]
-        if bytes_mode:
-            image_bytes = image
-        else:
+        if image_info and "image_path" in image_info:
             try:
                 with open(image_info["image_path"], 'rb') as file:
                     image_bytes = file.read()
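Both bytes_mode branches are dropped, so images are now always resolved through get_image_info, although the now-unused bytes_mode parameter remains in both signatures. The surviving guard reduces to the following shape (a standalone sketch; the get_image_info stub replaces the Planner method, which this diff does not show):

# Standalone sketch of the simplified image handling in planner.py.
def get_image_info(image_path):
    # Stub for Planner.get_image_info; assumed to return a dict with "image_path".
    return {"image_path": image_path} if image_path else {}

def build_input_data(question, image_path):
    input_data = [question]
    image_info = get_image_info(image_path)
    if image_info and "image_path" in image_info:
        try:
            with open(image_info["image_path"], "rb") as file:
                input_data.append(file.read())  # raw bytes for the multimodal engine
        except Exception as e:
            print(f"Error reading image file: {e}")  # illustrative error handling
    return input_data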
opentools/models/utlis.py DELETED
@@ -1,73 +0,0 @@
-# import json
-
-# def truncate_result(result, max_length: int = 100000, truncation_indicator: str = "...") -> str:
-#     """
-#     Truncate the result to specified length while preserving JSON structure when possible.
-
-#     Args:
-#         result: The result to truncate (can be str, list, dict, or other types)
-#         max_length: Maximum length of the output string (default: 1000)
-#         truncation_indicator: String to indicate truncation (default: "...")
-
-#     Returns:
-#         str: Truncated string representation of the result
-#     """
-#     if isinstance(result, (dict, list)):
-#         try:
-#             result_str = json.dumps(result, ensure_ascii=False)
-#         except:
-#             result_str = str(result)
-#     else:
-#         result_str = str(result)
-
-#     indicator_length = len(truncation_indicator)
-
-#     if len(result_str) > max_length:
-#         # For JSON-like strings, try to find the last complete structure
-#         if result_str.startswith('{') or result_str.startswith('['):
-#             # Find last complete element
-#             pos = max_length - indicator_length
-#             while pos > 0 and not (
-#                 result_str[pos] in ',]}' and
-#                 result_str[pos:].count('"') % 2 == 0
-#             ):
-#                 pos -= 1
-#             if pos > 0:
-#                 return result_str[:pos + 1] + truncation_indicator
-
-#         # Default truncation if not JSON or no suitable truncation point found
-#         return result_str[:max_length - indicator_length] + truncation_indicator
-
-#     return result_str
-
-def make_json_serializable(obj):
-    if isinstance(obj, (str, int, float, bool, type(None))):
-        return obj
-    elif isinstance(obj, dict):
-        return {make_json_serializable(key): make_json_serializable(value) for key, value in obj.items()}
-    elif isinstance(obj, list):
-        return [make_json_serializable(element) for element in obj]
-    elif hasattr(obj, '__dict__'):
-        return make_json_serializable(obj.__dict__)
-    else:
-        return str(obj)
-
-
-def make_json_serializable_truncated(obj, max_length: int = 100000):
-    if isinstance(obj, (int, float, bool, type(None))):
-        if isinstance(obj, (int, float)) and len(str(obj)) > max_length:
-            return str(obj)[:max_length - 3] + "..."
-        return obj
-    elif isinstance(obj, str):
-        return obj if len(obj) <= max_length else obj[:max_length - 3] + "..."
-    elif isinstance(obj, dict):
-        return {make_json_serializable_truncated(key, max_length): make_json_serializable_truncated(value, max_length)
-                for key, value in obj.items()}
-    elif isinstance(obj, list):
-        return [make_json_serializable_truncated(element, max_length) for element in obj]
-    elif hasattr(obj, '__dict__'):
-        return make_json_serializable_truncated(obj.__dict__, max_length)
-    else:
-        result = str(obj)
-        return result if len(result) <= max_length else result[:max_length - 3] + "..."
-
opentools/setup.py DELETED
@@ -1,20 +0,0 @@
-from setuptools import setup, find_packages
-
-setup(
-    name='opentools',
-    version='0.1.0',
-    # description='A flexible and versatile toolbox agent framework for complex tasks in both general and scientific scenarios.',
-    # long_description=open('README.md').read(),
-    # long_description_content_type='text/markdown',
-    # author='Pan Lu, Bowen Chen, Sheng Liu',
-    # author_email='[email protected]',
-    # url='', # You can add a GitHub or project URL here
-    packages=find_packages(),
-    # install_requires=open('requirements.txt').read().splitlines(),
-    # classifiers=[
-    #     'Programming Language :: Python :: 3',
-    #     'License :: OSI Approved :: MIT License',
-    #     'Operating System :: OS Independent',
-    # ],
-    # python_requires='>=3.10',
-)
opentools/tools/generalist_solution_generator/tool.py CHANGED
@@ -4,8 +4,9 @@ from opentools.engine.openai import ChatOpenAI
 
 class Generalist_Solution_Generator_Tool(BaseTool):
     require_llm_engine = True
+    require_api_key = True
 
-    def __init__(self, model_string="gpt-4o-mini"):
+    def __init__(self, model_string="gpt-4o-mini", api_key=None):
         super().__init__(
             tool_name="Generalist_Solution_Generator_Tool",
             tool_description="A generalized tool that takes query from the user as prompt, and answers the question step by step to the best of its ability. It can also accept an image.",
@@ -72,12 +73,13 @@ class Generalist_Solution_Generator_Tool(BaseTool):
             # }
         )
         self.model_string = model_string
+        self.api_key = api_key
 
     def execute(self, prompt, image=None):
 
        print(f"\nInitializing Generalist Tool with model: {self.model_string}")
        multimodal = True if image else False
-        llm_engine = ChatOpenAI(model_string=self.model_string, is_multimodal=multimodal)
+        llm_engine = ChatOpenAI(model_string=self.model_string, is_multimodal=multimodal, api_key=self.api_key)
 
        try:
            input_data = [prompt]
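require_api_key = True is the class-level flag that Initializer and Executor now check before passing api_key (see the instantiation pattern above). A third-party tool would opt in the same way; this subclass is hypothetical, and the BaseTool import path is assumed:

from opentools.tools.base import BaseTool  # assumed import path for BaseTool

class Text_Summarizer_Tool(BaseTool):  # hypothetical tool, for illustration
    require_llm_engine = True
    require_api_key = True

    def __init__(self, model_string="gpt-4o-mini", api_key=None):
        super().__init__(
            tool_name="Text_Summarizer_Tool",
            tool_description="Summarizes a block of text with an LLM.",
        )
        self.model_string = model_string
        self.api_key = api_key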
setup.py DELETED
@@ -1,20 +0,0 @@
-from setuptools import setup, find_packages
-
-setup(
-    name='opentools',
-    version='0.1.0',
-    # description='A flexible and versatile toolbox agent framework for complex tasks in both general and scientific scenarios.',
-    # long_description=open('README.md').read(),
-    # long_description_content_type='text/markdown',
-    # author='Pan Lu, Bowen Chen, Sheng Liu',
-    # author_email='[email protected]',
-    # url='', # You can add a GitHub or project URL here
-    packages=find_packages(),
-    # install_requires=open('requirements.txt').read().splitlines(),
-    # classifiers=[
-    #     'Programming Language :: Python :: 3',
-    #     'License :: OSI Approved :: MIT License',
-    #     'Operating System :: OS Independent',
-    # ],
-    # python_requires='>=3.10',
-)