Spaces:
Sleeping
Sleeping
Add code generation
Browse files- app.py +7 -2
- autogen_utils.py +158 -2
- requirements.txt +1 -0
app.py
CHANGED
@@ -10,15 +10,16 @@ from autogen_utils import (
|
|
10 |
MathUserProxyAgent,
|
11 |
RetrieveUserProxyAgent,
|
12 |
check_termination_and_human_reply,
|
|
|
13 |
get_retrieve_config,
|
14 |
initialize_agents,
|
15 |
)
|
16 |
from configs import Q1, Q2, Q3, TIMEOUT, TITLE
|
17 |
from custom_widgets import RowAgentWidget
|
18 |
from panel.chat import ChatInterface
|
19 |
-
from panel.widgets import Button, PasswordInput, Switch,
|
20 |
|
21 |
-
pn.extension(
|
22 |
|
23 |
template = pn.template.BootstrapTemplate(title=TITLE)
|
24 |
|
@@ -233,6 +234,7 @@ async def reply_chat(contents, user, instance):
|
|
233 |
|
234 |
if not init_sender:
|
235 |
init_sender = agents[0]
|
|
|
236 |
await agents_chat(init_sender, manager, contents, agents)
|
237 |
return "The task is done. Please start a new task."
|
238 |
|
@@ -443,4 +445,7 @@ btn_example2.on_click(load_example)
|
|
443 |
btn_example3.on_click(load_example)
|
444 |
btn_example4.on_click(load_example)
|
445 |
|
|
|
|
|
|
|
446 |
template.servable(title=TITLE)
|
|
|
10 |
MathUserProxyAgent,
|
11 |
RetrieveUserProxyAgent,
|
12 |
check_termination_and_human_reply,
|
13 |
+
generate_code,
|
14 |
get_retrieve_config,
|
15 |
initialize_agents,
|
16 |
)
|
17 |
from configs import Q1, Q2, Q3, TIMEOUT, TITLE
|
18 |
from custom_widgets import RowAgentWidget
|
19 |
from panel.chat import ChatInterface
|
20 |
+
from panel.widgets import Button, CodeEditor, PasswordInput, Switch, TextInput
|
21 |
|
22 |
+
pn.extension("codeeditor")
|
23 |
|
24 |
template = pn.template.BootstrapTemplate(title=TITLE)
|
25 |
|
|
|
234 |
|
235 |
if not init_sender:
|
236 |
init_sender = agents[0]
|
237 |
+
await generate_code(agents, manager, contents, code_editor)
|
238 |
await agents_chat(init_sender, manager, contents, agents)
|
239 |
return "The task is done. Please start a new task."
|
240 |
|
|
|
445 |
btn_example3.on_click(load_example)
|
446 |
btn_example4.on_click(load_example)
|
447 |
|
448 |
+
code_editor = CodeEditor(value="", sizing_mode="stretch_width", language="python", height=300)
|
449 |
+
template.main.append(code_editor)
|
450 |
+
|
451 |
template.servable(title=TITLE)
|
autogen_utils.py
CHANGED
@@ -1,11 +1,13 @@
|
|
1 |
import asyncio
|
2 |
import sys
|
|
|
3 |
import threading
|
4 |
import time
|
5 |
from ast import literal_eval
|
6 |
|
7 |
import autogen
|
8 |
import chromadb
|
|
|
9 |
import panel as pn
|
10 |
from autogen import Agent, AssistantAgent, UserProxyAgent
|
11 |
from autogen.agentchat.contrib.compressible_agent import CompressibleAgent
|
@@ -17,7 +19,6 @@ from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProx
|
|
17 |
from autogen.agentchat.contrib.teachable_agent import TeachableAgent
|
18 |
from autogen.code_utils import extract_code
|
19 |
from configs import Q1, Q2, Q3, TIMEOUT, TITLE
|
20 |
-
from panel.widgets import TextAreaInput
|
21 |
|
22 |
try:
|
23 |
from termcolor import colored
|
@@ -160,7 +161,7 @@ async def get_human_input(name, prompt: str, instance=None) -> str:
|
|
160 |
"""Get human input."""
|
161 |
if instance is None:
|
162 |
return input(prompt)
|
163 |
-
get_input_widget = TextAreaInput(placeholder=prompt, name="", sizing_mode="stretch_width")
|
164 |
get_input_checkbox = pn.widgets.Checkbox(name="Check to Submit Feedback")
|
165 |
instance.send(pn.Row(get_input_widget, get_input_checkbox), user=name, respond=False)
|
166 |
ts = time.time()
|
@@ -258,3 +259,158 @@ async def check_termination_and_human_reply(
|
|
258 |
print(colored("\n>>>>>>>> USING AUTO REPLY...", "red"), flush=True)
|
259 |
|
260 |
return False, None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import asyncio
|
2 |
import sys
|
3 |
+
import textwrap
|
4 |
import threading
|
5 |
import time
|
6 |
from ast import literal_eval
|
7 |
|
8 |
import autogen
|
9 |
import chromadb
|
10 |
+
import isort
|
11 |
import panel as pn
|
12 |
from autogen import Agent, AssistantAgent, UserProxyAgent
|
13 |
from autogen.agentchat.contrib.compressible_agent import CompressibleAgent
|
|
|
19 |
from autogen.agentchat.contrib.teachable_agent import TeachableAgent
|
20 |
from autogen.code_utils import extract_code
|
21 |
from configs import Q1, Q2, Q3, TIMEOUT, TITLE
|
|
|
22 |
|
23 |
try:
|
24 |
from termcolor import colored
|
|
|
161 |
"""Get human input."""
|
162 |
if instance is None:
|
163 |
return input(prompt)
|
164 |
+
get_input_widget = pn.widgets.TextAreaInput(placeholder=prompt, name="", sizing_mode="stretch_width")
|
165 |
get_input_checkbox = pn.widgets.Checkbox(name="Check to Submit Feedback")
|
166 |
instance.send(pn.Row(get_input_widget, get_input_checkbox), user=name, respond=False)
|
167 |
ts = time.time()
|
|
|
259 |
print(colored("\n>>>>>>>> USING AUTO REPLY...", "red"), flush=True)
|
260 |
|
261 |
return False, None
|
262 |
+
|
263 |
+
|
264 |
+
async def generate_code(agents, manager, contents, code_editor):
|
265 |
+
code = """import autogen
|
266 |
+
import os
|
267 |
+
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
|
268 |
+
from autogen.agentchat.contrib.math_user_proxy_agent import MathUserProxyAgent
|
269 |
+
|
270 |
+
config_list = autogen.config_list_from_json(
|
271 |
+
"OAI_CONFIG_LIST",
|
272 |
+
file_location=".",
|
273 |
+
)
|
274 |
+
if not config_list:
|
275 |
+
os.environ["MODEL"] = "<your model name>"
|
276 |
+
os.environ["OPENAI_API_KEY"] = "<your openai api key>"
|
277 |
+
os.environ["OPENAI_BASE_URL"] = "<your openai base url>" # optional
|
278 |
+
|
279 |
+
config_list = autogen.config_list_from_models(
|
280 |
+
model_list=[os.environ.get("MODEL", "gpt-35-turbo")],
|
281 |
+
)
|
282 |
+
|
283 |
+
llm_config = {
|
284 |
+
"timeout": 60,
|
285 |
+
"cache_seed": 42,
|
286 |
+
"config_list": config_list,
|
287 |
+
"temperature": 0,
|
288 |
+
}
|
289 |
+
|
290 |
+
def termination_msg(x):
|
291 |
+
_msg = str(x.get("content", "")).upper().strip().strip("\\n").strip(".")
|
292 |
+
return isinstance(x, dict) and (_msg.endswith("TERMINATE") or _msg.startswith("TERMINATE"))
|
293 |
+
|
294 |
+
agents = []
|
295 |
+
|
296 |
+
"""
|
297 |
+
|
298 |
+
for agent in agents:
|
299 |
+
if isinstance(agent, RetrieveUserProxyAgent):
|
300 |
+
_code = f"""from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
|
301 |
+
|
302 |
+
agent = RetrieveUserProxyAgent(
|
303 |
+
name="{agent.name}",
|
304 |
+
is_termination_msg=termination_msg,
|
305 |
+
human_input_mode="TERMINATE",
|
306 |
+
max_consecutive_auto_reply=5,
|
307 |
+
retrieve_config={agent._retrieve_config},
|
308 |
+
code_execution_config={agent._code_execution_config}, # set to False if you don't want to execute the code
|
309 |
+
default_auto_reply="Please reply exactly `TERMINATE` to me if the task is done.",
|
310 |
+
)
|
311 |
+
|
312 |
+
"""
|
313 |
+
elif isinstance(agent, GPTAssistantAgent):
|
314 |
+
_code = f"""from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent
|
315 |
+
|
316 |
+
agent = GPTAssistantAgent(
|
317 |
+
name="{agent.name}",
|
318 |
+
instructions="{agent.system_message}",
|
319 |
+
llm_config=llm_config,
|
320 |
+
is_termination_msg=termination_msg,
|
321 |
+
)
|
322 |
+
|
323 |
+
"""
|
324 |
+
elif isinstance(agent, CompressibleAgent):
|
325 |
+
_code = f"""from autogen.agentchat.contrib.compressible_agent import CompressibleAgent
|
326 |
+
|
327 |
+
compress_config = {{
|
328 |
+
"mode": "COMPRESS",
|
329 |
+
"trigger_count": 600, # set this to a large number for less frequent compression
|
330 |
+
"verbose": True,  # to allow printing of compression information: context before and after compression
|
331 |
+
"leave_last_n": 2,
|
332 |
+
}}
|
333 |
+
|
334 |
+
agent = CompressibleAgent(
|
335 |
+
name="{agent.name}",
|
336 |
+
system_message={agent.system_message},
|
337 |
+
llm_config=llm_config,
|
338 |
+
compress_config=compress_config,
|
339 |
+
is_termination_msg=termination_msg,
|
340 |
+
)
|
341 |
+
|
342 |
+
"""
|
343 |
+
elif isinstance(agent, UserProxyAgent):
|
344 |
+
_code = f"""from autogen import UserProxyAgent
|
345 |
+
|
346 |
+
agent = UserProxyAgent(
|
347 |
+
name="{agent.name}",
|
348 |
+
is_termination_msg=termination_msg,
|
349 |
+
human_input_mode="TERMINATE",
|
350 |
+
default_auto_reply="Please reply exactly `TERMINATE` to me if the task is done.",
|
351 |
+
max_consecutive_auto_reply=5,
|
352 |
+
code_execution_config={agent._code_execution_config},
|
353 |
+
)
|
354 |
+
|
355 |
+
"""
|
356 |
+
elif isinstance(agent, RetrieveAssistantAgent):
|
357 |
+
_code = f"""from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent
|
358 |
+
|
359 |
+
agent = RetrieveAssistantAgent(
|
360 |
+
name="{agent.name}",
|
361 |
+
system_message="{agent.system_message}",
|
362 |
+
llm_config=llm_config,
|
363 |
+
is_termination_msg=termination_msg,
|
364 |
+
retrieve_config={agent._retrieve_config},
|
365 |
+
)
|
366 |
+
|
367 |
+
"""
|
368 |
+
elif isinstance(agent, AssistantAgent):
|
369 |
+
_code = f"""from autogen import AssistantAgent
|
370 |
+
|
371 |
+
agent = AssistantAgent(
|
372 |
+
name="{agent.name}",
|
373 |
+
system_message="{agent.system_message}",
|
374 |
+
llm_config=llm_config,
|
375 |
+
is_termination_msg=termination_msg,
|
376 |
+
)
|
377 |
+
|
378 |
+
"""
|
379 |
+
code += _code + "\n" + "agents.append(agent)\n\n"
|
380 |
+
|
381 |
+
_code = """
|
382 |
+
for agent in agents:
|
383 |
+
if "UserProxy" in str(type(agent)):
|
384 |
+
init_sender = agent
|
385 |
+
break
|
386 |
+
|
387 |
+
if not init_sender:
|
388 |
+
init_sender = agents[0]
|
389 |
+
|
390 |
+
"""
|
391 |
+
code += _code
|
392 |
+
|
393 |
+
if manager:
|
394 |
+
_code = """
|
395 |
+
groupchat = autogen.GroupChat(
|
396 |
+
agents=agents, messages=[], max_round=12, speaker_selection_method="round_robin", allow_repeat_speaker=False
|
397 |
+
) # todo: auto, sometimes message has no name
|
398 |
+
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)
|
399 |
+
|
400 |
+
recipient = manager
|
401 |
+
"""
|
402 |
+
else:
|
403 |
+
_code = """
|
404 |
+
recipient = agents[1] if agents[1] != init_sender else agents[0]
|
405 |
+
"""
|
406 |
+
code += _code
|
407 |
+
|
408 |
+
_code = f"""
|
409 |
+
if isinstance(init_sender, (RetrieveUserProxyAgent, MathUserProxyAgent)):
|
410 |
+
init_sender.initiate_chat(recipient, problem="{contents}")
|
411 |
+
else:
|
412 |
+
init_sender.initiate_chat(recipient, message="{contents}")
|
413 |
+
"""
|
414 |
+
code += _code
|
415 |
+
code = textwrap.dedent(code)
|
416 |
+
code_editor.value = isort.code(code)
|
requirements.txt
CHANGED
@@ -3,3 +3,4 @@ panel>=1.3.1
|
|
3 |
cloudpickle
|
4 |
diskcache
|
5 |
yfinance
|
|
|
|
3 |
cloudpickle
|
4 |
diskcache
|
5 |
yfinance
|
6 |
+
isort
|