Upload 5 files
Browse files- README.md +11 -8
- app.py +34 -0
- gitattributes +35 -0
- multi_agent.py +90 -0
- requirements.txt +3 -0
README.md
CHANGED
@@ -1,11 +1,14 @@
|
|
1 |
---
|
2 |
-
title: AI
|
3 |
-
emoji:
|
4 |
-
colorFrom:
|
5 |
-
colorTo:
|
6 |
-
sdk:
|
7 |
-
|
8 |
-
|
|
|
|
|
|
|
9 |
---
|
10 |
|
11 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
1 |
---
|
2 |
+
title: Multi-Agent AI - Coding
|
3 |
+
emoji: ✨
|
4 |
+
colorFrom: yellow
|
5 |
+
colorTo: gray
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 4.36.1
|
8 |
+
app_file: app.py
|
9 |
+
pinned: true
|
10 |
+
license: apache-2.0
|
11 |
+
short_description: Multi-Agent AI with Microsoft AutoGen
|
12 |
---
|
13 |
|
14 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import datetime, os, threading
|
3 |
+
|
4 |
+
from multi_agent import run_multi_agent
|
5 |
+
|
6 |
+
lock = threading.Lock()
|
7 |
+
|
8 |
+
LLM = "gpt-4o"
|
9 |
+
|
10 |
+
def invoke(openai_api_key, task):
    """Validate inputs and run the multi-agent coding workflow.

    Args:
        openai_api_key: OpenAI API key supplied by the user (required).
        task: Natural-language coding task for the agents (required).

    Returns:
        Markdown string produced by run_multi_agent.

    Raises:
        gr.Error: If a required input is missing, or unconditionally on the
            hosted Space (local code execution is disabled there).
    """
    if not openai_api_key:
        raise gr.Error("OpenAI API Key is required.")

    if not task:
        raise gr.Error("Task is required.")

    # Deliberate guard for the hosted Space: generated code is executed
    # locally, so users must clone the Space to actually run it.
    raise gr.Error("Please clone space due to local code execution.")

    # Unreachable on the hosted Space; kept for cloned deployments.
    with lock:
        os.environ["OPENAI_API_KEY"] = openai_api_key
        try:
            result = run_multi_agent(LLM, task)
        finally:
            # Always scrub the key from the environment, even if the agent
            # run raises (the original leaked it on failure).
            del os.environ["OPENAI_API_KEY"]
    return result
|
24 |
+
|
25 |
+
# Close any Gradio apps left over from a previous run of this process.
gr.close_all()

# NOTE(review): INPUT, OUTPUT and DESCRIPTION are expected as environment
# variables (HF Spaces variables/secrets); a missing one raises KeyError at
# import time — confirm they are configured on the Space.
demo = gr.Interface(fn = invoke,
                    # API key is masked; the task box is pre-filled with
                    # today's date plus the INPUT env var text.
                    inputs = [gr.Textbox(label = "OpenAI API Key", type = "password", lines = 1),
                              gr.Textbox(label = "Task", value = f"Today is {datetime.date.today()}. {os.environ['INPUT']}")],
                    # sanitize_html = False so base64 data-URI images from the
                    # agent output render inline in the Markdown component.
                    outputs = [gr.Markdown(label = "Output", value = os.environ["OUTPUT"], line_breaks = True, sanitize_html = False)],
                    title = "Multi-Agent AI: Coding",
                    description = os.environ["DESCRIPTION"])

demo.launch()
|
gitattributes
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
multi_agent.py
ADDED
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import base64, datetime, json, os
|
2 |
+
|
3 |
+
from autogen import ConversableAgent, AssistantAgent
|
4 |
+
from autogen.coding import LocalCommandLineCodeExecutor
|
5 |
+
|
6 |
+
def read_file(file_path: str) -> str:
    """Return the entire text content of *file_path*, decoded as UTF-8."""
    with open(file_path, "r", encoding="utf-8") as handle:
        content = handle.read()
    return content
|
9 |
+
|
10 |
+
def read_image_file(image_file_path: str) -> str:
    """Read a binary image file and return its contents base64-encoded as text."""
    with open(image_file_path, "rb") as handle:
        raw = handle.read()
    encoded = base64.b64encode(raw)
    return encoded.decode("utf-8")
|
14 |
+
|
15 |
+
def generate_markdown_image(image_data: str) -> str:
    """Wrap base64-encoded PNG data in a Markdown inline-image element.

    Args:
        image_data: Base64-encoded PNG bytes as text (from read_image_file).

    Returns:
        A Markdown image tag with a data URI source.  The original body was
        an empty f-string (`f""`) that discarded *image_data* — the markup
        was evidently lost; reconstructed here.
    """
    return f"![image](data:image/png;base64,{image_data})"
|
17 |
+
|
18 |
+
def format_as_markdown(code: str) -> str:
    """Return *code* wrapped in a fenced (```) Markdown code block."""
    return "```\n" + code + "\n```"
|
23 |
+
|
24 |
+
def get_latest_file(directory, file_extension):
    """Return the name (not the path) of the most recently modified file in
    *directory* whose extension equals *file_extension* (e.g. ".png").

    Args:
        directory: Directory to scan (non-recursive).
        file_extension: Extension to match, including the leading dot.

    Returns:
        The bare file name of the newest match, or None when no file matches.
        (The original also had a dead `if file:` check — os.listdir never
        yields empty names — and tracked the max by hand.)
    """
    candidates = [
        name for name in os.listdir(directory)
        if os.path.splitext(name)[1] == file_extension
    ]
    if not candidates:
        return None
    # Newest by modification time; mtime floats order identically to the
    # datetime objects the original compared.
    return max(
        candidates,
        key=lambda name: os.path.getmtime(os.path.join(directory, name)),
    )
|
41 |
+
|
42 |
+
def run_multi_agent(llm, task):
    """Run an AutoGen writer/executor agent pair on *task* and return Markdown.

    Args:
        llm: OpenAI model name (e.g. "gpt-4o"); autogen reads the
            OPENAI_API_KEY environment variable for authentication.
        task: Natural-language coding task handed to the agents.

    Returns:
        Markdown string: the latest generated PNG inlined as a base64 data
        URI (when one exists), followed by the chat transcript.
    """
    llm_config = {"model": llm}

    # Code blocks proposed by the writer are executed locally in ./coding.
    executor = LocalCommandLineCodeExecutor(
        timeout=60,
        work_dir="coding",
    )

    code_executor_agent = ConversableAgent(
        name="code_executor_agent",
        llm_config=False,
        code_execution_config={"executor": executor},
        human_input_mode="NEVER",
        default_auto_reply="TERMINATE",
    )

    code_writer_agent = AssistantAgent(
        name="code_writer_agent",
        llm_config=llm_config,
        code_execution_config=False,
        human_input_mode="NEVER",
    )

    chat_result = code_executor_agent.initiate_chat(
        code_writer_agent,
        message=task,
        max_turns=10,
    )

    # Build the transcript, skipping the first entry (the task prompt itself).
    chat = ""
    first_message = True
    for message in chat_result.chat_history:
        if not first_message:
            role = message["role"].replace("assistant", "Code Executor").replace("user", "Code Writer")
            chat += f"**{role}**\n{message['content']}\n\n"
        first_message = False

    # Inline the newest PNG the agents produced, if any.  The original
    # hard-coded the HF-Spaces absolute path /home/user/app/coding/<file>
    # and crashed opening ".../None" when no image existed; resolve against
    # the executor's work_dir instead and degrade gracefully.
    markdown_code_png = ""
    file_name_png = get_latest_file("coding", ".png")
    if file_name_png is not None:
        image_data = read_image_file(os.path.join("coding", file_name_png))
        markdown_code_png = generate_markdown_image(image_data)

    result = f"{markdown_code_png}\n\n{chat}"

    print("===")
    print(result)
    print("===")

    return result
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
chess==1.10.0
|
2 |
+
markdown==3.6
|
3 |
+
pyautogen==0.2.25
|