Reorged folders.
- .gitignore +2 -0
- Dockerfile +8 -13
- {demo → app}/config.py +4 -2
- {demo → app}/examples/log.csv +0 -0
- {demo → app}/gradio_meta_prompt.py +3 -3
- config.yml +10 -10
- meta_prompt/__init__.py +4 -0
- {src/meta_prompt → meta_prompt}/meta_prompt.py +0 -0
- poetry.lock +0 -0
- pyproject.toml +24 -0
- src/meta_prompt/__init__.py +0 -1
- tests/meta_prompt_graph_test.py +3 -3
.gitignore
CHANGED
@@ -3,3 +3,5 @@
 __pycache__
 .env
 config.yml.debug
+debug.yml
+dist
Dockerfile
CHANGED
@@ -1,25 +1,20 @@
 # Use an official Python runtime as the base image
-FROM python:3.
+FROM python:3.10
 
 # Set the working directory in the container
 WORKDIR /app
 
 # Copy all files from the current directory to the working directory in the container
-COPY . /app/
+COPY config.yml poetry.lock pyproject.toml /app/
+COPY demo /app/demo/
+COPY meta_prompt /app/meta_prompt/
 
-
-RUN
-
-# Set the environment variables
-ENV API_KEY=""
-ENV PROXY=""
-ENV OPENAI_API_BASE=""
-ENV OTHER_ARGS="--advanced_mode"
-ENV ADVANCED_MODE="true"
-ENV SERVER_NAME="0.0.0.0"
+RUN pip install --no-cache-dir -U poetry
+RUN poetry config virtualenvs.create false
+RUN poetry install --with=dev
 
 # Expose the port (if necessary)
 EXPOSE 7860
 
 # Run the script when the container launches
-CMD /
+CMD python demo/gradio_meta_prompt.py
{demo → app}/config.py
RENAMED
@@ -12,5 +12,7 @@ class LLMConfig(BaseModel):
 class MetaPromptConfig(BaseConfig):
     llms: Optional[dict[str, LLMConfig]]
     examples_path: Optional[str]
-    server_name: Optional[str] =
-    server_port: Optional[int] =
+    server_name: Optional[str] = None
+    server_port: Optional[int] = None
+    recursion_limit: Optional[int] = 25
+    recursion_limit_max: Optional[int] = 50
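
Note: a minimal sketch (not part of the commit) of how the two new optional fields behave once the config is loaded. The FileSource path and the direct-instantiation pattern are assumptions based on confz's documented API:

# Hedged sketch, not from the commit: load MetaPromptConfig via confz
# and read the new fields; file path is an assumption.
from confz import FileSource
from app.config import MetaPromptConfig

config = MetaPromptConfig(config_sources=FileSource(file="config.yml"))
print(config.recursion_limit)      # 25 unless overridden in config.yml
print(config.recursion_limit_max)  # 50 unless overridden in config.yml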
{demo → app}/examples/log.csv
RENAMED
File without changes
{demo → app}/gradio_meta_prompt.py
RENAMED
@@ -2,7 +2,7 @@ import gradio as gr
 from confz import BaseConfig, CLArgSource, EnvSource, FileSource
 from meta_prompt import MetaPromptGraph, AgentState
 from langchain_openai import ChatOpenAI
-from config import MetaPromptConfig
+from app.config import MetaPromptConfig
 
 class LLMModelFactory:
     def __init__(self):
@@ -85,8 +85,8 @@ iface = gr.Interface(
     ],
     additional_inputs=[
         gr.Textbox(label="Initial System Message", show_copy_button=True, value=""),
-        gr.Number(label="Recursion Limit", value=
-                  precision=0, minimum=1, maximum=
+        gr.Number(label="Recursion Limit", value=config.recursion_limit,
+                  precision=0, minimum=1, maximum=config.recursion_limit_max, step=1),
         gr.Dropdown(
             label="Model Name",
             choices=config.llms.keys(),
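
Note: the new number input is bounded by the two config fields added in config.py above. Where such a value typically ends up is LangGraph's standard "recursion_limit" entry in the run config; the helper below is a hypothetical sketch, since the actual call site inside MetaPromptGraph is not shown in this diff:

# Hypothetical helper: LangGraph reads "recursion_limit" from the run config
# passed to invoke() and aborts the graph once the limit is reached.
def invoke_with_limit(compiled_graph, state, recursion_limit: int):
    return compiled_graph.invoke(state, config={"recursion_limit": recursion_limit})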
config.yml
CHANGED
@@ -1,4 +1,12 @@
 llms:
+  groq/llama3-70b-8192:
+    type: ChatOpenAI
+    temperature: 0.1
+    model_name: "llama3-70b-8192"
+    openai_api_key: ""
+    openai_api_base: "https://api.groq.com/openai/v1"
+    max_tokens: 8192
+    verbose: true
   anthropic/claude-3-haiku:
     type: ChatOpenAI
     temperature: 0.1
@@ -23,15 +31,7 @@ llms:
     openai_api_base: "https://openrouter.ai/api/v1"
     max_tokens: 8192
     verbose: true
-  groq/llama3-70b-8192:
-    type: ChatOpenAI
-    temperature: 0.1
-    model_name: "llama3-70b-8192"
-    openai_api_key: ""
-    openai_api_base: "https://api.groq.com/openai/v1"
-    max_tokens: 8192
-    verbose: true
 
-examples_path: "
+examples_path: "app/examples"
 server_name: 0.0.0.0
-server_port: 7860
+server_port: 7860
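
Note: each llms entry maps onto ChatOpenAI keyword arguments. A sketch (not part of the commit) of the instance the groq entry presumably expands to, assuming the "type" key selects the class and the remaining keys pass through unchanged:

# Sketch: the groq/llama3-70b-8192 block as a direct ChatOpenAI call.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    temperature=0.1,
    model_name="llama3-70b-8192",
    openai_api_key="",  # left blank in the config; fill in a real key
    openai_api_base="https://api.groq.com/openai/v1",
    max_tokens=8192,
    verbose=True,
)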
meta_prompt/__init__.py
ADDED
@@ -0,0 +1,4 @@
+__version__ = '0.1.0'
+
+from .meta_prompt import AgentState, MetaPromptGraph
+
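
A quick smoke test of the relocated package (a sketch; assumes the repository root is importable, e.g. after poetry install):

# The package now resolves from the repo root, no src/ layout needed.
import meta_prompt

print(meta_prompt.__version__)  # '0.1.0'
print(meta_prompt.AgentState, meta_prompt.MetaPromptGraph)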
{src/meta_prompt → meta_prompt}/meta_prompt.py
RENAMED
File without changes
poetry.lock
ADDED
The diff for this file is too large to render.
pyproject.toml
ADDED
@@ -0,0 +1,24 @@
+[tool.poetry]
+name = "meta-prompt"
+version = "0.1.0"
+description = "This package generates high quality prompts with input and expected output examples."
+authors = ["Yale Huang <[email protected]>"]
+license = "MIT"
+
+[tool.poetry.dependencies]
+python = "^3.10"
+langgraph = "^0.1.5"
+langchain = "^0.2.6"
+langchain-openai = "^0.1.14"
+pydantic = "^2.8.2"
+
+[tool.poetry.dev-dependencies]
+gradio = "^4.37.2"
+confz = "^2.0.1"
+
+[tool.poetry.group.dev.dependencies]
+pytest = "^8.2.2"
+
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
src/meta_prompt/__init__.py
DELETED
@@ -1 +0,0 @@
-from .meta_prompt import AgentState, MetaPromptGraph
tests/meta_prompt_graph_test.py
CHANGED
@@ -79,9 +79,9 @@ class TestMetaPromptGraph(unittest.TestCase):
         assert updated_state.accepted == True
 
     def test_workflow_execution(self):
-        MODEL_NAME = "anthropic/claude-3.5-sonnet:
+        # MODEL_NAME = "anthropic/claude-3.5-sonnet:beta"
         # MODEL_NAME = "meta-llama/llama-3-70b-instruct"
-
+        MODEL_NAME = "deepseek/deepseek-chat"
         # MODEL_NAME = "google/gemma-2-9b-it"
         # MODEL_NAME = "recursal/eagle-7b"
         # MODEL_NAME = "meta-llama/llama-3-8b-instruct"
@@ -116,7 +116,7 @@ class TestMetaPromptGraph(unittest.TestCase):
         print(result.content)
 
     def test_workflow_execution_with_llms(self):
-        optimizer_llm = ChatOpenAI(model_name="
+        optimizer_llm = ChatOpenAI(model_name="deepseek/deepseek-chat", temperature=0.5)
         executor_llm = ChatOpenAI(model_name="meta-llama/llama-3-8b-instruct", temperature=0.01)
 
         llms = {
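
For orientation, a sketch of the two-model setup this test wires up; the dictionary keys below are hypothetical, since the diff cuts off before the real node names defined by MetaPromptGraph:

# Sketch with hypothetical role keys.
from langchain_openai import ChatOpenAI

optimizer_llm = ChatOpenAI(model_name="deepseek/deepseek-chat", temperature=0.5)
executor_llm = ChatOpenAI(model_name="meta-llama/llama-3-8b-instruct", temperature=0.01)
llms = {
    "optimizer": optimizer_llm,  # hypothetical key: proposes/refines prompts
    "executor": executor_llm,    # hypothetical key: runs candidate prompts
}

The higher optimizer temperature (0.5) against a near-deterministic executor (0.01) suggests varied prompt candidates paired with reproducible executions.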