Parallel processing for NODE_PROMPT_INITIAL_DEVELOPER and NODE_ACCEPTANCE_CRITERIA_DEVELOPER.
- meta_prompt/meta_prompt.py +24 -16
- tests/meta_prompt_graph_test.py +2 -2
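The graph previously ran the two developer nodes one after the other; this commit fans both out from START and joins them at NODE_PROMPT_EXECUTOR. Below is a minimal, self-contained sketch of that fan-out/fan-in pattern in LangGraph; the node names and update values are illustrative stand-ins, not the project's real node implementations:

```python
import operator
from typing import Annotated, Optional

from pydantic import BaseModel
from langgraph.graph import StateGraph, START, END


def first_non_empty(a, b):
    # Reducer: keep whichever of the two writes is non-empty.
    return next((s for s in (a, b) if s), None)


class State(BaseModel):
    # Parallel branches may write to the state in the same superstep,
    # so each shared field needs a reducer to merge the writes.
    system_message: Annotated[Optional[str], first_non_empty] = None
    acceptance_criteria: Annotated[Optional[str], first_non_empty] = None
    accepted: Annotated[bool, operator.or_] = False


def develop_prompt(state: State) -> dict:
    return {"system_message": "You are a helpful assistant."}


def develop_criteria(state: State) -> dict:
    return {"acceptance_criteria": "Reply in exactly one sentence."}


def execute(state: State) -> dict:
    # Fan-in: runs once, after both upstream branches have finished.
    return {"accepted": bool(state.system_message and state.acceptance_criteria)}


workflow = StateGraph(State)
workflow.add_node("initial_developer", develop_prompt)
workflow.add_node("criteria_developer", develop_criteria)
workflow.add_node("executor", execute)

workflow.add_edge(START, "initial_developer")   # fan out: both developer
workflow.add_edge(START, "criteria_developer")  # nodes start immediately
workflow.add_edge("initial_developer", "executor")
workflow.add_edge("criteria_developer", "executor")
workflow.add_edge("executor", END)

graph = workflow.compile()
print(graph.invoke({}))
```

Because both branches are one node deep, they finish in the same superstep and the executor fires exactly once with the merged state.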
meta_prompt/meta_prompt.py
CHANGED
```diff
@@ -1,17 +1,21 @@
 import typing
 import pprint
 import logging
-from typing import Dict, Any, Callable, List, Union, Optional
+import operator
+from typing import Dict, Any, Callable, List, Union, Optional, Annotated
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.messages import HumanMessage, SystemMessage
 from langchain_core.prompts import ChatPromptTemplate
-from langgraph.graph import StateGraph, END
+from langgraph.graph import StateGraph, START, END
 from langgraph.checkpoint.memory import MemorySaver
 from langgraph.errors import GraphRecursionError
 from langchain_core.runnables.base import RunnableLike
 from pydantic import BaseModel
 from .consts import *
 
+def first_non_empty(a, b):
+    return next((s for s in (a, b) if s), None)
+
 class AgentState(BaseModel):
     """
     Represents the state of an agent in a conversation.
@@ -30,18 +34,18 @@ class AgentState(BaseModel):
         - best_system_message (str, optional): The best system message.
         - best_output_age (int): The age of the best output.
     """
-    max_output_age: int = 0
-    user_message: Optional[str] = None
-    expected_output: Optional[str] = None
-    acceptance_criteria: Optional[str] = None
-    system_message: Optional[str] = None
-    output: Optional[str] = None
-    suggestions: Optional[str] = None
-    accepted: bool = False
-    analysis: Optional[str] = None
-    best_output: Optional[str] = None
-    best_system_message: Optional[str] = None
-    best_output_age: int = 0
+    max_output_age: Annotated[int, lambda x, y: max(x, y)] = 0
+    user_message: Annotated[Optional[str], first_non_empty] = None
+    expected_output: Annotated[Optional[str], first_non_empty] = None
+    acceptance_criteria: Annotated[Optional[str], first_non_empty] = None
+    system_message: Annotated[Optional[str], first_non_empty] = None
+    output: Annotated[Optional[str], first_non_empty] = None
+    suggestions: Annotated[Optional[str], first_non_empty] = None
+    accepted: Annotated[bool, operator.or_] = False
+    analysis: Annotated[Optional[str], first_non_empty] = None
+    best_output: Annotated[Optional[str], first_non_empty] = None
+    best_system_message: Annotated[Optional[str], first_non_empty] = None
+    best_output_age: Annotated[int, lambda x, y: max(x, y)] = 0
 
 class MetaPromptGraph:
     """
@@ -225,10 +229,14 @@ class MetaPromptGraph:
                 "acceptance_criteria",
                 x),
             x))
+        # workflow.add_node(START)
+
+        workflow.add_edge(START, NODE_PROMPT_INITIAL_DEVELOPER)
+        workflow.add_edge(START, NODE_ACCEPTANCE_CRITERIA_DEVELOPER)
 
-        workflow.add_edge(NODE_PROMPT_INITIAL_DEVELOPER, NODE_ACCEPTANCE_CRITERIA_DEVELOPER)
+        workflow.add_edge(NODE_PROMPT_INITIAL_DEVELOPER, NODE_PROMPT_EXECUTOR)
         workflow.add_edge(NODE_ACCEPTANCE_CRITERIA_DEVELOPER, NODE_PROMPT_EXECUTOR)
-        workflow.set_entry_point(NODE_PROMPT_INITIAL_DEVELOPER)
+        # workflow.set_entry_point(START)
 
         return workflow
 
```
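The Annotated reducers are what make the fan-out legal: without them, two branches writing into the same AgentState channel in one superstep would make LangGraph raise InvalidUpdateError ("can receive only one value per step"). A small sketch of how the chosen reducers merge a pair of writes, on plain values:

```python
import operator

def first_non_empty(a, b):
    return next((s for s in (a, b) if s), None)

# first_non_empty prefers the existing value and falls back to the update,
# so a field set by one branch survives a None write from the other.
assert first_non_empty(None, "criteria from branch B") == "criteria from branch B"
assert first_non_empty("prompt from branch A", None) == "prompt from branch A"
assert first_non_empty(None, None) is None

# accepted merges with operator.or_: either branch can flip it to True.
assert operator.or_(False, True) is True

# max_output_age / best_output_age keep the larger of the two writes.
assert max(0, 2) == 2
```

One consequence worth noting: a reducer also runs for sequential updates, so first_non_empty keeps the first non-empty value written and discards later rewrites of the same field; if later iterations of the loop are meant to overwrite system_message or output, that interaction may need care.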
tests/meta_prompt_graph_test.py
CHANGED
```diff
@@ -123,8 +123,8 @@ class TestMetaPromptGraph(unittest.TestCase):
         """
         # MODEL_NAME = "anthropic/claude-3.5-sonnet:beta"
         # MODEL_NAME = "meta-llama/llama-3-70b-instruct"
-        MODEL_NAME = "deepseek/deepseek-chat"
-
+        # MODEL_NAME = "deepseek/deepseek-chat"
+        MODEL_NAME = "google/gemma-2-9b-it"
         # MODEL_NAME = "recursal/eagle-7b"
         # MODEL_NAME = "meta-llama/llama-3-8b-instruct"
         llm = ChatOpenAI(model_name=MODEL_NAME)
```
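The test swaps the active model from deepseek/deepseek-chat to google/gemma-2-9b-it. The vendor-prefixed model ids passed to ChatOpenAI suggest an OpenAI-compatible gateway such as OpenRouter; the diff does not show how the test process is configured, so the base URL and environment variables in this sketch are assumptions:

```python
import os
from langchain_openai import ChatOpenAI

# Assumption: an OpenRouter-style OpenAI-compatible endpoint; the actual
# test configuration (base URL, credentials) is not part of this diff.
llm = ChatOpenAI(
    model_name="google/gemma-2-9b-it",
    openai_api_base=os.environ.get("OPENAI_API_BASE", "https://openrouter.ai/api/v1"),
    openai_api_key=os.environ["OPENAI_API_KEY"],
)

print(llm.invoke("Reply with the single word: ok").content)
```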