Update utils/langgraph_pipeline.py
utils/langgraph_pipeline.py CHANGED (+50 -12)
@@ -8,13 +8,17 @@ from agents import (
     quality_assurance_agent,
 )
 from langchain_core.messages import HumanMessage, AIMessage
-from typing import TypedDict, List
 from langchain_core.messages.base import BaseMessage
+from langchain_core.tools import StructuredTool
+from typing import TypedDict, List
 
+
+# Define LangGraph input/output state types
 class InputState(TypedDict):
     messages: List[BaseMessage]
     chat_log: list
 
+
 class OutputState(TypedDict):
     pm_output: str
     proj_output: str
@@ -23,36 +27,64 @@ class OutputState(TypedDict):
     qa_output: str
     chat_log: list
 
-
+
+# -----------------------------
+# Bridge Node Logic (Fixed ✅)
+# -----------------------------
+def bridge_to_product_manager_fn(state: dict) -> dict:
     """
-
+    Converts HumanMessage into structured AIMessage so the Product Manager agent can consume it.
     """
-
+    if "messages" not in state or not isinstance(state["messages"], list):
+        raise ValueError("Input must contain a 'messages' list.")
+
+    last_msg = state["messages"][-1]
+    if not isinstance(last_msg, HumanMessage):
+        raise ValueError("Last message must be a HumanMessage.")
 
-
+    # Structured prompt injected as system-level AIMessage
+    structured_prompt = f"""# Stakeholder Prompt
 
 A new product request has been submitted:
 
-"{
+"{last_msg.content}"
 
-Please analyze and
+Please analyze and convert this into a structured product specification including:
+- Goals
+- Features
+- User stories
+- KPIs
 """
 
     return {
-        "messages": state["messages"] + [AIMessage(content=
-        "chat_log": state["chat_log"] + [{"role": "System", "content":
+        "messages": state["messages"] + [AIMessage(content=structured_prompt)],
+        "chat_log": state["chat_log"] + [{"role": "System", "content": structured_prompt}],
     }
 
-# Define the bridge node as a simple function node
-def bridge_node(state: dict) -> dict:
-    return bridge_to_product_manager(state)
 
+# ✅ Manually wrap bridge function as StructuredTool
+bridge_tool = StructuredTool.from_function(
+    func=bridge_to_product_manager_fn,
+    name="bridge_to_product_manager",
+    description="Converts HumanMessage into structured system prompt for Product Manager."
+)
+
+bridge_node = ToolNode([bridge_tool])
+
+
+# -----------------------------
+# Define Other Agent ToolNodes
+# -----------------------------
 pm_node = ToolNode([product_manager_agent.run])
 proj_node = ToolNode([project_manager_agent.run])
 arch_node = ToolNode([software_architect_agent.run])
 dev_node = ToolNode([software_engineer_agent.run])
 qa_node = ToolNode([quality_assurance_agent.run])
 
+
+# -----------------------------
+# Build LangGraph
+# -----------------------------
 graph = StateGraph(input=InputState, output=OutputState)
 
 graph.add_node("Bridge", bridge_node)
@@ -72,10 +104,16 @@ graph.add_edge("QualityAssurance", END)
 
 compiled_graph = graph.compile()
 
+
+# -----------------------------
+# Run the full pipeline
+# -----------------------------
 def run_pipeline_and_save(prompt: str):
     initial_state = {
         "messages": [HumanMessage(content=prompt)],
         "chat_log": [],
     }
+
     final_state = compiled_graph.invoke(initial_state)
+
     return final_state["chat_log"], final_state["qa_output"]
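For context, a minimal sketch of how the updated pipeline could be exercised from elsewhere in the Space. The example prompt and the printing loop are illustrative only and not part of this commit; it assumes the module imports cleanly and that the agents package defines the five agent objects referenced above.

# Usage sketch (not part of the diff above)
from utils.langgraph_pipeline import run_pipeline_and_save

# Hypothetical stakeholder request; any free-form product idea works here.
example_prompt = "Build a habit-tracking web app for remote teams."

# run_pipeline_and_save returns (chat_log, qa_output) per the function above.
chat_log, qa_output = run_pipeline_and_save(example_prompt)

# chat_log entries are dicts with "role" and "content" keys, as built by the bridge node.
for entry in chat_log:
    print(f"[{entry['role']}] {entry['content'][:80]}")
print("QA output:", qa_output)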