Upload 2 files

utils/langgraph_pipeline.py  CHANGED  (+71 -24)

@@ -21,6 +21,8 @@ from agents import (
 class InputState(TypedDict):
     messages: List[BaseMessage]
     chat_log: List[Dict[str, Any]]
+    iteration: int
+    feedback: str
 
 class OutputState(TypedDict):
     pm_output: str
@@ -30,6 +32,8 @@ class OutputState(TypedDict):
     dev_output: str
     qa_output: str
     chat_log: List[Dict[str, Any]]
+    iteration: int
+    feedback: str
 
 # ──────────────
 # 2) Wrap agents so they see full history
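
Note: the commit threads `iteration` and `feedback` (alongside the existing `messages` and `chat_log`) through every node's return value by hand. For comparison, LangGraph state channels can instead declare reducers via `Annotated`, so each node returns only its delta and the framework accumulates. A minimal sketch of that alternative (the `PipelineState` name is illustrative, not part of this commit):

# Sketch: reducer-based state instead of hand-threading every channel.
from operator import add
from typing import Annotated, Any, Dict, List, TypedDict
from langchain_core.messages import BaseMessage
from langgraph.graph.message import add_messages

class PipelineState(TypedDict):
    messages: Annotated[List[BaseMessage], add_messages]  # appended per node
    chat_log: Annotated[List[Dict[str, Any]], add]        # lists concatenated
    iteration: int  # plain channel: last write wins
    feedback: str
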
@@ -37,12 +41,21 @@ class OutputState(TypedDict):
 def wrap_agent(agent_run, output_key: str):
     def node(state: Dict[str, Any]) -> Dict[str, Any]:
         history = state["messages"]
-        log …
-…
+        log = state["chat_log"]
+        iteration = state.get("iteration", 0)
+        feedback = state.get("feedback", "")
+
+        # Add feedback to the prompt if it exists
+        if feedback:
+            history = history + [AIMessage(content=f"Previous feedback: {feedback}")]
+
+        result = agent_run({"messages": history, "chat_log": log})
         return {
             "messages": history + result["messages"],
-            "chat_log": …
-            output_key: …
+            "chat_log": result["chat_log"],
+            output_key: result[output_key],
+            "iteration": iteration,
+            "feedback": feedback
         }
     return node
 
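
As wrapped here, each agent's `run` is expected to accept `{"messages", "chat_log"}` and return its new messages, the updated log, and one named output. A stub invocation makes that contract concrete; `stub_run` and the sample state below are illustrative only:

# Sketch: exercising a wrapped node with a stub agent.
from langchain_core.messages import AIMessage, HumanMessage

def stub_run(payload):
    # Mimics an agent: reads the payload, appends one reply and one log entry.
    reply = AIMessage(content="ack")
    return {
        "messages": [reply],
        "chat_log": payload["chat_log"] + [{"role": "Stub", "content": "ack"}],
        "stub_output": "ack",
    }

node = wrap_agent(stub_run, "stub_output")
out = node({"messages": [HumanMessage(content="hi")], "chat_log": [],
            "iteration": 0, "feedback": ""})
assert out["stub_output"] == "ack" and len(out["messages"]) == 2
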
@@ -51,50 +64,84 @@ def wrap_agent(agent_run, output_key: str):
 # ──────────────
 def bridge_to_pm(state: Dict[str, Any]) -> Dict[str, Any]:
     history = state["messages"]
-    log …
+    log = state["chat_log"]
+    iteration = state.get("iteration", 0)
+    feedback = state.get("feedback", "")
+
     if not history or not isinstance(history[-1], HumanMessage):
         raise ValueError("bridge_to_pm expected a HumanMessage at history end")
+
     prompt = history[-1].content
     spec_prompt = (
-        f"# Stakeholder Prompt\n\n"
+        f"# Stakeholder Prompt (Iteration {iteration})\n\n"
         f"\"{prompt}\"\n\n"
+    )
+
+    if feedback:
+        spec_prompt += f"Previous feedback to consider:\n{feedback}\n\n"
+
+    spec_prompt += (
         "Generate a structured product specification including:\n"
         "- Goals\n"
         "- Key features\n"
         "- User stories\n"
         "- Success metrics\n"
     )
+
     return {
         "messages": [AIMessage(content=spec_prompt)],
         "chat_log": log + [{"role": "System", "content": spec_prompt}],
+        "iteration": iteration,
+        "feedback": feedback
     }
 
 # ──────────────
-# 4) …
+# 4) Feedback Loop Handler
 # ──────────────
-…
+def handle_feedback(state: Dict[str, Any]) -> Dict[str, Any]:
+    qa_output = state["qa_output"]
+    iteration = state.get("iteration", 0)
+
+    # Check if we need another iteration
+    if iteration < 3: # Maximum 3 iterations
+        return {
+            "messages": state["messages"],
+            "chat_log": state["chat_log"],
+            "iteration": iteration + 1,
+            "feedback": f"Iteration {iteration + 1} feedback: {qa_output}"
+        }
+    return END
 
-…
-…
-…
-graph …
-graph.add_node("UIDesigner", wrap_agent(ui_designer_agent.run, "ui_design_output"))
-graph.add_node("SoftwareEngineer", wrap_agent(software_engineer_agent.run, "dev_output"))
-graph.add_node("QualityAssurance", wrap_agent(quality_assurance_agent.run, "qa_output"))
+# ──────────────
+# 5) Build & compile the LangGraph
+# ──────────────
+graph = StateGraph(input=InputState, output=OutputState)
 
+# Add nodes
+graph.add_node("BridgePM", bridge_to_pm)
+graph.add_node("ProductManager", wrap_agent(product_manager_agent.run, "pm_output"))
+graph.add_node("ProjectManager", wrap_agent(project_manager_agent.run, "proj_output"))
+graph.add_node("SoftwareArchitect", wrap_agent(software_architect_agent.run, "arch_output"))
+graph.add_node("UIDesigner", wrap_agent(ui_designer_agent.run, "ui_design_output"))
+graph.add_node("SoftwareEngineer", wrap_agent(software_engineer_agent.run, "dev_output"))
+graph.add_node("QualityAssurance", wrap_agent(quality_assurance_agent.run, "qa_output"))
+graph.add_node("FeedbackHandler", handle_feedback)
+
+# Add edges with feedback loop
 graph.set_entry_point("BridgePM")
-graph.add_edge("BridgePM", …
-graph.add_edge("ProductManager", …
-graph.add_edge("ProjectManager", …
-graph.add_edge("SoftwareArchitect","UIDesigner")
-graph.add_edge("UIDesigner", …
+graph.add_edge("BridgePM", "ProductManager")
+graph.add_edge("ProductManager", "ProjectManager")
+graph.add_edge("ProjectManager", "SoftwareArchitect")
+graph.add_edge("SoftwareArchitect", "UIDesigner")
+graph.add_edge("UIDesigner", "SoftwareEngineer")
 graph.add_edge("SoftwareEngineer", "QualityAssurance")
-graph.add_edge("QualityAssurance", …
+graph.add_edge("QualityAssurance", "FeedbackHandler")
+graph.add_edge("FeedbackHandler", "BridgePM") # Feedback loop back to start
 
 compiled_graph = graph.compile()
 
 # ──────────────
-# …
+# 6) Parse spec into sections
 # ──────────────
 def parse_spec(spec: str) -> Dict[str, List[str]]:
     sections: Dict[str, List[str]] = {}
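
Two things to note about the loop as committed: `handle_feedback` returns the `END` sentinel once the iteration budget is spent, but a LangGraph node is expected to return a state mapping, so that branch would error rather than end the run; and the unconditional `"FeedbackHandler" -> "BridgePM"` edge loops regardless of the counter. The usual LangGraph pattern routes the decision through a conditional edge, with `END` as a branch target. A sketch (the `route_after_feedback` helper is illustrative, not in the commit):

# Sketch: terminate the feedback loop via a conditional edge.
from langgraph.graph import END

def route_after_feedback(state: Dict[str, Any]) -> str:
    # Loop back to BridgePM until the three-iteration budget is spent.
    return "BridgePM" if state.get("iteration", 0) < 3 else END

graph.add_conditional_edges("FeedbackHandler", route_after_feedback)

With this routing in place, `handle_feedback` can always return a plain state update, and the unconditional `graph.add_edge("FeedbackHandler", "BridgePM")` line becomes unnecessary.
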
@@ -105,11 +152,11 @@ def parse_spec(spec: str) -> Dict[str, List[str]]:
     return sections
 
 # ──────────────
-# …
+# 7) Run pipeline, generate site, zip, return (chat_log, zip_path)
 # ──────────────
 def run_pipeline_and_save(prompt: str) -> Tuple[List[Dict[str, Any]], str]:
     # a) invoke agents
-    initial_state = {"messages": [HumanMessage(content=prompt)], "chat_log": []}
+    initial_state = {"messages": [HumanMessage(content=prompt)], "chat_log": [], "iteration": 0, "feedback": ""}
     final_state = compiled_graph.invoke(initial_state)
 
     chat_log = final_state["chat_log"]
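
Because the compiled graph now cycles, each full pass visits eight nodes, and LangGraph aborts with `GraphRecursionError` once a run exceeds the default recursion limit of 25 steps, a ceiling the three feedback iterations can hit. Raising the budget at invocation time is standard config; a sketch (the limit value is illustrative):

# Sketch: give the looping graph more headroom than the default 25 steps.
final_state = compiled_graph.invoke(
    {"messages": [HumanMessage(content=prompt)], "chat_log": [],
     "iteration": 0, "feedback": ""},
    config={"recursion_limit": 50},
)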