Rahul-8799 committed
Commit 6678303 · verified · 1 Parent(s): 5261f65

Update utils/langgraph_pipeline.py

Files changed (1)
  1. utils/langgraph_pipeline.py +84 -89
utils/langgraph_pipeline.py CHANGED
@@ -13,12 +13,9 @@ from agents import (
     project_manager_agent,
     software_architect_agent,
     software_engineer_agent,
-    quality_assurance_agent,
 )
 
-# ——————————————
-# 1) State shapes
-# ——————————————
+# 1) Define the shapes of our state
 class InputState(TypedDict):
     messages: List[BaseMessage]
     chat_log: List[Dict[str, Any]]
@@ -28,137 +25,135 @@ class OutputState(TypedDict):
     proj_output: str
     arch_output: str
     dev_output: str
-    qa_output: str
     chat_log: List[Dict[str, Any]]
 
-# ——————————————
-# 2) Wrap each agent so it sees the full conversation history
-# ——————————————
+
+# 2) Helper to wrap any agent so it sees full history
 def wrap_agent(agent_run, output_key: str):
     def node(state: Dict[str, Any]) -> Dict[str, Any]:
         history = state["messages"]
         log = state["chat_log"]
         result = agent_run({"messages": history, "chat_log": log})
-        # append any AIMessage(s) that the agent returned
+        # append the AIMessage(s) returned by the agent
         new_history = history + result["messages"]
         return {
             "messages": new_history,
-            "chat_log": result["chat_log"],
-            output_key: result[output_key],
+            "chat_log": result["chat_log"],
+            output_key: result[output_key],
         }
     return node
 
-# ——————————————
-# 3) Bridge to seed the first AIMessage for ProductManager
-# ——————————————
+
+# 3) Seed the chain by turning the user's prompt into a spec for PM
 def bridge_to_pm(state: Dict[str, Any]) -> Dict[str, Any]:
     history = state["messages"]
     log = state["chat_log"]
+    # last message must be the HumanMessage
     if not history or not isinstance(history[-1], HumanMessage):
-        raise ValueError("bridge_to_pm expected last message to be HumanMessage")
+        raise ValueError("bridge_to_pm expects a HumanMessage at the end of history")
     prompt = history[-1].content
 
     spec_prompt = (
         f"# Stakeholder Prompt\n\n"
         f"\"{prompt}\"\n\n"
-        "Please generate a structured product specification including:\n"
+        "Generate a structured product specification including:\n"
         "- Goals\n"
         "- Key features\n"
         "- User stories\n"
         "- Success metrics\n"
     )
-    ai = AIMessage(content=spec_prompt)
     return {
-        "messages": [ai],
+        "messages": [AIMessage(content=spec_prompt)],
         "chat_log": log + [{"role": "System", "content": spec_prompt}],
     }
 
-# ——————————————
-# 4) Build & compile the LangGraph
-# ——————————————
+
+# 4) After architecture, ask the Software Engineer for two files
+def bridge_to_code(state: Dict[str, Any]) -> Dict[str, Any]:
+    history = state["messages"]
+    log = state["chat_log"]
+    # Ensure the architect has spoken
+    if not history or not isinstance(history[-1], AIMessage):
+        raise ValueError("bridge_to_code expects an AIMessage from the architect")
+    # Grab the original user prompt for titling
+    user_prompt = history[0].content
+    title = user_prompt.title()
+
+    instruction = (
+        f"Now generate two files for a static website based on the above architecture:\n\n"
+        f"===index.html===\n"
+        f"<!DOCTYPE html>\n"
+        f"<html lang=\"en\">\n"
+        f"<head>\n"
+        f"  <meta charset=\"UTF-8\">\n"
+        f"  <meta name=\"viewport\" content=\"width=device-width,initial-scale=1\">\n"
+        f"  <title>{title}</title>\n"
+        f"  <link rel=\"stylesheet\" href=\"styles.css\">\n"
+        f"</head>\n"
+        f"<body>\n"
+        f"  <!-- Your site UI goes here -->\n"
+        f"</body>\n"
+        f"</html>\n\n"
+        f"===styles.css===\n"
+        f"/* Your CSS styles go here */\n"
+    )
+    return {
+        "messages": history + [AIMessage(content=instruction)],
+        "chat_log": log + [{"role": "System", "content": instruction}],
+    }
+
+
+# 5) Build and compile the LangGraph
 graph = StateGraph(input=InputState, output=OutputState)
 
+# Nodes
 graph.add_node("BridgePM", bridge_to_pm)
-graph.add_node("ProductManager", wrap_agent(product_manager_agent.run, "pm_output"))
-graph.add_node("ProjectManager", wrap_agent(project_manager_agent.run, "proj_output"))
-graph.add_node("SoftwareArchitect", wrap_agent(software_architect_agent.run, "arch_output"))
-graph.add_node("SoftwareEngineer", wrap_agent(software_engineer_agent.run, "dev_output"))
-graph.add_node("QualityAssurance", wrap_agent(quality_assurance_agent.run, "qa_output"))
+graph.add_node("ProductManager", wrap_agent(product_manager_agent.run, "pm_output"))
+graph.add_node("ProjectManager", wrap_agent(project_manager_agent.run, "proj_output"))
+graph.add_node("SoftwareArchitect", wrap_agent(software_architect_agent.run,"arch_output"))
+graph.add_node("BridgeCode", bridge_to_code)
+graph.add_node("SoftwareEngineer", wrap_agent(software_engineer_agent.run,"dev_output"))
 
+# Flow
 graph.set_entry_point("BridgePM")
-graph.add_edge("BridgePM", "ProductManager")
-graph.add_edge("ProductManager", "ProjectManager")
-graph.add_edge("ProjectManager", "SoftwareArchitect")
-graph.add_edge("SoftwareArchitect","SoftwareEngineer")
-graph.add_edge("SoftwareEngineer", "QualityAssurance")
-graph.add_edge("QualityAssurance", END)
+graph.add_edge("BridgePM", "ProductManager")
+graph.add_edge("ProductManager", "ProjectManager")
+graph.add_edge("ProjectManager", "SoftwareArchitect")
+graph.add_edge("SoftwareArchitect", "BridgeCode")
+graph.add_edge("BridgeCode", "SoftwareEngineer")
+graph.add_edge("SoftwareEngineer", END)
 
 compiled_graph = graph.compile()
 
-# ——————————————
-# 5) Run, parse QA spec, generate HTML/CSS, zip, and return
-# ——————————————
+
+# 6) Run the pipeline, parse out the two files, write & zip them
 def run_pipeline_and_save(prompt: str) -> Tuple[List[Dict[str, Any]], str]:
-    # a) Invoke the multi-agent pipeline
-    initial_state = {
-        "messages": [HumanMessage(content=prompt)],
-        "chat_log": [],
-    }
-    final_state = compiled_graph.invoke(initial_state)
-
-    chat_log = final_state["chat_log"]
-    qa_output = final_state["qa_output"]
-
-    # b) Parse the QA spec into sections
-    sections = {}
-    for match in re.finditer(r"##\s*(.+?)\n((?:- .+\n?)+)", qa_output):
-        title = match.group(1).strip()
-        items = [line.strip()[2:].strip() for line in match.group(2).splitlines() if line.startswith("- ")]
-        sections[title] = items
-
-    # c) Build HTML
-    title = prompt.title()
-    html_parts = [
-        "<!DOCTYPE html>",
-        "<html lang='en'><head>",
-        "  <meta charset='UTF-8'>",
-        "  <meta name='viewport' content='width=device-width,initial-scale=1'>",
-        f"  <title>{title}</title>",
-        "  <link rel='stylesheet' href='styles.css'>",
-        "</head><body>",
-        f"  <header><h1>{title}</h1></header>",
-    ]
-    # for each spec section, render as a <section>
-    for sec in ["Goals", "Key features", "User stories", "Success metrics"]:
-        items = sections.get(sec, [])
-        html_parts.append(f"  <section id='{sec.lower().replace(' ', '-')}'><h2>{sec}</h2><ul>")
-        for it in items:
-            html_parts.append(f"    <li>{it}</li>")
-        html_parts.append("  </ul></section>")
-    html_parts.append("</body></html>")
-    html_code = "\n".join(html_parts)
-
-    # d) Simple CSS
-    css_code = """
-    body { font-family: Arial, sans-serif; margin: 2em; line-height: 1.5; }
-    header { text-align: center; margin-bottom: 2em; }
-    section { margin-bottom: 1.5em; }
-    section h2 { color: #333; }
-    ul { list-style: disc inside; }
-    """
-
-    # e) Write files & zip
+    # a) Run through all agents
+    initial_state = {"messages": [HumanMessage(content=prompt)], "chat_log": []}
+    final_state = compiled_graph.invoke(initial_state)
+
+    chat_log = final_state["chat_log"]
+    code_blob = final_state["dev_output"]
+
+    # b) Extract HTML and CSS between our markers
+    html_match = re.search(r"===index\.html===\s*(.*?)\s*===styles\.css===", code_blob, re.S)
+    css_match = re.search(r"===styles\.css===\s*(.*)", code_blob, re.S)
+
+    html_code = html_match.group(1).strip() if html_match else ""
+    css_code = css_match.group(1).strip() if css_match else ""
+
+    # c) Write files to disk
     site_id = uuid.uuid4().hex
-    out_dir = Path("output")
-    site_dir = out_dir / f"site_{site_id}"
+    site_dir = Path("output") / f"site_{site_id}"
    site_dir.mkdir(parents=True, exist_ok=True)
 
     (site_dir / "index.html").write_text(html_code, encoding="utf-8")
-    (site_dir / "styles.css").write_text(css_code, encoding="utf-8")
+    (site_dir / "styles.css").write_text(css_code, encoding="utf-8")
 
-    zip_path = out_dir / f"site_{site_id}.zip"
+    # d) Zip them up
+    zip_path = Path("output") / f"site_{site_id}.zip"
     with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf:
         for f in site_dir.iterdir():
             zf.write(f, arcname=f.name)
 
-    return chat_log, str(zip_path)
+    return chat_log, str(zip_path)
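
The rewritten graph still routes every agent through wrap_agent, which only assumes that each agents.* module exposes a run callable taking a dict with "messages" and "chat_log" and returning its new messages, an updated chat_log, and its own output key. A minimal stand-in illustrating that assumed contract; the function name, the reply text, and the langchain_core import path are invented for illustration and are not taken from this repository:

from typing import Any, Dict
from langchain_core.messages import AIMessage  # assumed import path; the project may import messages elsewhere

def fake_engineer_run(state: Dict[str, Any]) -> Dict[str, Any]:
    # Hypothetical stand-in for software_engineer_agent.run: replies in the
    # two-file format that BridgeCode asks for.
    reply = AIMessage(content="===index.html===\n<!DOCTYPE html>...\n===styles.css===\n/* ... */")
    return {
        "messages": [reply],  # wrap_agent appends these to the shared history
        "chat_log": state["chat_log"] + [{"role": "SoftwareEngineer", "content": reply.content}],
        "dev_output": reply.content,  # picked up through the output_key argument
    }

# wrap_agent(fake_engineer_run, "dev_output") would turn this into a graph node
# that merges the three keys back into the state.
state = {"messages": [], "chat_log": []}
print(fake_engineer_run(state)["dev_output"].splitlines()[0])  # ===index.html===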
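
The new run_pipeline_and_save no longer builds HTML itself; it expects the engineer's reply to carry both files between ===index.html=== and ===styles.css=== markers and splits them with two regexes. A quick sketch of that extraction on a made-up reply, where only the markers and the two patterns come from the code above:

import re

# Hypothetical engineer reply that follows the ===file=== convention from the diff.
code_blob = """Here are the files you asked for.
===index.html===
<!DOCTYPE html>
<html lang="en">
<head><title>Demo</title><link rel="stylesheet" href="styles.css"></head>
<body><h1>Demo</h1></body>
</html>
===styles.css===
body { font-family: Arial, sans-serif; }
"""

# Same two patterns as in run_pipeline_and_save.
html_match = re.search(r"===index\.html===\s*(.*?)\s*===styles\.css===", code_blob, re.S)
css_match = re.search(r"===styles\.css===\s*(.*)", code_blob, re.S)

html_code = html_match.group(1).strip() if html_match else ""
css_code = css_match.group(1).strip() if css_match else ""

print(html_code.startswith("<!DOCTYPE html>"))  # True
print(css_code)                                 # body { font-family: Arial, sans-serif; }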
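
For completeness, a rough sketch of driving the updated pipeline from calling code; the prompt string is invented, utils is assumed to be importable as a package, and the modules in agents must be configured with working model credentials:

from utils.langgraph_pipeline import run_pipeline_and_save

# Invented stakeholder prompt, purely for illustration.
chat_log, zip_path = run_pipeline_and_save("A one-page site for a neighborhood bakery")

print("Zipped site:", zip_path)  # e.g. output/site_<hex>.zip
for turn in chat_log:
    # Each entry is a {"role": ..., "content": ...} dict appended by the bridge nodes and agents.
    print(f"[{turn['role']}] {turn['content'][:80]}")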