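"""LangGraph pipeline for generating social media posts.

A hierarchical multi-agent setup: an overall supervisor routes work between a
research team (Qdrant retrieval plus Tavily web search) and a writing team
(draft creation, style editing, review), each coordinated by its own supervisor chain.
"""
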
from typing import Dict, List, TypedDict, Sequence
from langgraph.graph import StateGraph, END
from langchain.schema import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain_community.tools.tavily_search import TavilySearchResults
import models
import prompts
import json
from operator import itemgetter
from langgraph.errors import GraphRecursionError


#######################################
###     Research Team Components    ###
#######################################
class ResearchState(TypedDict):
    workflow: List[str]
    topic: str
    research_data: Dict[str, str]
    next: str
    message_to_manager: str
    message_from_manager: str

#
#   Research Chains and Tools
#
qdrant_research_chain = (
    {"context": itemgetter("topic") | models.compression_retriever, "topic": itemgetter("topic")}
    | RunnablePassthrough.assign(context=itemgetter("context"))
    | {"response": prompts.research_query_prompt | models.gpt4o_mini | StrOutputParser(), "context": itemgetter("context")}
)

tavily_tool = TavilySearchResults(max_results=3)
query_chain = ( prompts.search_query_prompt | models.gpt4o_mini | StrOutputParser() )
tavily_simple = ({"tav_results": tavily_tool} | prompts.tavily_prompt | models.gpt4o_mini | StrOutputParser())
tavily_chain = (
    {"query": query_chain} | tavily_simple
)

research_supervisor_chain = (
    prompts.research_supervisor_prompt | models.gpt4o | StrOutputParser()
)

#
#   Research Node Defs
#
def query_qdrant(state: ResearchState) -> ResearchState:
    #print("qdrant node")
    topic = state["topic"]
    result = qdrant_research_chain.invoke({"topic": topic})
    #print(result)
    state["research_data"]["qdrant_results"] = result["response"]
    state['workflow'].append("query_qdrant")
    #print(state['workflow'])

    return state

def web_search(state: ResearchState) -> ResearchState:
    #print("tavily node")
    # Extract the last message as the topic
    topic = state["topic"]
    #print(topic)
    # Get the Qdrant results from the state
    qdrant_results = state["research_data"].get("qdrant_results", "No previous results available.")
    # Run the web search chain
    result = tavily_chain.invoke({
        "topic": topic,
        "qdrant_results": qdrant_results
    })
    #print(result)
    # Update the state with the web search results
    state["research_data"]["web_search_results"] = result
    state['workflow'].append("web_search")
    #print(state['workflow'])
    return state

def research_supervisor(state):
    #print("research supervisor node")
    message_from_manager = state["message_from_manager"]
    collected_data = state["research_data"]
    topic = state['topic']
    supervisor_result = research_supervisor_chain.invoke({"message_from_manager": message_from_manager, "collected_data": collected_data, "topic": topic})
    lines = supervisor_result.split('\n')
    #print(supervisor_result)
    for line in lines:
        if line.startswith('Next Action: '):
            state['next'] = line[len('Next Action: '):].strip()  # Extract the next action content
        elif line.startswith('Message to project manager: '):
            state['message_to_manager'] = line[len('Message to project manager: '):].strip()
    state['workflow'].append("research_supervisor")
    #print(state['workflow'])
    return state

def research_end(state):
    #print("research_end node")
    state['workflow'].append("research_end")
    #print(state['workflow'])
    return state

#######################################
###     Writing Team Components     ###
#######################################
class WritingState(TypedDict):
    workflow: List[str]
    topic: str
    research_data: Dict[str, str]
    draft_posts: Sequence[str]
    final_post: str
    next: str
    message_to_manager: str
    message_from_manager: str
    review_comments: str
    style_checked: bool

#
#   Writing Chains
#
writing_supervisor_chain = (
    prompts.writing_supervisor_prompt | models.gpt4o | StrOutputParser()
)

post_creation_chain = (
    prompts.post_creation_prompt | models.gpt4o_mini | StrOutputParser()
)

post_editor_chain = (
    prompts.post_editor_prompt | models.gpt4o | StrOutputParser()
)

post_review_chain = (
    prompts.post_review_prompt | models.gpt4o | StrOutputParser()
)

#
#   Writing Node Defs
#
def post_creation(state):
    print("post_creation node")
    topic = state['topic']
    drafts = state['draft_posts']
    collected_data = state["research_data"]
    review_comments = state['review_comments']
    results = post_creation_chain.invoke({"topic": topic, "collected_data": collected_data, "drafts": drafts, "review_comments": review_comments})
    state['draft_posts'].append(results)
    state['workflow'].append("post_creation")
    return state

def post_editor(state):
    print("post_editor node")
    current_draft = state['draft_posts'][-1]
    styleguide = prompts.style_guide_text
    review_comments = state['review_comments']
    results = post_editor_chain.invoke({"current_draft": current_draft, "styleguide": styleguide, "review_comments": review_comments})
    state['draft_posts'].append(results)
    state['workflow'].append("post_editor")
    return state

def post_review(state):
    print("post_review node")
    current_draft = state['draft_posts'][-1]
    styleguide = prompts.style_guide_text
    results = post_review_chain.invoke({"current_draft": current_draft, "styleguide": styleguide})
    data = json.loads(results.strip())
    state['review_comments'] = data["Comments on current draft"]
    if data["Draft Acceptable"] == 'Yes':
        state['final_post'] = state['draft_posts'][-1]
    state['workflow'].append("post_review")
    return state

def writing_end(state):
    print("writing_end node")
    state['workflow'].append("writing_end")
    print(state['workflow'])
    return state

def writing_supervisor(state):
    print("writing_supervisor node")
    message_from_manager = state['message_from_manager']
    topic = state['topic']
    drafts = state['draft_posts']
    final_draft = state['final_post']
    review_comments = state['review_comments']
    supervisor_result = writing_supervisor_chain.invoke({"review_comments": review_comments, "message_from_manager": message_from_manager, "topic": topic, "drafts": drafts, "final_draft": final_draft})
    lines = supervisor_result.split('\n')
    print(supervisor_result)
    for line in lines:
        if line.startswith('Next Action: '):
            state['next'] = line[len('Next Action: '):].strip()  # Extract the next action content
        elif line.startswith('Message to project manager: '):
            state['message_to_manager'] = line[len('Message to project manager: '):].strip()
    state['workflow'].append("writing_supervisor")
    return state

#######################################
###  Overarching Graph Components   ###
#######################################
class State(TypedDict):
    workflow: List[str]
    topic: str
    research_data: Dict[str, str]
    draft_posts: Sequence[str]
    final_post: str
    next: str
    user_input: str
    message_to_manager: str
    message_from_manager: str
    last_active_team: str
    next_team: str
    review_comments: str

#
#   Complete Graph Chains
#
overall_supervisor_chain = (
    prompts.overall_supervisor_prompt | models.gpt4o | StrOutputParser()
)

#
#   Complete Graph Node defs
#
def overall_supervisor(state):
    print("overall supervisor node")
    # Implement overall supervision logic
    init_user_query = state["user_input"]
    message_to_manager = state['message_to_manager']
    last_active_team = state['last_active_team']
    supervisor_result = overall_supervisor_chain.invoke({"query": init_user_query, "message_to_manager": message_to_manager, "last_active_team": last_active_team})
    lines = supervisor_result.split('\n')
    print(supervisor_result)
    for line in lines:
        if line.startswith('Next Action: '):
            state['next_team'] = line[len('Next Action: '):].strip()  # Extract the next action content
        elif line.startswith('Extracted Topic: '):
            state['topic'] = line[len('Extracted Topic: '):].strip()  # Extract the topic content
        elif line.startswith('Message to supervisor: '):
            state['message_from_manager'] = line[len('Message to supervisor: '):].strip()  # Extract the message for the team supervisor
    state['workflow'].append("overall_supervisor")
    print(state['next_team'])
    print(state['workflow'])
    return state

#######################################
###         Graph structures        ###
#######################################

#
#   Research Graph Nodes
#
research_graph = StateGraph(ResearchState)
research_graph.add_node("query_qdrant", query_qdrant)
research_graph.add_node("web_search", web_search)
research_graph.add_node("research_supervisor", research_supervisor)
research_graph.add_node("research_end", research_end)
#
#   Research Graph Edges
#
research_graph.set_entry_point("research_supervisor")
research_graph.add_edge("query_qdrant", "research_supervisor")
research_graph.add_edge("web_search", "research_supervisor")
research_graph.add_conditional_edges(
    "research_supervisor",
    lambda x: x["next"],
    {"query_qdrant": "query_qdrant", "web_search": "web_search", "FINISH": "research_end"},
)
research_graph_comp = research_graph.compile()

#
#   Writing Graph Nodes
#
writing_graph = StateGraph(WritingState)
writing_graph.add_node("post_creation", post_creation)
writing_graph.add_node("post_editor", post_editor)
writing_graph.add_node("post_review", post_review)
writing_graph.add_node("writing_supervisor", writing_supervisor)
writing_graph.add_node("writing_end", writing_end)
#
#   Writing Graph Edges
#
writing_graph.set_entry_point("writing_supervisor")
writing_graph.add_edge("post_creation", "post_editor")
writing_graph.add_edge("post_editor", "post_review")
writing_graph.add_edge("post_review", "writing_supervisor")
writing_graph.add_conditional_edges(
    "writing_supervisor",
    lambda x: x["next"],
    {"NEW DRAFT": "post_creation", 
     "FINISH": "writing_end"},
)
writing_graph_comp = writing_graph.compile()

#
#   Complete Graph Nodes
#
overall_graph = StateGraph(State)
overall_graph.add_node("overall_supervisor", overall_supervisor)
overall_graph.add_node("research_team_graph", research_graph_comp)
overall_graph.add_node("writing_team_graph", writing_graph_comp)
#
#   Complete Graph Edges
#
overall_graph.set_entry_point("overall_supervisor")
overall_graph.add_edge("research_team_graph", "overall_supervisor")
overall_graph.add_edge("writing_team_graph", "overall_supervisor")
overall_graph.add_conditional_edges(
    "overall_supervisor",
    lambda x: x["next_team"],
    {"research_team": "research_team_graph",
     "writing_team": "writing_team_graph", 
     "FINISH": END},
)
app = overall_graph.compile()


#######################################
###         Run method              ###
#######################################

def getSocialMediaPost(userInput: str) -> str:
    initial_state = State(
        workflow=[],
        topic="",
        research_data={},
        draft_posts=[],
        final_post="",
        next="",
        next_team="",
        user_input=userInput,
        message_to_manager="",
        message_from_manager="",
        last_active_team="",
        review_comments=""
    )
    try:
        # Cap recursion so a supervisor that never emits FINISH cannot loop forever
        results = app.invoke(initial_state, {"recursion_limit": 30})
    except GraphRecursionError:
        return "Recursion Error"
    return results['final_post']
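

#######################################
###          Example usage          ###
#######################################
# Minimal sketch of a manual invocation, assuming models.py and prompts.py are
# configured (API keys, retriever, prompt templates). The query string below is
# an illustrative placeholder, not part of the original module.
if __name__ == "__main__":
    example_post = getSocialMediaPost("Write a social media post about multi-agent LLM workflows")
    print(example_post)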