File size: 7,718 Bytes
4eb10d7
 
4bb72bf
7caef44
4eb10d7
 
01a8f5e
 
4bb72bf
 
4eb10d7
 
 
01a8f5e
 
4eb10d7
 
 
 
59589fc
4eb10d7
 
 
 
 
01a8f5e
4eb10d7
 
 
 
 
 
 
 
 
 
01a8f5e
 
 
4eb10d7
01a8f5e
 
4eb10d7
01a8f5e
4eb10d7
59589fc
4eb10d7
01a8f5e
 
 
4eb10d7
01a8f5e
 
4eb10d7
01a8f5e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4eb10d7
01a8f5e
4eb10d7
01a8f5e
 
4eb10d7
01a8f5e
 
4eb10d7
01a8f5e
 
 
 
169c6ad
01a8f5e
 
169c6ad
01a8f5e
 
 
4eb10d7
01a8f5e
 
 
 
 
 
 
4eb10d7
01a8f5e
4eb10d7
01a8f5e
4eb10d7
01a8f5e
 
 
4eb10d7
 
01a8f5e
 
 
 
 
4eb10d7
01a8f5e
4eb10d7
01a8f5e
 
 
4eb10d7
 
 
01a8f5e
 
 
 
4eb10d7
01a8f5e
 
4eb10d7
01a8f5e
 
169c6ad
01a8f5e
 
4eb10d7
01a8f5e
 
 
 
 
 
 
 
7caef44
01a8f5e
 
 
 
 
 
7caef44
 
 
01a8f5e
 
7caef44
 
01a8f5e
 
 
7caef44
 
 
 
 
 
 
 
 
 
01a8f5e
 
 
 
 
 
7caef44
169c6ad
01a8f5e
7caef44
01a8f5e
 
7caef44
01a8f5e
7caef44
01a8f5e
 
 
 
 
 
7caef44
 
 
 
01a8f5e
 
 
 
 
 
7caef44
01a8f5e
7caef44
169c6ad
01a8f5e
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
import os
import dotenv
from typing import Literal

from langchain_openai import AzureChatOpenAI

from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.prebuilt import ToolNode
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

from langgraph.checkpoint import MemorySaver


from info_retriever import runnable_retriever, retriever_tool_belt


# Load environment variables (API keys, endpoints, project names) from .env.
dotenv.load_dotenv()

VERSION = '1.0_rc3'
# Tag the LangSmith project name with the app version so traces group per
# release. Use .get() so a missing LANGCHAIN_PROJECT doesn't crash startup
# with a KeyError (e.g. when tracing is disabled in the environment).
os.environ["LANGCHAIN_PROJECT"] = os.environ.get("LANGCHAIN_PROJECT", "default") + f" - v. {VERSION}"


############################################
# Set up the model
# Azure OpenAI chat deployment. temperature=0 for deterministic replies;
# streaming=True so tokens can be surfaced incrementally to the UI.
_llm_settings = {
    'azure_deployment': os.environ['AZURE_OPENAI_DEPLOYMENT'],
    'api_version': "2024-05-01-preview",
    'temperature': 0,
    'max_tokens': None,
    'timeout': None,
    'max_retries': 2,
    'streaming': True,
}
llm_chatbot = AzureChatOpenAI(**_llm_settings)


#################################################################################
# RUNNABLE CHAINS
# Create our runnable chains, depending on the prompt we want to pass forward

# System prompt for the main conversational agent: it relays the retriever
# agent's answer verbatim (no summarizing) and must cite sources.
main_prompt = """
You are a helpful agent designed to help the user get the information they requested.

You collaborate with another agent that retrieves the information for you.

When a user asks you a question, you will forward the query to another agent. 

You will receive information from another agent with the results of their research
into the user's query. Your task is to repeat it word by word to the user. 
Do not summarize the other agent's answer. 

You MUST cite your source documents.
"""

# Create a chain with the main prompt.
# MessagesPlaceholder injects the full running conversation after the
# system message, so the model always sees the whole history.
main_prompt_template = ChatPromptTemplate.from_messages(
    [
        (
            "system", main_prompt
        ),
        MessagesPlaceholder(variable_name="messages"),
    ]
)
# Main chain: system prompt + conversation history piped into the Azure LLM.
chat_runnable = main_prompt_template | llm_chatbot

# ----------------------------------------------------------------
# Create a second chain with the prompt that tells the chatbot
# to route the query to the ticket creation

# System prompt for the fallback "ticketing" agent: apologize, then turn the
# user's query into a JSON request for the Business Analytics department.
# NOTE(review): the 'summary' line below has an unbalanced quote
# (USER'S QUERY') — confirm whether the prompt text is intentional.
ticketing_prompt = """
                You are a helpful agent who helps the user send requests to the Business Analytics department.
                You are called only when the system was unable to find the user's requested information.

                Your task is, first, to apologize to the user for not finding the information.
                Then, you will take the user's query and create a well-formatted JSON request.
                The request should be in the following json format:

                {{
                    'project': {{'id': 123}},
                    'summary': USER'S QUERY',
                    'description': summary of the user's query,
                    'issuetype': {{'name': 'Report'}},
                }}

                You will tell the user they can use the JSON request above to make their request to the Business Analytics
                department.
            """     
ticketing_prompt_template  = ChatPromptTemplate.from_messages(
    [
        (
            "system", ticketing_prompt
        ),
        MessagesPlaceholder(variable_name="messages"),
    ]
)

# Ticketing chain: same structure as the main chain, different system prompt.
ticketing_runnable = ticketing_prompt_template | llm_chatbot

#################################################################################
# MANUALLY CREATE OUR GRAPH

#------------------------------------
# We need a custom state

class SimpleAgentState(MessagesState):
    """Extends the default MessagesState to add a current answer type 
    to allow us to properly route the messages"""
    # Values written by the nodes below: 'user_query', 'agent_response',
    # 'no_answer', 'ticket_generated'. The routing functions read this
    # to decide which edge to follow.
    response_type: str

#------------------------------------
# Defining our nodes

def chatbot(state: SimpleAgentState):
    """Main conversational node.

    Classifies the latest message (user query vs. another agent's response),
    records that in ``response_type``, and invokes the main chat chain.

    Returns a state update with the chain's reply appended to ``messages``.
    """
    last_message = state['messages'][-1]
    # Use .get() instead of [] so the very first invocation — before any
    # node has written response_type — doesn't raise a KeyError. This also
    # matches how the other nodes (retriever, routers) read the key.
    current_response_type = state.get('response_type')

    # Update the current response type if needed
    if isinstance(last_message, HumanMessage):
        # This is a user query
        current_response_type = 'user_query'
    elif isinstance(last_message, AIMessage):
        # This is an agent's response
        current_response_type = 'agent_response'

    invoke_input = {'messages': state['messages'], 'response_type': current_response_type}

    response = chat_runnable.invoke(invoke_input)

    # Return an update to the state with the latest response
    return {'messages': [response], 'response_type': current_response_type}


def ticketing_bot(state: SimpleAgentState):
    """Fallback node: invoked when retrieval found no answer.

    Forces ``response_type`` to 'no_answer' for the prompt invocation, then
    marks the state as 'ticket_generated' once the JSON ticket reply exists.
    """
    # We manually set response_type to no_answer to invoke the chain
    reply = ticketing_runnable.invoke(
        {'messages': state['messages'], 'response_type': 'no_answer'}
    )

    # After we get the response, we manually set response_type to "ticket_generated"
    return {'messages': [reply], 'response_type': 'ticket_generated'}



def retriever(state: SimpleAgentState):
    """Retrieval node: delegate the conversation so far to the retriever agent.

    The current ``response_type`` is passed through unchanged.
    """
    answer = runnable_retriever.invoke(state["messages"])

    return {
        'messages': [answer],
        'response_type': state.get('response_type'),
    }

# For the retriever tool node we use the inbuilt ToolNode, which executes the
# tool calls carried by the last AIMessage and appends their ToolMessages.
retriever_tool_node = ToolNode(tools=retriever_tool_belt)

#------------------------------------
# Defining our routing functions

def route_query(
    state: "SimpleAgentState",
) -> Literal["query", "__end__"]:
    """
    Conditional-edge router out of the chatbot node.

    Returns 'query' (mapped to the retriever node in add_conditional_edges)
    when the last state update was a user query; otherwise routes to the end.
    """
    # Fix: the return annotation previously advertised "retriever", but the
    # edge label actually returned — and mapped by the graph builder — is
    # 'query'. Annotation now matches the real return values.
    if state.get('response_type') == 'user_query':
        # Routing to the query retriever
        return 'query'

    # Otherwise, go to the end
    return "__end__"


def route_tools(
    state: SimpleAgentState,
) -> Literal["tools", "success", "no_answer"]:
    """
    Conditional-edge router out of the retriever node.

    Routes to the ToolNode when the last AI message carries tool calls.
    Otherwise inspects the answer text: a "no information found" marker
    sends the request to the ticketing bot; anything else counts as success.
    """
    # Accept either a bare message list or a full state mapping.
    if isinstance(state, list):
        ai_message = state[-1]
    else:
        messages = state.get("messages", [])
        if not messages:
            raise ValueError(f"No messages found in input state to tool_edge: {state}")
        ai_message = messages[-1]

    # Pending tool calls take priority: hand off to the ToolNode.
    if hasattr(ai_message, "tool_calls") and len(ai_message.tool_calls) > 0:
        return "tools"

    # Check if we couldn't find an answer
    # NOTE(review): assumes ai_message.content is a string — confirm the
    # retriever never emits structured (list) content.
    if 'no information found' in ai_message.content.lower():
        return "no_answer"

    return "success"


#----------------------------------------
# Build the graph and connect the edges

# Start the graph
graph_builder = StateGraph(SimpleAgentState)

# Add the nodes
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_node("retriever", retriever)
graph_builder.add_node("retriever_tools", retriever_tool_node)
graph_builder.add_node("ticketing_bot", ticketing_bot)

# Add the edges
graph_builder.add_edge(START, "chatbot")

# chatbot -> retriever when the user asked something new; otherwise finish.
# The mapping translates route_query's edge labels into node names.
graph_builder.add_conditional_edges(
    "chatbot",
    route_query,
    {"query": "retriever", "__end__": "__end__"},
)

# retriever -> tools (execute pending tool calls), chatbot (answer found),
# or ticketing_bot (retrieval reported "no information found").
graph_builder.add_conditional_edges(
    "retriever",
    route_tools,
    {"tools": "retriever_tools", "success": "chatbot", "no_answer": "ticketing_bot"},
)

# Any time a tool is called, we return to the retriever to decide the next step
graph_builder.add_edge("retriever_tools", "retriever")

# The ticketing bot goes directly to the end
graph_builder.add_edge("ticketing_bot", END)

#----------------------------------------
# Compile!
# Also, we add memory to the agent using a checkpointer, so conversation
# state persists across invocations that share a thread_id.
graph = graph_builder.compile(checkpointer=MemorySaver())