optim v2: separated response for each component, mod becomes a classifier
Files changed:
- allofresh_chatbot.py +20 -22
- app.py +3 -3
- prompts/mod_prompt.py +1 -1
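In short: the moderation prompt stops being a True/False gate and becomes a classifier, and the chatbot dispatches each query to a separate answering component based on the verdict. A minimal sketch of the new flow (the chain/agent names and the ANS_CHAIN/ANS_AGENT labels mirror the diffs below; this standalone function is illustrative, not code from the commit):

# Sketch of the routing introduced by this commit; `mod_chain`, `ans_chain`,
# `ans_agent`, and the verdict labels mirror the diff below. Everything else
# here is a stub for illustration.
FALLBACK_MESSAGE = "Sorry, I can't help with that."  # stand-in for the real constant

def route_answer(query, chat_history, mod_chain, ans_chain, ans_agent):
    llm_input = {"input": query, "chat_history": str(chat_history)}
    verdict = mod_chain.run({"input": query})  # classifier verdict, not True/False
    if verdict == "ANS_CHAIN":   # no knowledge-base access needed: plain LLM chain
        return ans_chain.run(llm_input)
    if verdict == "ANS_AGENT":   # tool-using agent queries the knowledge base
        return ans_agent(llm_input)["output"]
    return FALLBACK_MESSAGE      # unrecognized verdict falls back safely

This replaces the old ans_pipeline, which ran the tool-using agent for every query; queries that need no catalogue lookup now skip the agent entirely.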
allofresh_chatbot.py
CHANGED
@@ -1,6 +1,7 @@
 import os
 from dotenv import load_dotenv
 import re
+from loguru import logger
 
 from langchain import PromptTemplate, LLMChain
 from langchain.agents import initialize_agent, Tool
@@ -13,7 +14,7 @@ from langchain.llms import AzureOpenAI
 from langchain.prompts import PromptTemplate
 
 from utils import lctool_search_allo_api, cut_dialogue_history
-from prompts.mod_prompt import MOD_PROMPT, FALLBACK_MESSAGE
+from prompts.mod_prompt import MOD_PROMPT, FALLBACK_MESSAGE, MOD_PROMPT_OPTIM_v2
 from prompts.ans_prompt import ANS_PREFIX, ANS_FORMAT_INSTRUCTIONS, ANS_SUFFIX, ANS_CHAIN_PROMPT
 from prompts.reco_prompt import RECO_PREFIX, RECO_FORMAT_INSTRUCTIONS, RECO_SUFFIX, NO_RECO_OUTPUT
 
@@ -69,7 +70,7 @@ class AllofreshChatbot():
 
     def init_mod_chain(self):
         mod_prompt = PromptTemplate(
-            template=MOD_PROMPT,
+            template=MOD_PROMPT_OPTIM_v2,
             input_variables=["input"]
         )
 
@@ -97,7 +98,6 @@ class AllofreshChatbot():
             self.llms["gpt-4"],
             agent="conversational-react-description",
             verbose=self.debug,
-            memory=self.ans_memory,
             return_intermediate_steps=True,
             agent_kwargs={
                 'prefix': ANS_PREFIX,
@@ -114,24 +114,6 @@ class AllofreshChatbot():
 
         # Define the first LLM chain with the shared AzureOpenAI object and prompt template
         return LLMChain(llm=self.llms["gpt-4"], prompt=ans_prompt)
-
-    def ans_pipeline(self, text, debug_cost=False, keep_last_n_words=500):
-        try:
-            self.ans_agent.memory.buffer = cut_dialogue_history(self.ans_agent.memory.buffer, keep_last_n_words=keep_last_n_words)
-        except:
-            pass
-        finally:
-            with get_openai_callback() as openai_cb:
-                res = self.ans_agent({"input": text.strip()})
-                response = res['output'].replace("\\", "/")
-
-                if debug_cost:
-                    print(f"Total Tokens: {openai_cb.total_tokens}")
-                    print(f"Prompt Tokens: {openai_cb.prompt_tokens}")
-                    print(f"Completion Tokens: {openai_cb.completion_tokens}")
-                    print(f"Total Cost (USD): ${openai_cb.total_cost}")
-
-                return response
 
     def init_reco_agent(self):
         reco_tools = [
@@ -195,10 +177,26 @@ class AllofreshChatbot():
             return self.ans_chain.run({"input": query, "chat_history": str(chat_history)})
         return FALLBACK_MESSAGE
 
-    def reco_optim_v1(self, chat_history):
+    def answer_optim_v2(self, query, chat_history):
         """
         We plugged off the tools from the 'answering' component and replaced it with a simple chain
         """
+        # moderate
+        mod_verdict = self.mod_chain.run({"input": query})
+        llm_input = {"input": query, "chat_history": str(chat_history)}
+
+        logger.info(f"mod verdict: {mod_verdict}")
+        # if no need to access knowledge base
+        if mod_verdict == "ANS_CHAIN":
+            # answer question
+            return self.ans_chain.run(llm_input)
+        # if need to access knowledge base
+        elif mod_verdict == "ANS_AGENT":
+            res = self.ans_agent(llm_input)
+            return res['output'].replace("\\", "/")
+        return FALLBACK_MESSAGE
+
+    def reco_optim_v1(self, chat_history):
         reco = self.reco_agent.run({"input": chat_history})
         # filter out reco (str) to only contain alphabeticals
         return reco if reco != NO_RECO_OUTPUT else None
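One detail worth flagging in answer_optim_v2: the verdict is matched by exact string equality, so any whitespace, casing, or extra words the model wraps around the label would silently route to FALLBACK_MESSAGE. A hedged sketch of a normalization step one could add before the comparison (hypothetical helper, not part of this commit):

def normalize_verdict(raw: str) -> str:
    # LLM classifiers often pad the label with whitespace, quotes, or prose;
    # keep the first recognized label. Hypothetical helper, not in the commit.
    cleaned = raw.strip().upper()
    for label in ("ANS_CHAIN", "ANS_AGENT"):
        if label in cleaned:
            return label
    return "FALLBACK"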
app.py
CHANGED
@@ -40,14 +40,14 @@ def predict_answer(input, history):
     """
     Answering component
     """
-    answer = allo_chatbot.
+    answer = allo_chatbot.answer_optim_v2(input, preproc_history(history))
 
     history.append({'role': 'user', 'content': None})
     history.append({'role': 'assistant', 'content': answer})
 
     return fetch_messages(history), history
 
-def predict_reco(
+def predict_reco(history):
     """
     Reco component
     """
@@ -73,7 +73,7 @@ with gr.Blocks() as app:
     ).success(
         predict_answer, [txt, state], [chatbot, state]
     ).success(
-        predict_reco, [
+        predict_reco, [state], [chatbot, state]
     )
 
 app.queue(concurrency_count=4)
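For context on the .success() chaining above: Gradio runs each .success() handler only if the previous event finished without raising, which is what lets answering and recommendation render as two separate chatbot updates. A self-contained sketch of the same wiring, with stub handlers in place of the app's real ones:

import gradio as gr

def answer_step(msg, history):
    history = history + [(msg, f"answer to: {msg}")]  # stub answer
    return history, history

def reco_step(history):
    history = history + [(None, "you might also like ...")]  # stub reco
    return history, history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    state = gr.State([])
    txt = gr.Textbox()
    # reco_step fires only if answer_step completed cleanly
    txt.submit(answer_step, [txt, state], [chatbot, state]).success(
        reco_step, [state], [chatbot, state]
    )

demo.queue(concurrency_count=4)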
prompts/mod_prompt.py
CHANGED
@@ -15,7 +15,7 @@ NOTES:
 
 ONLY ANSWER with either [True, False]
 """
-
+MOD_PROMPT_OPTIM_v2 = """
 You are MODERATOR.
 You are to classify what the next Chatbot will be doing. The chatbot will assist in supermarket shopping and requires validation for any information relating to the supermarket
 
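The body of MOD_PROMPT_OPTIM_v2 is cut off in this diff. From the labels answer_optim_v2 dispatches on, the prompt presumably ends by demanding exactly one component name; a hypothetical completion, for illustration only:

# Hypothetical continuation of MOD_PROMPT_OPTIM_v2 -- the committed text is
# truncated here; only the ANS_CHAIN/ANS_AGENT contract is inferred from
# answer_optim_v2.
MOD_PROMPT_OPTIM_v2_SKETCH = """
You are MODERATOR.
You are to classify what the next Chatbot will be doing. The chatbot will
assist in supermarket shopping and requires validation for any information
relating to the supermarket.

ONLY ANSWER with one of [ANS_CHAIN, ANS_AGENT]:
- ANS_CHAIN: the query needs no access to the supermarket knowledge base
- ANS_AGENT: the query needs product or stock information from the knowledge base

Input: {input}
"""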