ar-dy committed on
Commit 1b49043 · 1 Parent(s): 13727cd

first version optimized

Files changed (7)
  1. .gitignore +3 -3
  2. allofresh_chatbot.py +204 -203
  3. app.py +79 -68
  4. prompts/ans_prompt.py +79 -82
  5. prompts/mod_prompt.py +36 -16
  6. prompts/reco_prompt.py +4 -3
  7. sandbox.ipynb +207 -207
.gitignore CHANGED
@@ -1,4 +1,4 @@
1
- .env
2
- **__init__.py
3
- __pycache__
4
  .ipynb_checkpoints
 
1
+ .env
2
+ **__init__.py
3
+ __pycache__
4
  .ipynb_checkpoints
allofresh_chatbot.py CHANGED
@@ -1,203 +1,204 @@
1
- import os
2
- from dotenv import load_dotenv
3
-
4
- from langchain import PromptTemplate, LLMChain
5
- from langchain.agents import initialize_agent, Tool
6
- from langchain.chat_models import AzureChatOpenAI
7
- from langchain.agents import ZeroShotAgent, AgentExecutor
8
- from langchain.chains.conversation.memory import ConversationBufferMemory
9
- from langchain.callbacks import get_openai_callback
10
- from langchain.chains.llm import LLMChain
11
- from langchain.llms import AzureOpenAI
12
- from langchain.prompts import PromptTemplate
13
-
14
- from utils import lctool_search_allo_api, cut_dialogue_history
15
- from prompts.mod_prompt import MOD_PROMPT
16
- from prompts.ans_prompt import ANS_PREFIX, ANS_FORMAT_INSTRUCTIONS, ANS_SUFFIX, ANS_CHAIN_PROMPT
17
- from prompts.reco_prompt import RECO_PREFIX, RECO_FORMAT_INSTRUCTIONS, RECO_SUFFIX
18
-
19
- load_dotenv()
20
-
21
- class AllofreshChatbot():
22
- def __init__(self, debug=False):
23
- self.ans_memory = None
24
- self.debug = debug
25
-
26
- # init llm
27
- self.llms = self.init_llm()
28
- # init moderation chain
29
- self.mod_chain = self.init_mod_chain()
30
- # init answering agent
31
- self.ans_memory = self.init_ans_memory()
32
- self.ans_agent = self.init_ans_agent()
33
- self.ans_chain = self.init_ans_chain()
34
- # init reco agent
35
- self.reco_agent = self.init_reco_agent()
36
-
37
- def init_llm(self):
38
- return {
39
- "gpt-4": AzureChatOpenAI(
40
- temperature=0,
41
- deployment_name = os.getenv("DEPLOYMENT_NAME_GPT4"),
42
- model_name = os.getenv("MODEL_NAME_GPT4"),
43
- openai_api_type = os.getenv("OPENAI_API_TYPE"),
44
- openai_api_base = os.getenv("OPENAI_API_BASE"),
45
- openai_api_version = os.getenv("OPENAI_API_VERSION"),
46
- openai_api_key = os.getenv("OPENAI_API_KEY"),
47
- openai_organization = os.getenv("OPENAI_ORGANIZATION")
48
- ),
49
- "gpt-3.5": AzureChatOpenAI(
50
- temperature=0,
51
- deployment_name = os.getenv("DEPLOYMENT_NAME_GPT3.5"),
52
- model_name = os.getenv("MODEL_NAME_GPT3.5"),
53
- openai_api_type = os.getenv("OPENAI_API_TYPE"),
54
- openai_api_base = os.getenv("OPENAI_API_BASE"),
55
- openai_api_version = os.getenv("OPENAI_API_VERSION"),
56
- openai_api_key = os.getenv("OPENAI_API_KEY"),
57
- openai_organization = os.getenv("OPENAI_ORGANIZATION")
58
- ),
59
- "gpt-3": AzureOpenAI(
60
- temperature=0,
61
- deployment_name = os.getenv("DEPLOYMENT_NAME_GPT3"),
62
- model_name = os.getenv("MODEL_NAME_GPT3"),
63
- openai_api_base = os.getenv("OPENAI_API_BASE"),
64
- openai_api_key = os.getenv("OPENAI_API_KEY"),
65
- openai_organization = os.getenv("OPENAI_ORGANIZATION")
66
- ),
67
- }
68
-
69
- def init_mod_chain(self):
70
- mod_prompt = PromptTemplate(
71
- template=MOD_PROMPT,
72
- input_variables=["input"]
73
- )
74
-
75
- # Define the first LLM chain with the shared AzureOpenAI object and prompt template
76
- return LLMChain(llm=self.llms["gpt-4"], prompt=mod_prompt)
77
-
78
- def init_ans_memory(self):
79
- return ConversationBufferMemory(memory_key="chat_history", output_key='output')
80
-
81
- def init_ans_agent(self):
82
- ans_tools = [
83
- Tool(
84
- name="Product Search",
85
- func=lctool_search_allo_api,
86
- description="""
87
- To search for products in Allofresh's Database.
88
- Always use this to verify product names.
89
- Outputs product names and prices
90
- """
91
- )
92
- ]
93
-
94
- return initialize_agent(
95
- ans_tools,
96
- self.llms["gpt-4"],
97
- agent="conversational-react-description",
98
- verbose=self.debug,
99
- memory=self.ans_memory,
100
- return_intermediate_steps=True,
101
- agent_kwargs={
102
- 'prefix': ANS_PREFIX,
103
- # 'format_instructions': ANS_FORMAT_INSTRUCTIONS, # only needed for below gpt-4
104
- 'suffix': ANS_SUFFIX
105
- }
106
- )
107
-
108
- def init_ans_chain(self):
109
- ans_prompt = PromptTemplate(
110
- template=ANS_CHAIN_PROMPT,
111
- input_variables=["query"]
112
- )
113
-
114
- # Define the first LLM chain with the shared AzureOpenAI object and prompt template
115
- return LLMChain(llm=self.llms["gpt-4"], prompt=ans_prompt)
116
-
117
- def ans_pipeline(self, text, debug_cost=False, keep_last_n_words=500):
118
- try:
119
- self.ans_agent.memory.buffer = cut_dialogue_history(self.ans_agent.memory.buffer, keep_last_n_words=keep_last_n_words)
120
- except:
121
- pass
122
- finally:
123
- with get_openai_callback() as openai_cb:
124
- res = self.ans_agent({"input": text.strip()})
125
- response = res['output'].replace("\\", "/")
126
-
127
- if debug_cost:
128
- print(f"Total Tokens: {openai_cb.total_tokens}")
129
- print(f"Prompt Tokens: {openai_cb.prompt_tokens}")
130
- print(f"Completion Tokens: {openai_cb.completion_tokens}")
131
- print(f"Total Cost (USD): ${openai_cb.total_cost}")
132
-
133
- return response
134
-
135
- def init_reco_agent(self):
136
- reco_tools = [
137
- Tool(
138
- name="Product Search",
139
- func=lctool_search_allo_api,
140
- description="""
141
- To search for products in Allofresh's Database.
142
- Always use this to verify product names.
143
- Outputs product names and prices
144
- """
145
- ),
146
- Tool(
147
- name="No Recommendation",
148
- func=lambda x: "No recommendation",
149
- description="""
150
- Use this if based on the context you don't need to recommend any products
151
- """
152
- )
153
- ]
154
- prompt = ZeroShotAgent.create_prompt(
155
- reco_tools,
156
- prefix=RECO_PREFIX,
157
- format_instructions=RECO_FORMAT_INSTRUCTIONS,
158
- suffix=RECO_SUFFIX,
159
- input_variables=["input", "agent_scratchpad"]
160
- )
161
-
162
- llm_chain_reco = LLMChain(llm=self.llms["gpt-4"], prompt=prompt)
163
- agent_reco = ZeroShotAgent(llm_chain=llm_chain_reco, allowed_tools=[tool.name for tool in reco_tools])
164
- return AgentExecutor.from_agent_and_tools(agent=agent_reco, tools=reco_tools, verbose=self.debug)
165
-
166
- def answer(self, query):
167
- # moderate
168
- mod_verdict = self.mod_chain.run({"query": query})
169
- # if pass moderation
170
- if mod_verdict == "True":
171
- # answer question
172
- answer = self.ans_pipeline(query)
173
- # recommend
174
- reco = self.reco_agent.run({"input": self.ans_agent.memory.buffer})
175
- if len(reco) > 0:
176
- self.ans_agent.memory.chat_memory.add_ai_message(reco)
177
- # construct output
178
- return (answer, reco)
179
- else:
180
- return (
181
- "Maaf saya tidak bisa membantu Anda untuk itu... tapi silakan tanya Allofresh-Assistant apa saja terkait kebutuhan berbelanja Anda!",
182
- ""
183
- )
184
-
185
- def answer_optim_v1(self, query, chat_history):
186
- """
187
- We plugged off the tools from the 'answering' component and replaced it with a simple chain
188
- """
189
- # moderate
190
- mod_verdict = self.mod_chain.run({"input": query})
191
- # if pass moderation
192
- if mod_verdict == "True":
193
- # answer question
194
- answer = self.ans_chain.run({"input": query, "chat_history": chat_history})
195
- # recommend
196
- reco = self.reco_agent.run({"input": chat_history})
197
- # construct output
198
- return (answer, reco)
199
- else:
200
- return (
201
- "Maaf saya tidak bisa membantu Anda untuk itu... tapi silakan tanya Allofresh-Assistant apa saja terkait makanan atau resep yang Anda inginkan!",
202
- None
203
- )
 
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+ import re
4
+
5
+ from langchain import PromptTemplate, LLMChain
6
+ from langchain.agents import initialize_agent, Tool
7
+ from langchain.chat_models import AzureChatOpenAI
8
+ from langchain.agents import ZeroShotAgent, AgentExecutor
9
+ from langchain.chains.conversation.memory import ConversationBufferMemory
10
+ from langchain.callbacks import get_openai_callback
11
+ from langchain.chains.llm import LLMChain
12
+ from langchain.llms import AzureOpenAI
13
+ from langchain.prompts import PromptTemplate
14
+
15
+ from utils import lctool_search_allo_api, cut_dialogue_history
16
+ from prompts.mod_prompt import MOD_PROMPT, FALLBACK_MESSAGE
17
+ from prompts.ans_prompt import ANS_PREFIX, ANS_FORMAT_INSTRUCTIONS, ANS_SUFFIX, ANS_CHAIN_PROMPT
18
+ from prompts.reco_prompt import RECO_PREFIX, RECO_FORMAT_INSTRUCTIONS, RECO_SUFFIX, NO_RECO_OUTPUT
19
+
20
+ load_dotenv()
21
+
22
+ class AllofreshChatbot():
23
+ def __init__(self, debug=False):
24
+ self.ans_memory = None
25
+ self.debug = debug
26
+
27
+ # init llm
28
+ self.llms = self.init_llm()
29
+ # init moderation chain
30
+ self.mod_chain = self.init_mod_chain()
31
+ # init answering agent
32
+ self.ans_memory = self.init_ans_memory()
33
+ self.ans_agent = self.init_ans_agent()
34
+ self.ans_chain = self.init_ans_chain()
35
+ # init reco agent
36
+ self.reco_agent = self.init_reco_agent()
37
+
38
+ def init_llm(self):
39
+ return {
40
+ "gpt-4": AzureChatOpenAI(
41
+ temperature=0,
42
+ deployment_name = os.getenv("DEPLOYMENT_NAME_GPT4"),
43
+ model_name = os.getenv("MODEL_NAME_GPT4"),
44
+ openai_api_type = os.getenv("OPENAI_API_TYPE"),
45
+ openai_api_base = os.getenv("OPENAI_API_BASE"),
46
+ openai_api_version = os.getenv("OPENAI_API_VERSION"),
47
+ openai_api_key = os.getenv("OPENAI_API_KEY"),
48
+ openai_organization = os.getenv("OPENAI_ORGANIZATION")
49
+ ),
50
+ "gpt-3.5": AzureChatOpenAI(
51
+ temperature=0,
52
+ deployment_name = os.getenv("DEPLOYMENT_NAME_GPT3.5"),
53
+ model_name = os.getenv("MODEL_NAME_GPT3.5"),
54
+ openai_api_type = os.getenv("OPENAI_API_TYPE"),
55
+ openai_api_base = os.getenv("OPENAI_API_BASE"),
56
+ openai_api_version = os.getenv("OPENAI_API_VERSION"),
57
+ openai_api_key = os.getenv("OPENAI_API_KEY"),
58
+ openai_organization = os.getenv("OPENAI_ORGANIZATION")
59
+ ),
60
+ "gpt-3": AzureOpenAI(
61
+ temperature=0,
62
+ deployment_name = os.getenv("DEPLOYMENT_NAME_GPT3"),
63
+ model_name = os.getenv("MODEL_NAME_GPT3"),
64
+ openai_api_base = os.getenv("OPENAI_API_BASE"),
65
+ openai_api_key = os.getenv("OPENAI_API_KEY"),
66
+ openai_organization = os.getenv("OPENAI_ORGANIZATION")
67
+ ),
68
+ }
69
+
70
+ def init_mod_chain(self):
71
+ mod_prompt = PromptTemplate(
72
+ template=MOD_PROMPT,
73
+ input_variables=["input"]
74
+ )
75
+
76
+ # Define the first LLM chain with the shared AzureOpenAI object and prompt template
77
+ return LLMChain(llm=self.llms["gpt-4"], prompt=mod_prompt)
78
+
79
+ def init_ans_memory(self):
80
+ return ConversationBufferMemory(memory_key="chat_history", output_key='output')
81
+
82
+ def init_ans_agent(self):
83
+ ans_tools = [
84
+ Tool(
85
+ name="Product Search",
86
+ func=lctool_search_allo_api,
87
+ description="""
88
+ To search for products in Allofresh's Database.
89
+ Always use this to verify product names.
90
+ Outputs product names and prices
91
+ """
92
+ )
93
+ ]
94
+
95
+ return initialize_agent(
96
+ ans_tools,
97
+ self.llms["gpt-4"],
98
+ agent="conversational-react-description",
99
+ verbose=self.debug,
100
+ memory=self.ans_memory,
101
+ return_intermediate_steps=True,
102
+ agent_kwargs={
103
+ 'prefix': ANS_PREFIX,
104
+ # 'format_instructions': ANS_FORMAT_INSTRUCTIONS, # only needed for below gpt-4
105
+ 'suffix': ANS_SUFFIX
106
+ }
107
+ )
108
+
109
+ def init_ans_chain(self):
110
+ ans_prompt = PromptTemplate(
111
+ template=ANS_CHAIN_PROMPT,
112
+ input_variables=["input", "chat_history"]
113
+ )
114
+
115
+ # Define the first LLM chain with the shared AzureOpenAI object and prompt template
116
+ return LLMChain(llm=self.llms["gpt-4"], prompt=ans_prompt)
117
+
118
+ def ans_pipeline(self, text, debug_cost=False, keep_last_n_words=500):
119
+ try:
120
+ self.ans_agent.memory.buffer = cut_dialogue_history(self.ans_agent.memory.buffer, keep_last_n_words=keep_last_n_words)
121
+ except:
122
+ pass
123
+ finally:
124
+ with get_openai_callback() as openai_cb:
125
+ res = self.ans_agent({"input": text.strip()})
126
+ response = res['output'].replace("\\", "/")
127
+
128
+ if debug_cost:
129
+ print(f"Total Tokens: {openai_cb.total_tokens}")
130
+ print(f"Prompt Tokens: {openai_cb.prompt_tokens}")
131
+ print(f"Completion Tokens: {openai_cb.completion_tokens}")
132
+ print(f"Total Cost (USD): ${openai_cb.total_cost}")
133
+
134
+ return response
135
+
136
+ def init_reco_agent(self):
137
+ reco_tools = [
138
+ Tool(
139
+ name="Product Search",
140
+ func=lctool_search_allo_api,
141
+ description="""
142
+ To search for products in Allofresh's Database.
143
+ Always use this to verify product names.
144
+ Outputs product names and prices
145
+ """
146
+ ),
147
+ Tool(
148
+ name="No Recommendation",
149
+ func=lambda x: "No recommendation",
150
+ description="""
151
+ Use this if based on the context you don't need to recommend any products
152
+ """
153
+ )
154
+ ]
155
+ prompt = ZeroShotAgent.create_prompt(
156
+ reco_tools,
157
+ prefix=RECO_PREFIX,
158
+ format_instructions=RECO_FORMAT_INSTRUCTIONS,
159
+ suffix=RECO_SUFFIX,
160
+ input_variables=["input", "agent_scratchpad"]
161
+ )
162
+
163
+ llm_chain_reco = LLMChain(llm=self.llms["gpt-4"], prompt=prompt)
164
+ agent_reco = ZeroShotAgent(llm_chain=llm_chain_reco, allowed_tools=[tool.name for tool in reco_tools])
165
+ return AgentExecutor.from_agent_and_tools(agent=agent_reco, tools=reco_tools, verbose=self.debug)
166
+
167
+ def answer(self, query):
168
+ # moderate
169
+ mod_verdict = self.mod_chain.run({"query": query})
170
+ # if pass moderation
171
+ if mod_verdict == "True":
172
+ # answer question
173
+ answer = self.ans_pipeline(query)
174
+ # recommend
175
+ reco = self.reco_agent.run({"input": self.ans_agent.memory.buffer})
176
+ if len(reco) > 0:
177
+ self.ans_agent.memory.chat_memory.add_ai_message(reco)
178
+ # construct output
179
+ return (answer, reco)
180
+ else:
181
+ return (
182
+ FALLBACK_MESSAGE,
183
+ None
184
+ )
185
+
186
+ def answer_optim_v1(self, query, chat_history):
187
+ """
188
+ We plugged off the tools from the 'answering' component and replaced it with a simple chain
189
+ """
190
+ # moderate
191
+ mod_verdict = self.mod_chain.run({"input": query})
192
+ # if pass moderation
193
+ if mod_verdict == "True":
194
+ # answer question
195
+ return self.ans_chain.run({"input": query, "chat_history": str(chat_history)})
196
+ return FALLBACK_MESSAGE
197
+
198
+ def reco_optim_v1(self, chat_history):
199
+ """
200
+ We plugged off the tools from the 'answering' component and replaced it with a simple chain
201
+ """
202
+ reco = self.reco_agent.run({"input": chat_history})
203
+ # filter out reco (str) to only contain alphabeticals
204
+ return reco if reco != NO_RECO_OUTPUT else None
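For reference, a minimal usage sketch of the optimized flow introduced in this file, assuming the Azure OpenAI variables read in init_llm() are present in .env; the way the serialized chat history string is concatenated below is illustrative only, not the exact format app.py builds:

```python
# Minimal usage sketch of the optimized flow (answer_optim_v1 + reco_optim_v1).
# Assumes the Azure OpenAI variables referenced in init_llm() are set in .env.
from allofresh_chatbot import AllofreshChatbot

bot = AllofreshChatbot(debug=False)

chat_history = ""  # serialized prior turns; app.py passes a string built from Message objects
query = "Ada rekomendasi resep nasi goreng?"  # hypothetical user input

# 1) moderation + answering via the plain LLMChain (no tools); returns FALLBACK_MESSAGE on reject
answer = bot.answer_optim_v1(query, chat_history)
print(answer)

# 2) recommendation runs as a separate step; returns None when the agent outputs NO_RECO_OUTPUT
reco = bot.reco_optim_v1(chat_history + f"\nuser: {query}\nassistant: {answer}")
if reco is not None:
    print(reco)
```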
app.py CHANGED
@@ -1,69 +1,80 @@
1
- import gradio as gr
2
-
3
- from loguru import logger
4
- from pydantic import BaseModel
5
-
6
- from allofresh_chatbot import AllofreshChatbot
7
- from utils import cut_dialogue_history
8
-
9
- allo_chatbot = AllofreshChatbot(debug=True)
10
-
11
- class Message(BaseModel):
12
- role: str
13
- content: str
14
-
15
- def fetch_messages(history):
16
- """
17
- Fetch the messages from the chat history.
18
- """
19
- return [(history[i]["content"], history[i+1]["content"]) for i in range(0, len(history)-1, 2)]
20
-
21
- def preproc_history(history):
22
- """
23
- Clean the chat history to remove the None values.
24
- """
25
- clean_history = [Message(**msg) for msg in history if msg["content"] is not None]
26
- return str(cut_dialogue_history(clean_history))
27
-
28
- def user_input(input, history):
29
- """
30
- Add the user input to the chat history.
31
- """
32
- history.append({'role': 'user', 'content': input})
33
- history.append({'role': 'assistant', 'content': None}})
34
-
35
- return fetch_messages(history), history
36
-
37
- def predict_answer(input, history):
38
- """
39
- Predict the response of the chatbot and complete a running list of chat history.
40
- """
41
- answer = allo_chatbot.answer_optim_v1(input, preproc_history(history))
42
-
43
- history.append({'role': 'user', 'content': None})
44
- history.append({'role': 'assistant', 'content': answer})
45
-
46
- return fetch_messages(history), history
47
-
48
- def predict_reco(input, history):
49
-
50
-
51
- """
52
- Gradio Blocks low-level API that allows to create custom web applications (here our chat app)
53
- """
54
- with gr.Blocks() as app:
55
- logger.info("Starting app...")
56
- chatbot = gr.Chatbot(label="Allofresh Assistant")
57
- state = gr.State([])
58
- with gr.Row():
59
- txt = gr.Textbox(show_label=False, placeholder="Enter text, then press enter").style(container=False)
60
- txt.submit(
61
-
62
- ).success(
63
- predict, [txt, state], [chatbot, state]
64
- ).success(
65
- predict, [txt, state], [chatbot, state]
66
- )
67
-
68
- app.queue(concurrency_count=4)
 
 
 
 
 
 
 
 
 
 
 
69
  app.launch()
 
1
+ import gradio as gr
2
+
3
+ from loguru import logger
4
+ from pydantic import BaseModel
5
+ from ast import literal_eval
6
+
7
+ from allofresh_chatbot import AllofreshChatbot
8
+ from utils import cut_dialogue_history
9
+ from prompts.mod_prompt import FALLBACK_MESSAGE
10
+
11
+ allo_chatbot = AllofreshChatbot(debug=True)
12
+
13
+ class Message(BaseModel):
14
+ role: str
15
+ content: str
16
+
17
+ def fetch_messages(history):
18
+ """
19
+ Fetch the messages from the chat history.
20
+ """
21
+ return [(history[i]["content"], history[i+1]["content"]) for i in range(0, len(history)-1, 2)]
22
+
23
+ def preproc_history(history):
24
+ """
25
+ Clean the chat history to remove the None values.
26
+ """
27
+ clean_history = [Message(**msg) for msg in history if msg["content"] is not None]
28
+ return cut_dialogue_history(str(clean_history))
29
+
30
+ def user_input(input, history):
31
+ """
32
+ Add the user input to the chat history.
33
+ """
34
+ history.append({'role': 'user', 'content': input})
35
+ history.append({'role': 'assistant', 'content': None})
36
+
37
+ return fetch_messages(history), history
38
+
39
+ def predict_answer(input, history):
40
+ """
41
+ Answering component
42
+ """
43
+ answer = allo_chatbot.answer_optim_v1(input, preproc_history(history))
44
+
45
+ history.append({'role': 'user', 'content': None})
46
+ history.append({'role': 'assistant', 'content': answer})
47
+
48
+ return fetch_messages(history), history
49
+
50
+ def predict_reco(input, history):
51
+ """
52
+ Reco component
53
+ """
54
+ if history[-1]["content"] != FALLBACK_MESSAGE:
55
+ reco = allo_chatbot.reco_optim_v1(preproc_history(history))
56
+
57
+ history.append({'role': 'user', 'content': None})
58
+ history.append({'role': 'assistant', 'content': reco})
59
+
60
+ return fetch_messages(history), history
61
+
62
+ """
63
+ Gradio Blocks low-level API that allows to create custom web applications (here our chat app)
64
+ """
65
+ with gr.Blocks() as app:
66
+ logger.info("Starting app...")
67
+ chatbot = gr.Chatbot(label="Allofresh Assistant")
68
+ state = gr.State([])
69
+ with gr.Row():
70
+ txt = gr.Textbox(show_label=False, placeholder="Enter text, then press enter").style(container=False)
71
+ txt.submit(
72
+ user_input, [txt, state], [chatbot, state]
73
+ ).success(
74
+ predict_answer, [txt, state], [chatbot, state]
75
+ ).success(
76
+ predict_reco, [txt, state], [chatbot, state]
77
+ )
78
+
79
+ app.queue(concurrency_count=4)
80
  app.launch()
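As a side note on the helpers above, the alternating user/assistant layout that user_input, predict_answer, and fetch_messages rely on can be sketched standalone (the history values below are hypothetical):

```python
# Standalone sketch of the history layout the Gradio helpers assume (hypothetical values).
history = [
    {"role": "user", "content": "halo"},
    {"role": "assistant", "content": None},  # placeholder appended by user_input
    {"role": "user", "content": None},       # placeholder appended by predict_answer
    {"role": "assistant", "content": "Halo! Ada yang bisa dibantu?"},
]

# fetch_messages pairs consecutive entries into (user, assistant) tuples for gr.Chatbot
pairs = [(history[i]["content"], history[i + 1]["content"])
         for i in range(0, len(history) - 1, 2)]
print(pairs)  # [('halo', None), (None, 'Halo! Ada yang bisa dibantu?')]
```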
prompts/ans_prompt.py CHANGED
@@ -1,83 +1,80 @@
1
- ANS_PREFIX = """
2
- You are Allofresh-Assistant, an AI language model that has been trained to serve Allofresh,
3
- an online e-grocery platform selling supermarket products with a focus on fresh produces.
4
- Your primary function is to assist customers with their shopping needs,
5
- including but not limited to answering questions on products and services offered Allofresh.
6
-
7
- You can answer questions regarding what people can do with the products they buy at Allofresh.
8
- e.g. food and recipes as it will nudge people to buy products
9
-
10
- If a customer asks you a question that falls outside of your function or knowledge as an online supermarket assistant,
11
- you must politely decline to answer and redirect the conversation back to your area of expertise.
12
-
13
- You have access to the supermarket's knowledge base (products, vouchers, etc.).
14
- You should use this information to provide accurate and helpful responses to customer inquiries.
15
- You must remember the name and description of each tool.
16
- Customers might give you questions which you can answer without tools,
17
- but questions which requires specific knowledge regarding the supermarket must be validated to the knowledge base.
18
- If you can't answer a question with or without tools, politely apologize that you don't know.
19
-
20
- You must answer in formal yet friendly bahasa Indonesia.
21
-
22
-
23
- TOOLS:
24
- ------
25
- """
26
- ANS_FORMAT_INSTRUCTIONS = """
27
- To use a tool, please use the following format:
28
-
29
- ```
30
- Thought: Do I need to use a tool? Yes
31
- Action: the action to take, should be one of [{tool_names}]
32
- Action Input: the input to the action
33
- Observation: the result of the action
34
- ```
35
-
36
- When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
37
-
38
- ```
39
- Thought: Do I need to use a tool? No
40
- ```
41
-
42
- Finally, whether you used the tool or not, output the answer
43
- {ai_prefix}: [your response here]
44
- """
45
- ANS_SUFFIX = """
46
- You are very strict on correctness and will never fake an information regarding the supermarket (product names, location, price, vouchers, etc.).
47
- Therefore you must validate every information related to Allofresh to Allofresh's knowledge base
48
- You must answer the user's question as informative as possible
49
-
50
- Take into account the previous conversation history:
51
- {chat_history}
52
-
53
- Begin! Remember you must give the final answer in bahasa indonesia
54
-
55
- New Input: {input}
56
- {agent_scratchpad}
57
- ...
58
- """
59
- ANS_CHAIN_PROMPT = """
60
- You are Allofresh-Assistant, an AI language model that has been trained to serve Allofresh,
61
- an online e-grocery platform selling supermarket products with a focus on fresh produces.
62
- Your primary function is to assist customers with their shopping needs,
63
- including but not limited to answering questions on products and services offered Allofresh.
64
-
65
- You can answer questions regarding what people can do with the products they buy at Allofresh.
66
- e.g. food and recipes as it will nudge people to buy products
67
-
68
- If a customer asks you a question that falls outside of your function or knowledge as an online supermarket assistant,
69
- you must politely decline to answer and redirect the conversation back to your area of expertise.
70
-
71
- You are very strict on correctness and will never fake an information regarding the supermarket (product names, location, price, vouchers, etc.).
72
- When you encounter a question that requires validation to the supermarket's knowledge base, politely ask the user to wait while you validate the information.
73
-
74
- Take into account the previous conversation history:
75
- {chat_history}
76
-
77
- New user input: {input}
78
-
79
- Remember! You must answer in formal yet friendly bahasa Indonesia.
80
-
81
- Answer:
82
- ...
83
  """
 
1
+ ANS_PREFIX = """
2
+ You are Allofresh-Assistant, an AI language model that has been trained to serve Allofresh,
3
+ an online e-grocery platform selling supermarket products with a focus on fresh produces.
4
+ Your primary function is to assist customers with their shopping needs,
5
+ including but not limited to answering questions on products and services offered Allofresh.
6
+
7
+ You can answer questions regarding what people can do with the products they buy at Allofresh.
8
+ e.g. food and recipes as it will nudge people to buy products
9
+
10
+ If a customer asks you a question that falls outside of your function or knowledge as an online supermarket assistant,
11
+ you must politely decline to answer and redirect the conversation back to your area of expertise.
12
+
13
+ You have access to the supermarket's knowledge base (products, vouchers, etc.).
14
+ You should use this information to provide accurate and helpful responses to customer inquiries.
15
+ You must remember the name and description of each tool.
16
+ Customers might give you questions which you can answer without tools,
17
+ but questions which requires specific knowledge regarding the supermarket must be validated to the knowledge base.
18
+ If you can't answer a question with or without tools, politely apologize that you don't know.
19
+
20
+ You must answer in formal yet friendly bahasa Indonesia.
21
+
22
+
23
+ TOOLS:
24
+ ------
25
+ """
26
+ ANS_FORMAT_INSTRUCTIONS = """
27
+ To use a tool, please use the following format:
28
+
29
+ ```
30
+ Thought: Do I need to use a tool? Yes
31
+ Action: the action to take, should be one of [{tool_names}]
32
+ Action Input: the input to the action
33
+ Observation: the result of the action
34
+ ```
35
+
36
+ When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
37
+
38
+ ```
39
+ Thought: Do I need to use a tool? No
40
+ ```
41
+
42
+ Finally, whether you used the tool or not, output the answer
43
+ {ai_prefix}: [your response here]
44
+ """
45
+ ANS_SUFFIX = """
46
+ You are very strict on correctness and will never fake an information regarding the supermarket (product names, location, price, vouchers, etc.).
47
+ Therefore you must validate every information related to Allofresh to Allofresh's knowledge base
48
+ You must answer the user's question as informative as possible
49
+
50
+ Take into account the previous conversation history:
51
+ {chat_history}
52
+
53
+ Begin! Remember you must give the final answer in bahasa indonesia
54
+
55
+ New Input: {input}
56
+ {agent_scratchpad}
57
+ ...
58
+ """
59
+ # restricted chain scope to only answering food related questions
60
+ ANS_CHAIN_PROMPT = """
61
+ You are Allofresh-Assistant, an AI language model that has been trained to serve Allofresh,
62
+ an online e-grocery platform selling supermarket products with a focus on fresh produces.
63
+ Your function is to assist customers with their inquiry on foods and recipes, while recommending Allofresh products,
64
+
65
+ If a customer asks you a question that falls outside of your function or knowledge as an online supermarket assistant,
66
+ you must politely decline to answer and redirect the conversation back to your area of expertise.
67
+
68
+ You are very strict on correctness and will never fake an information regarding the supermarket (product names, location, price, vouchers, etc.).
69
+ When you encounter a question that requires validation to the supermarket's knowledge base, politely ask the user to wait while you validate the information.
70
+
71
+ Take into account the previous conversation history:
72
+ {chat_history}
73
+
74
+ New user input: {input}
75
+
76
+ Remember! You must answer in formal yet friendly bahasa Indonesia.
77
+
78
+ Answer:
79
+ ...
 
 
 
80
  """
prompts/mod_prompt.py CHANGED
@@ -1,16 +1,36 @@
1
- MOD_PROMPT = """
2
- You are MODERATOR.
3
- MODERATOR MUST ONLY classify whether a certain passage belongs to a certain topic.
4
-
5
- INPUT: {input}
6
-
7
- INSTRUCTIONS:
8
- Classify WHETHER OR NOT input is RELATED to EITHER of the following:
9
- - greetings
10
- - foods, ingredients, food recipes
11
-
12
- NOTES:
13
- - the query might be in bahasa indonesia, english, or a combination of both. you must take into account for both languages
14
-
15
- ONLY ANSWER with either [True, False]
16
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MOD_PROMPT = """
2
+ You are MODERATOR.
3
+ MODERATOR MUST ONLY classify whether a certain passage belongs to a certain topic.
4
+
5
+ INPUT: {input}
6
+
7
+ INSTRUCTIONS:
8
+ Classify WHETHER OR NOT input is RELATED to EITHER of the following:
9
+ - inquiry on chatbot ability
10
+ - greetings
11
+ - foods, ingredients, food recipes
12
+
13
+ NOTES:
14
+ - the query might be in bahasa indonesia, english, or a combination of both. you must take into account for both languages
15
+
16
+ ONLY ANSWER with either [True, False]
17
+ """
18
+ MOD_PROMPT_OPTIM_v1 = """
19
+ You are MODERATOR.
20
+ You are to classify what the next Chatbot will be doing. The chatbot will assist in supermarket shopping and requires validation for any information relating to the supermarket
21
+
22
+ INPUT: {input}
23
+
24
+ INSTRUCTIONS:
25
+ Classify WHETHER OR NOT input is RELATED to EITHER of the following:
26
+ - inquiry on chatbot ability
27
+ - greetings
28
+ - foods, ingredients, food recipes
29
+
30
+ Answer ANS_AGENT if INPUT is related to either topics and you need to access the supermarket's knowledge base
31
+ Answer ANS_CHAIN if INPUT is related to either topics and you do not need to access the supermarket's knowledge base
32
+ Answer FALLBACK if INPUT is not related to either topics
33
+
34
+ ONLY ANSWER with either [ANS_AGENT, ANS_CHAIN, FALLBACK]
35
+ """
36
+ FALLBACK_MESSAGE = """Maaf saya tidak bisa membantu Anda untuk itu... tapi silakan tanya Allofresh-Assistant apa saja terkait makanan atau resep yang Anda inginkan!"""
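MOD_PROMPT_OPTIM_v1 is defined here but not yet wired into the chatbot in this commit; a hypothetical dispatch on its three verdicts could look like the sketch below (the route() helper and the verdict normalization are assumptions, not part of the codebase):

```python
# Hypothetical routing sketch for MOD_PROMPT_OPTIM_v1 verdicts (not wired up in this commit).
from prompts.mod_prompt import FALLBACK_MESSAGE


def route(verdict: str, bot, query: str, chat_history: str) -> str:
    """Dispatch a moderation verdict to the tool-using agent, the plain chain, or the fallback."""
    verdict = verdict.strip().upper()
    if verdict == "ANS_AGENT":
        # needs the supermarket knowledge base -> conversational agent with the Product Search tool
        return bot.ans_pipeline(query)
    if verdict == "ANS_CHAIN":
        # answerable without tools -> plain LLMChain over ANS_CHAIN_PROMPT
        return bot.ans_chain.run({"input": query, "chat_history": chat_history})
    # FALLBACK, or anything unexpected, gets the canned refusal
    return FALLBACK_MESSAGE
```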
prompts/reco_prompt.py CHANGED
@@ -3,7 +3,7 @@ RECO_PREFIX = """
3
  You are serving Allofresh, an online e-grocery platform selling supermarket products with a focus on fresh produces.
4
  You have the capability to assess the context and determine whether it's appropriate to recommend a product or not
5
  You are to evaluate another LLM's output and determine what products to recommend to user based on the output.
6
- You NEVER make up product names, and will always check the product database for available products
7
  You must answer in formal yet friendly bahasa Indonesia.
8
  """
9
  # this is only used for gpt-3.5-turbo and below
@@ -17,7 +17,7 @@ Action Input: If you want to recommend products, pass the list of products you w
17
  Observation: the result of the action
18
  ... (this Thought/Action/Action Input/Observation can repeat N times)
19
  Thought: I now know the final answer
20
- Final Answer: if no need to recommend product, only output empty string, else recommend all relevant products based on observation result
21
  """
22
  RECO_GPT3_ADD_FORMAT_INSTRUCTIONS = """
23
  Example of recommending products
@@ -41,4 +41,5 @@ RECO_SUFFIX = """
41
 
42
  Context: {input}
43
  {agent_scratchpad}
44
- """
 
 
3
  You are serving Allofresh, an online e-grocery platform selling supermarket products with a focus on fresh produces.
4
  You have the capability to assess the context and determine whether it's appropriate to recommend a product or not
5
  You are to evaluate another LLM's output and determine what products to recommend to user based on the output.
6
+ You MUST NEVER make up product names, and will always check the product database for available products
7
  You must answer in formal yet friendly bahasa Indonesia.
8
  """
9
  # this is only used for gpt-3.5-turbo and below
 
17
  Observation: the result of the action
18
  ... (this Thought/Action/Action Input/Observation can repeat N times)
19
  Thought: I now know the final answer
20
+ Final Answer: if no need to recommend product, output NO RECOMMENDATION, else recommend all relevant products based on observation result
21
  """
22
  RECO_GPT3_ADD_FORMAT_INSTRUCTIONS = """
23
  Example of recommending products
 
41
 
42
  Context: {input}
43
  {agent_scratchpad}
44
+ """
45
+ NO_RECO_OUTPUT = "NO RECOMMENDATION"
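The NO_RECO_OUTPUT sentinel is compared verbatim in reco_optim_v1; if the agent ever wraps it in punctuation or extra whitespace, a slightly more tolerant check (an assumption, not current behaviour) could normalise the string first, in the spirit of the "only contain alphabeticals" comment:

```python
# Hedged sketch: tolerant comparison against the NO_RECO_OUTPUT sentinel.
import re

from prompts.reco_prompt import NO_RECO_OUTPUT  # "NO RECOMMENDATION"


def filter_reco(reco: str):
    """Return None for 'no recommendation' outputs, otherwise pass the text through."""
    # keep only letters and spaces before comparing, so "NO RECOMMENDATION." still matches
    cleaned = re.sub(r"[^A-Za-z ]+", "", reco).strip().upper()
    return None if cleaned == NO_RECO_OUTPUT else reco
```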
sandbox.ipynb CHANGED
@@ -1,207 +1,207 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "code",
5
- "execution_count": 1,
6
- "id": "d00fd328",
7
- "metadata": {
8
- "scrolled": true
9
- },
10
- "outputs": [],
11
- "source": [
12
- "from allofresh_chatbot import AllofreshChatbot"
13
- ]
14
- },
15
- {
16
- "cell_type": "code",
17
- "execution_count": 2,
18
- "id": "a460d797",
19
- "metadata": {},
20
- "outputs": [],
21
- "source": [
22
- "cb = AllofreshChatbot(debug=True, streaming=True)"
23
- ]
24
- },
25
- {
26
- "cell_type": "code",
27
- "execution_count": 4,
28
- "id": "9fac3062",
29
- "metadata": {},
30
- "outputs": [
31
- {
32
- "data": {
33
- "text/plain": [
34
- "AzureChatOpenAI(verbose=False, callbacks=[<langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler object at 0x7f0110661520>], callback_manager=None, client=<class 'openai.api_resources.chat_completion.ChatCompletion'>, model_name='gpt-4', temperature=0.0, model_kwargs={}, openai_api_key='27becc6ad5ad4bf598e283834b2283d2', openai_organization='org-jh9tj9m1gO54wupk6wUxID6V', request_timeout=None, max_retries=6, streaming=True, n=1, max_tokens=None, deployment_name='dev-gpt-4', openai_api_type='azure', openai_api_base='https://dev-gpt.openai.azure.com/', openai_api_version='2023-03-15-preview')"
35
- ]
36
- },
37
- "execution_count": 4,
38
- "metadata": {},
39
- "output_type": "execute_result"
40
- }
41
- ],
42
- "source": [
43
- "cb.llms[\"gpt-4-streaming\"]"
44
- ]
45
- },
46
- {
47
- "cell_type": "code",
48
- "execution_count": 3,
49
- "id": "2df2878f",
50
- "metadata": {},
51
- "outputs": [
52
- {
53
- "ename": "ValueError",
54
- "evalue": "`run` not supported when there is not exactly one output key. Got ['output', 'intermediate_steps'].",
55
- "output_type": "error",
56
- "traceback": [
57
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
58
- "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
59
- "\u001b[0;32m/tmp/ipykernel_271/3973943316.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mcb\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mans_agent\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"halo!\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
60
- "\u001b[0;32m~/anaconda3/lib/python3.9/site-packages/langchain/chains/base.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, callbacks, *args, **kwargs)\u001b[0m\n\u001b[1;32m 226\u001b[0m \u001b[0;34m\"\"\"Run the chain as text in, text out or multiple variables, text out.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 227\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_keys\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 228\u001b[0;31m raise ValueError(\n\u001b[0m\u001b[1;32m 229\u001b[0m \u001b[0;34mf\"`run` not supported when there is not exactly \"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 230\u001b[0m \u001b[0;34mf\"one output key. Got {self.output_keys}.\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
61
- "\u001b[0;31mValueError\u001b[0m: `run` not supported when there is not exactly one output key. Got ['output', 'intermediate_steps']."
62
- ]
63
- }
64
- ],
65
- "source": [
66
- "cb.ans_agent.run(\"halo!\")"
67
- ]
68
- },
69
- {
70
- "cell_type": "code",
71
- "execution_count": 4,
72
- "id": "d88b15ff",
73
- "metadata": {},
74
- "outputs": [
75
- {
76
- "data": {
77
- "text/plain": [
78
- "ConversationBufferMemory(chat_memory=ChatMessageHistory(messages=[]), output_key=None, input_key=None, return_messages=False, human_prefix='Human', ai_prefix='AI', memory_key='chat_history')"
79
- ]
80
- },
81
- "execution_count": 4,
82
- "metadata": {},
83
- "output_type": "execute_result"
84
- }
85
- ],
86
- "source": [
87
- "cb.ans_memory"
88
- ]
89
- },
90
- {
91
- "cell_type": "code",
92
- "execution_count": 4,
93
- "id": "4b28b557",
94
- "metadata": {},
95
- "outputs": [
96
- {
97
- "data": {
98
- "text/plain": [
99
- "AgentExecutor(memory=ConversationBufferMemory(chat_memory=ChatMessageHistory(messages=[HumanMessage(content='halo', additional_kwargs={}, example=False), AIMessage(content='Halo! Selamat datang di Allofresh. Bagaimana saya bisa membantu Anda hari ini?', additional_kwargs={}, example=False)]), output_key='output', input_key=None, return_messages=False, human_prefix='Human', ai_prefix='AI', memory_key='chat_history'), callbacks=None, callback_manager=None, verbose=True, agent=ConversationalAgent(llm_chain=LLMChain(memory=None, callbacks=None, callback_manager=None, verbose=False, prompt=PromptTemplate(input_variables=['input', 'chat_history', 'agent_scratchpad'], output_parser=None, partial_variables={}, template=\"\\nYou are Allofresh-Assistant, an AI language model that has been trained to serve Allofresh, \\nan online e-grocery platform selling supermarket products with a focus on fresh produces. \\nYour primary function is to assist customers with their shopping needs, \\nincluding but not limited to answering questions on products and services offered Allofresh.\\n\\nYou have access to the supermarket's knowledge base (products, vouchers, etc.). \\nYou should use this information to provide accurate and helpful responses to customer inquiries. \\nYou must remember the name and description of each tool. \\nCustomers might give you questions which you can answer without tools, \\nbut questions which requires specific knowledge regarding the supermarket must be validated to the knowledge base. \\nIf you can't answer a question with or without tools, politely apologize that you don't know.\\n\\nYou must answer in formal yet friendly bahasa Indonesia.\\n\\n\\nTOOLS:\\n------\\n\\n\\n> Product Search: \\n To search for products in Allofresh's Database. \\n Always use this to verify product names. \\n Outputs product names and prices\\n \\n\\nTo use a tool, please use the following format:\\n\\n```\\nThought: Do I need to use a tool? Yes\\nAction: the action to take, should be one of [Product Search]\\nAction Input: the input to the action\\nObservation: the result of the action\\n```\\n\\nWhen you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:\\n\\n```\\nThought: Do I need to use a tool? No\\nAI: [your response here]\\n```\\n\\n\\nYou are very strict on correctness and will never fake an information regarding the supermarket (product names, location, price, vouchers, etc.).\\nYou must validate every information related to Allofresh to Allofresh's knowledge base \\nYou must answer the user's question as informative as possible\\n\\nTake into account the previous conversation history:\\n{chat_history}\\n\\nBegin! 
Remember you must give the final answer in bahasa indonesia\\n\\nNew Input: {input}\\n{agent_scratchpad}\\n...\\n\", template_format='f-string', validate_template=True), llm=AzureChatOpenAI(verbose=False, callbacks=[<utils.FinalStreamingStdOutCallbackHandler object at 0x7f9bcb4fe5e0>], callback_manager=None, client=<class 'openai.api_resources.chat_completion.ChatCompletion'>, model_name='gpt-4', temperature=0.0, model_kwargs={}, openai_api_key='27becc6ad5ad4bf598e283834b2283d2', openai_organization='org-jh9tj9m1gO54wupk6wUxID6V', request_timeout=None, max_retries=6, streaming=True, n=1, max_tokens=None, deployment_name='dev-gpt-4', openai_api_type='azure', openai_api_base='https://dev-gpt.openai.azure.com/', openai_api_version='2023-03-15-preview'), output_key='text'), output_parser=ConvoOutputParser(ai_prefix='AI'), allowed_tools=['Product Search'], ai_prefix='AI'), tools=[Tool(name='Product Search', description=\"\\n To search for products in Allofresh's Database. \\n Always use this to verify product names. \\n Outputs product names and prices\\n \", args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, func=<function lctool_search_allo_api at 0x7f9bcb4edd30>, coroutine=None)], return_intermediate_steps=True, max_iterations=15, max_execution_time=None, early_stopping_method='force', handle_parsing_errors=False)"
100
- ]
101
- },
102
- "execution_count": 4,
103
- "metadata": {},
104
- "output_type": "execute_result"
105
- }
106
- ],
107
- "source": [
108
- "cb.ans_agent"
109
- ]
110
- },
111
- {
112
- "cell_type": "code",
113
- "execution_count": 4,
114
- "id": "b389bbe3",
115
- "metadata": {
116
- "scrolled": true
117
- },
118
- "outputs": [
119
- {
120
- "name": "stdout",
121
- "output_type": "stream",
122
- "text": [
123
- "\n",
124
- "\n",
125
- "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n"
126
- ]
127
- },
128
- {
129
- "name": "stderr",
130
- "output_type": "stream",
131
- "text": [
132
- "/home/ardyh/anaconda3/lib/python3.9/site-packages/langchain/callbacks/manager.py:90: RuntimeWarning: coroutine 'AsyncIteratorCallbackHandler.on_llm_start' was never awaited\n",
133
- " getattr(handler, event_name)(*args, **kwargs)\n",
134
- "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n",
135
- "/home/ardyh/anaconda3/lib/python3.9/site-packages/langchain/callbacks/manager.py:90: RuntimeWarning: coroutine 'AsyncIteratorCallbackHandler.on_llm_new_token' was never awaited\n",
136
- " getattr(handler, event_name)(*args, **kwargs)\n",
137
- "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n"
138
- ]
139
- },
140
- {
141
- "name": "stdout",
142
- "output_type": "stream",
143
- "text": [
144
- "\u001b[32;1m\u001b[1;3mThought: Do I need to use a tool? No\n",
145
- "AI: Halo! Selamat datang di Allofresh. Bagaimana saya bisa membantu Anda hari ini?\u001b[0m\n",
146
- "\n",
147
- "\u001b[1m> Finished chain.\u001b[0m\n"
148
- ]
149
- },
150
- {
151
- "name": "stderr",
152
- "output_type": "stream",
153
- "text": [
154
- "/home/ardyh/anaconda3/lib/python3.9/site-packages/langchain/callbacks/manager.py:90: RuntimeWarning: coroutine 'AsyncIteratorCallbackHandler.on_llm_end' was never awaited\n",
155
- " getattr(handler, event_name)(*args, **kwargs)\n",
156
- "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n"
157
- ]
158
- },
159
- {
160
- "data": {
161
- "text/plain": [
162
- "{'input': 'hi!',\n",
163
- " 'chat_history': '',\n",
164
- " 'output': 'Halo! Selamat datang di Allofresh. Bagaimana saya bisa membantu Anda hari ini?',\n",
165
- " 'intermediate_steps': []}"
166
- ]
167
- },
168
- "execution_count": 4,
169
- "metadata": {},
170
- "output_type": "execute_result"
171
- }
172
- ],
173
- "source": [
174
- "next(cb.answer_agent_stream(\"hi!\"))"
175
- ]
176
- },
177
- {
178
- "cell_type": "code",
179
- "execution_count": null,
180
- "id": "f3cbf6ba",
181
- "metadata": {},
182
- "outputs": [],
183
- "source": []
184
- }
185
- ],
186
- "metadata": {
187
- "kernelspec": {
188
- "display_name": "Python 3 (ipykernel)",
189
- "language": "python",
190
- "name": "python3"
191
- },
192
- "language_info": {
193
- "codemirror_mode": {
194
- "name": "ipython",
195
- "version": 3
196
- },
197
- "file_extension": ".py",
198
- "mimetype": "text/x-python",
199
- "name": "python",
200
- "nbconvert_exporter": "python",
201
- "pygments_lexer": "ipython3",
202
- "version": "3.9.7"
203
- }
204
- },
205
- "nbformat": 4,
206
- "nbformat_minor": 5
207
- }
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "d00fd328",
7
+ "metadata": {
8
+ "scrolled": true
9
+ },
10
+ "outputs": [],
11
+ "source": [
12
+ "from allofresh_chatbot import AllofreshChatbot"
13
+ ]
14
+ },
15
+ {
16
+ "cell_type": "code",
17
+ "execution_count": 2,
18
+ "id": "a460d797",
19
+ "metadata": {},
20
+ "outputs": [],
21
+ "source": [
22
+ "cb = AllofreshChatbot(debug=True, streaming=True)"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "code",
27
+ "execution_count": 4,
28
+ "id": "9fac3062",
29
+ "metadata": {},
30
+ "outputs": [
31
+ {
32
+ "data": {
33
+ "text/plain": [
34
+ "AzureChatOpenAI(verbose=False, callbacks=[<langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler object at 0x7f0110661520>], callback_manager=None, client=<class 'openai.api_resources.chat_completion.ChatCompletion'>, model_name='gpt-4', temperature=0.0, model_kwargs={}, openai_api_key='27becc6ad5ad4bf598e283834b2283d2', openai_organization='org-jh9tj9m1gO54wupk6wUxID6V', request_timeout=None, max_retries=6, streaming=True, n=1, max_tokens=None, deployment_name='dev-gpt-4', openai_api_type='azure', openai_api_base='https://dev-gpt.openai.azure.com/', openai_api_version='2023-03-15-preview')"
35
+ ]
36
+ },
37
+ "execution_count": 4,
38
+ "metadata": {},
39
+ "output_type": "execute_result"
40
+ }
41
+ ],
42
+ "source": [
43
+ "cb.llms[\"gpt-4-streaming\"]"
44
+ ]
45
+ },
46
+ {
47
+ "cell_type": "code",
48
+ "execution_count": 3,
49
+ "id": "2df2878f",
50
+ "metadata": {},
51
+ "outputs": [
52
+ {
53
+ "ename": "ValueError",
54
+ "evalue": "`run` not supported when there is not exactly one output key. Got ['output', 'intermediate_steps'].",
55
+ "output_type": "error",
56
+ "traceback": [
57
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
58
+ "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
59
+ "\u001b[0;32m/tmp/ipykernel_271/3973943316.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mcb\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mans_agent\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"halo!\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
60
+ "\u001b[0;32m~/anaconda3/lib/python3.9/site-packages/langchain/chains/base.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, callbacks, *args, **kwargs)\u001b[0m\n\u001b[1;32m 226\u001b[0m \u001b[0;34m\"\"\"Run the chain as text in, text out or multiple variables, text out.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 227\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_keys\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 228\u001b[0;31m raise ValueError(\n\u001b[0m\u001b[1;32m 229\u001b[0m \u001b[0;34mf\"`run` not supported when there is not exactly \"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 230\u001b[0m \u001b[0;34mf\"one output key. Got {self.output_keys}.\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
61
+ "\u001b[0;31mValueError\u001b[0m: `run` not supported when there is not exactly one output key. Got ['output', 'intermediate_steps']."
62
+ ]
63
+ }
64
+ ],
65
+ "source": [
66
+ "cb.ans_agent.run(\"halo!\")"
67
+ ]
68
+ },
69
+ {
70
+ "cell_type": "code",
71
+ "execution_count": 4,
72
+ "id": "d88b15ff",
73
+ "metadata": {},
74
+ "outputs": [
75
+ {
76
+ "data": {
77
+ "text/plain": [
78
+ "ConversationBufferMemory(chat_memory=ChatMessageHistory(messages=[]), output_key=None, input_key=None, return_messages=False, human_prefix='Human', ai_prefix='AI', memory_key='chat_history')"
79
+ ]
80
+ },
81
+ "execution_count": 4,
82
+ "metadata": {},
83
+ "output_type": "execute_result"
84
+ }
85
+ ],
86
+ "source": [
87
+ "cb.ans_memory"
88
+ ]
89
+ },
90
+ {
91
+ "cell_type": "code",
92
+ "execution_count": 4,
93
+ "id": "4b28b557",
94
+ "metadata": {},
95
+ "outputs": [
96
+ {
97
+ "data": {
98
+ "text/plain": [
99
+ "AgentExecutor(memory=ConversationBufferMemory(chat_memory=ChatMessageHistory(messages=[HumanMessage(content='halo', additional_kwargs={}, example=False), AIMessage(content='Halo! Selamat datang di Allofresh. Bagaimana saya bisa membantu Anda hari ini?', additional_kwargs={}, example=False)]), output_key='output', input_key=None, return_messages=False, human_prefix='Human', ai_prefix='AI', memory_key='chat_history'), callbacks=None, callback_manager=None, verbose=True, agent=ConversationalAgent(llm_chain=LLMChain(memory=None, callbacks=None, callback_manager=None, verbose=False, prompt=PromptTemplate(input_variables=['input', 'chat_history', 'agent_scratchpad'], output_parser=None, partial_variables={}, template=\"\\nYou are Allofresh-Assistant, an AI language model that has been trained to serve Allofresh, \\nan online e-grocery platform selling supermarket products with a focus on fresh produces. \\nYour primary function is to assist customers with their shopping needs, \\nincluding but not limited to answering questions on products and services offered Allofresh.\\n\\nYou have access to the supermarket's knowledge base (products, vouchers, etc.). \\nYou should use this information to provide accurate and helpful responses to customer inquiries. \\nYou must remember the name and description of each tool. \\nCustomers might give you questions which you can answer without tools, \\nbut questions which requires specific knowledge regarding the supermarket must be validated to the knowledge base. \\nIf you can't answer a question with or without tools, politely apologize that you don't know.\\n\\nYou must answer in formal yet friendly bahasa Indonesia.\\n\\n\\nTOOLS:\\n------\\n\\n\\n> Product Search: \\n To search for products in Allofresh's Database. \\n Always use this to verify product names. \\n Outputs product names and prices\\n \\n\\nTo use a tool, please use the following format:\\n\\n```\\nThought: Do I need to use a tool? Yes\\nAction: the action to take, should be one of [Product Search]\\nAction Input: the input to the action\\nObservation: the result of the action\\n```\\n\\nWhen you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:\\n\\n```\\nThought: Do I need to use a tool? No\\nAI: [your response here]\\n```\\n\\n\\nYou are very strict on correctness and will never fake an information regarding the supermarket (product names, location, price, vouchers, etc.).\\nYou must validate every information related to Allofresh to Allofresh's knowledge base \\nYou must answer the user's question as informative as possible\\n\\nTake into account the previous conversation history:\\n{chat_history}\\n\\nBegin! 
Remember you must give the final answer in bahasa indonesia\\n\\nNew Input: {input}\\n{agent_scratchpad}\\n...\\n\", template_format='f-string', validate_template=True), llm=AzureChatOpenAI(verbose=False, callbacks=[<utils.FinalStreamingStdOutCallbackHandler object at 0x7f9bcb4fe5e0>], callback_manager=None, client=<class 'openai.api_resources.chat_completion.ChatCompletion'>, model_name='gpt-4', temperature=0.0, model_kwargs={}, openai_api_key='27becc6ad5ad4bf598e283834b2283d2', openai_organization='org-jh9tj9m1gO54wupk6wUxID6V', request_timeout=None, max_retries=6, streaming=True, n=1, max_tokens=None, deployment_name='dev-gpt-4', openai_api_type='azure', openai_api_base='https://dev-gpt.openai.azure.com/', openai_api_version='2023-03-15-preview'), output_key='text'), output_parser=ConvoOutputParser(ai_prefix='AI'), allowed_tools=['Product Search'], ai_prefix='AI'), tools=[Tool(name='Product Search', description=\"\\n To search for products in Allofresh's Database. \\n Always use this to verify product names. \\n Outputs product names and prices\\n \", args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, func=<function lctool_search_allo_api at 0x7f9bcb4edd30>, coroutine=None)], return_intermediate_steps=True, max_iterations=15, max_execution_time=None, early_stopping_method='force', handle_parsing_errors=False)"
100
+ ]
101
+ },
102
+ "execution_count": 4,
103
+ "metadata": {},
104
+ "output_type": "execute_result"
105
+ }
106
+ ],
107
+ "source": [
108
+ "cb.ans_agent"
109
+ ]
110
+ },
111
+ {
112
+ "cell_type": "code",
113
+ "execution_count": 4,
114
+ "id": "b389bbe3",
115
+ "metadata": {
116
+ "scrolled": true
117
+ },
118
+ "outputs": [
119
+ {
120
+ "name": "stdout",
121
+ "output_type": "stream",
122
+ "text": [
123
+ "\n",
124
+ "\n",
125
+ "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n"
126
+ ]
127
+ },
128
+ {
129
+ "name": "stderr",
130
+ "output_type": "stream",
131
+ "text": [
132
+ "/home/ardyh/anaconda3/lib/python3.9/site-packages/langchain/callbacks/manager.py:90: RuntimeWarning: coroutine 'AsyncIteratorCallbackHandler.on_llm_start' was never awaited\n",
133
+ " getattr(handler, event_name)(*args, **kwargs)\n",
134
+ "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n",
135
+ "/home/ardyh/anaconda3/lib/python3.9/site-packages/langchain/callbacks/manager.py:90: RuntimeWarning: coroutine 'AsyncIteratorCallbackHandler.on_llm_new_token' was never awaited\n",
136
+ " getattr(handler, event_name)(*args, **kwargs)\n",
137
+ "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n"
138
+ ]
139
+ },
140
+ {
141
+ "name": "stdout",
142
+ "output_type": "stream",
143
+ "text": [
144
+ "\u001b[32;1m\u001b[1;3mThought: Do I need to use a tool? No\n",
145
+ "AI: Halo! Selamat datang di Allofresh. Bagaimana saya bisa membantu Anda hari ini?\u001b[0m\n",
146
+ "\n",
147
+ "\u001b[1m> Finished chain.\u001b[0m\n"
148
+ ]
149
+ },
150
+ {
151
+ "name": "stderr",
152
+ "output_type": "stream",
153
+ "text": [
154
+ "/home/ardyh/anaconda3/lib/python3.9/site-packages/langchain/callbacks/manager.py:90: RuntimeWarning: coroutine 'AsyncIteratorCallbackHandler.on_llm_end' was never awaited\n",
155
+ " getattr(handler, event_name)(*args, **kwargs)\n",
156
+ "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n"
157
+ ]
158
+ },
159
+ {
160
+ "data": {
161
+ "text/plain": [
162
+ "{'input': 'hi!',\n",
163
+ " 'chat_history': '',\n",
164
+ " 'output': 'Halo! Selamat datang di Allofresh. Bagaimana saya bisa membantu Anda hari ini?',\n",
165
+ " 'intermediate_steps': []}"
166
+ ]
167
+ },
168
+ "execution_count": 4,
169
+ "metadata": {},
170
+ "output_type": "execute_result"
171
+ }
172
+ ],
173
+ "source": [
174
+ "next(cb.answer_agent_stream(\"hi!\"))"
175
+ ]
176
+ },
177
+ {
178
+ "cell_type": "code",
179
+ "execution_count": null,
180
+ "id": "f3cbf6ba",
181
+ "metadata": {},
182
+ "outputs": [],
183
+ "source": []
184
+ }
185
+ ],
186
+ "metadata": {
187
+ "kernelspec": {
188
+ "display_name": "Python 3 (ipykernel)",
189
+ "language": "python",
190
+ "name": "python3"
191
+ },
192
+ "language_info": {
193
+ "codemirror_mode": {
194
+ "name": "ipython",
195
+ "version": 3
196
+ },
197
+ "file_extension": ".py",
198
+ "mimetype": "text/x-python",
199
+ "name": "python",
200
+ "nbconvert_exporter": "python",
201
+ "pygments_lexer": "ipython3",
202
+ "version": "3.9.7"
203
+ }
204
+ },
205
+ "nbformat": 4,
206
+ "nbformat_minor": 5
207
+ }