ar-dy committed
Commit 13727cd · 1 Parent(s): d042ca4
Files changed (6)
  1. .gitignore +1 -0
  2. allofresh_chatbot.py +32 -2
  3. app.py +37 -10
  4. prompts/ans_prompt.py +25 -0
  5. prompts/mod_prompt.py +2 -3
  6. sandbox.ipynb +207 -0
.gitignore CHANGED
@@ -1,3 +1,4 @@
.env
**__init__.py
__pycache__
+ .ipynb_checkpoints
allofresh_chatbot.py CHANGED
@@ -13,7 +13,7 @@ from langchain.prompts import PromptTemplate

from utils import lctool_search_allo_api, cut_dialogue_history
from prompts.mod_prompt import MOD_PROMPT
- from prompts.ans_prompt import ANS_PREFIX, ANS_FORMAT_INSTRUCTIONS, ANS_SUFFIX
+ from prompts.ans_prompt import ANS_PREFIX, ANS_FORMAT_INSTRUCTIONS, ANS_SUFFIX, ANS_CHAIN_PROMPT
from prompts.reco_prompt import RECO_PREFIX, RECO_FORMAT_INSTRUCTIONS, RECO_SUFFIX

load_dotenv()
@@ -30,6 +30,7 @@ class AllofreshChatbot():
        # init answering agent
        self.ans_memory = self.init_ans_memory()
        self.ans_agent = self.init_ans_agent()
+       self.ans_chain = self.init_ans_chain()
        # init reco agent
        self.reco_agent = self.init_reco_agent()

@@ -68,7 +69,7 @@ class AllofreshChatbot():
    def init_mod_chain(self):
        mod_prompt = PromptTemplate(
            template=MOD_PROMPT,
-           input_variables=["query"]
+           input_variables=["input"]
        )

        # Define the first LLM chain with the shared AzureOpenAI object and prompt template
@@ -104,6 +105,15 @@ class AllofreshChatbot():
            }
        )

+   def init_ans_chain(self):
+       ans_prompt = PromptTemplate(
+           template=ANS_CHAIN_PROMPT,
+           input_variables=["input", "chat_history"]
+       )
+
+       # Define the answering LLM chain with the shared AzureOpenAI object and prompt template
+       return LLMChain(llm=self.llms["gpt-4"], prompt=ans_prompt)
+
    def ans_pipeline(self, text, debug_cost=False, keep_last_n_words=500):
        try:
            self.ans_agent.memory.buffer = cut_dialogue_history(self.ans_agent.memory.buffer, keep_last_n_words=keep_last_n_words)
@@ -170,4 +180,24 @@ class AllofreshChatbot():
        return (
            "Maaf saya tidak bisa membantu Anda untuk itu... tapi silakan tanya Allofresh-Assistant apa saja terkait kebutuhan berbelanja Anda!",
            ""
        )
+
+   def answer_optim_v1(self, query, chat_history):
+       """
+       We removed the tools from the 'answering' component and replaced them with a simple LLM chain.
+       """
+       # moderate the incoming query
+       mod_verdict = self.mod_chain.run({"input": query})
+       # if the query passes moderation
+       if mod_verdict == "True":
+           # answer the question
+           answer = self.ans_chain.run({"input": query, "chat_history": chat_history})
+           # recommend a follow-up
+           reco = self.reco_agent.run({"input": chat_history})
+           # construct the output
+           return (answer, reco)
+       else:
+           return (
+               "Maaf saya tidak bisa membantu Anda untuk itu... tapi silakan tanya Allofresh-Assistant apa saja terkait makanan atau resep yang Anda inginkan!",
+               None
+           )
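Note: a minimal usage sketch of the new answer_optim_v1 entry point, assuming a configured AllofreshChatbot (valid Azure OpenAI credentials loaded from .env) and a serialized chat history passed as a plain string; the example query is hypothetical.

from allofresh_chatbot import AllofreshChatbot

cb = AllofreshChatbot(debug=True)

# First turn, so the serialized chat history is empty; the query is a made-up example.
answer, reco = cb.answer_optim_v1(
    query="Resep apa yang cocok untuk brokoli?",
    chat_history="",
)

# If moderation rejects the query, the method instead returns the apology string and None.
print(answer)
print(reco)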
app.py CHANGED
@@ -4,6 +4,7 @@ from loguru import logger
from pydantic import BaseModel

from allofresh_chatbot import AllofreshChatbot
+ from utils import cut_dialogue_history

allo_chatbot = AllofreshChatbot(debug=True)

@@ -11,21 +12,41 @@ class Message(BaseModel):
    role: str
    content: str

- async def predict(input, history):
+ def fetch_messages(history):
+     """
+     Fetch the messages from the chat history.
+     """
+     return [(history[i]["content"], history[i+1]["content"]) for i in range(0, len(history)-1, 2)]
+
+ def preproc_history(history):
+     """
+     Clean the chat history by removing the None values.
+     """
+     clean_history = [Message(**msg) for msg in history if msg["content"] is not None]
+     return str(cut_dialogue_history(clean_history))
+
+ def user_input(input, history):
+     """
+     Add the user input to the chat history.
+     """
+     history.append({'role': 'user', 'content': input})
+     history.append({'role': 'assistant', 'content': None})
+
+     return fetch_messages(history), history
+
+ def predict_answer(input, history):
    """
    Predict the response of the chatbot and complete a running list of chat history.
    """
-   history.append({"role": "user", "content": input})
+   answer = allo_chatbot.answer_optim_v1(input, preproc_history(history))

-   answer, reco = allo_chatbot.answer(input)
+   history.append({'role': 'user', 'content': None})
+   history.append({'role': 'assistant', 'content': answer})

-   history.append({"role": "assistant", "content": answer})
-   if len(reco) > 0:
-       history.append({"role": "user", "content": ""})
-       history.append({"role": "assistant", "content": reco})
+   return fetch_messages(history), history

-   messages = [(history[i]["content"], history[i+1]["content"]) for i in range(0, len(history)-1, 2)]
-   return messages, history
+ def predict_reco(input, history):
+     pass

"""
Gradio Blocks low-level API that allows to create custom web applications (here our chat app)
@@ -36,7 +57,13 @@ with gr.Blocks() as app:
    state = gr.State([])
    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="Enter text, then press enter").style(container=False)
-   txt.submit(predict, [txt, state], [chatbot, state])
+   txt.submit(
+       user_input, [txt, state], [chatbot, state]
+   ).success(
+       predict_answer, [txt, state], [chatbot, state]
+   ).success(
+       predict_reco, [txt, state], [chatbot, state]
+   )

app.queue(concurrency_count=4)
app.launch()
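Note: the chained submit/success events above stage the UI updates (echo the user message first, then the answer, then the recommendation). A stripped-down sketch of that pattern with placeholder handlers, assuming a gradio version that supports .success() chaining as used in this commit:

import gradio as gr

def show_user(msg, history):
    history.append((msg, None))            # echo the user message immediately
    return history, history

def show_answer(msg, history):
    history[-1] = (history[-1][0], "...")  # placeholder for the model's answer
    return history, history

def show_reco(msg, history):
    history.append((None, "..."))          # placeholder for the recommendation
    return history, history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    state = gr.State([])
    txt = gr.Textbox(show_label=False, placeholder="Enter text, then press enter")
    # each .success() step only runs if the previous step finished without raising
    txt.submit(show_user, [txt, state], [chatbot, state]) \
        .success(show_answer, [txt, state], [chatbot, state]) \
        .success(show_reco, [txt, state], [chatbot, state])

demo.queue(concurrency_count=4)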
prompts/ans_prompt.py CHANGED
@@ -55,4 +55,29 @@ Begin! Remember you must give the final answer in bahasa indonesia
New Input: {input}
{agent_scratchpad}
...
"""
+ ANS_CHAIN_PROMPT = """
+ You are Allofresh-Assistant, an AI language model that has been trained to serve Allofresh,
+ an online e-grocery platform selling supermarket products with a focus on fresh produce.
+ Your primary function is to assist customers with their shopping needs,
+ including but not limited to answering questions about the products and services offered by Allofresh.
+
+ You can answer questions about what people can do with the products they buy at Allofresh,
+ e.g. food and recipes, as this nudges people to buy products.
+
+ If a customer asks you a question that falls outside of your function or knowledge as an online supermarket assistant,
+ you must politely decline to answer and redirect the conversation back to your area of expertise.
+
+ You are very strict about correctness and will never fabricate information about the supermarket (product names, location, price, vouchers, etc.).
+ When you encounter a question that requires validation against the supermarket's knowledge base, politely ask the user to wait while you validate the information.
+
+ Take into account the previous conversation history:
+ {chat_history}
+
+ New user input: {input}
+
+ Remember! You must answer in formal yet friendly bahasa Indonesia.
+
+ Answer:
+ ...
+ """
prompts/mod_prompt.py CHANGED
@@ -2,13 +2,12 @@ MOD_PROMPT = """
You are MODERATOR.
MODERATOR MUST ONLY classify whether a certain passage belongs to a certain topic.

- INPUT: {query}
+ INPUT: {input}

INSTRUCTIONS:
Classify WHETHER OR NOT input is RELATED to EITHER of the following:
- greetings
- supermarket shopping
- general questions/inquiry on foods/recipe
+ - foods, ingredients, food recipes

NOTES:
- the query might be in bahasa indonesia, english, or a combination of both. you must take into account for both languages
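Note: answer_optim_v1 gates on this moderation chain returning the literal string "True". A minimal sketch for exercising the chain on its own, assuming an initialized chatbot; the queries are hypothetical:

from allofresh_chatbot import AllofreshChatbot

cb = AllofreshChatbot(debug=True)

for q in ["halo!", "resep sup brokoli", "bagaimana cuaca besok?"]:
    verdict = cb.mod_chain.run({"input": q})
    # answer_optim_v1 only proceeds when the verdict is exactly "True"
    print(q, "->", verdict)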
sandbox.ipynb ADDED
@@ -0,0 +1,207 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "d00fd328",
7
+ "metadata": {
8
+ "scrolled": true
9
+ },
10
+ "outputs": [],
11
+ "source": [
12
+ "from allofresh_chatbot import AllofreshChatbot"
13
+ ]
14
+ },
15
+ {
16
+ "cell_type": "code",
17
+ "execution_count": 2,
18
+ "id": "a460d797",
19
+ "metadata": {},
20
+ "outputs": [],
21
+ "source": [
22
+ "cb = AllofreshChatbot(debug=True, streaming=True)"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "code",
27
+ "execution_count": 4,
28
+ "id": "9fac3062",
29
+ "metadata": {},
30
+ "outputs": [
31
+ {
32
+ "data": {
33
+ "text/plain": [
34
+ "AzureChatOpenAI(verbose=False, callbacks=[<langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler object at 0x7f0110661520>], callback_manager=None, client=<class 'openai.api_resources.chat_completion.ChatCompletion'>, model_name='gpt-4', temperature=0.0, model_kwargs={}, openai_api_key='27becc6ad5ad4bf598e283834b2283d2', openai_organization='org-jh9tj9m1gO54wupk6wUxID6V', request_timeout=None, max_retries=6, streaming=True, n=1, max_tokens=None, deployment_name='dev-gpt-4', openai_api_type='azure', openai_api_base='https://dev-gpt.openai.azure.com/', openai_api_version='2023-03-15-preview')"
35
+ ]
36
+ },
37
+ "execution_count": 4,
38
+ "metadata": {},
39
+ "output_type": "execute_result"
40
+ }
41
+ ],
42
+ "source": [
43
+ "cb.llms[\"gpt-4-streaming\"]"
44
+ ]
45
+ },
46
+ {
47
+ "cell_type": "code",
48
+ "execution_count": 3,
49
+ "id": "2df2878f",
50
+ "metadata": {},
51
+ "outputs": [
52
+ {
53
+ "ename": "ValueError",
54
+ "evalue": "`run` not supported when there is not exactly one output key. Got ['output', 'intermediate_steps'].",
55
+ "output_type": "error",
56
+ "traceback": [
57
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
58
+ "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
59
+ "\u001b[0;32m/tmp/ipykernel_271/3973943316.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mcb\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mans_agent\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"halo!\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
60
+ "\u001b[0;32m~/anaconda3/lib/python3.9/site-packages/langchain/chains/base.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, callbacks, *args, **kwargs)\u001b[0m\n\u001b[1;32m 226\u001b[0m \u001b[0;34m\"\"\"Run the chain as text in, text out or multiple variables, text out.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 227\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_keys\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 228\u001b[0;31m raise ValueError(\n\u001b[0m\u001b[1;32m 229\u001b[0m \u001b[0;34mf\"`run` not supported when there is not exactly \"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 230\u001b[0m \u001b[0;34mf\"one output key. Got {self.output_keys}.\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
61
+ "\u001b[0;31mValueError\u001b[0m: `run` not supported when there is not exactly one output key. Got ['output', 'intermediate_steps']."
62
+ ]
63
+ }
64
+ ],
65
+ "source": [
66
+ "cb.ans_agent.run(\"halo!\")"
67
+ ]
68
+ },
69
+ {
70
+ "cell_type": "code",
71
+ "execution_count": 4,
72
+ "id": "d88b15ff",
73
+ "metadata": {},
74
+ "outputs": [
75
+ {
76
+ "data": {
77
+ "text/plain": [
78
+ "ConversationBufferMemory(chat_memory=ChatMessageHistory(messages=[]), output_key=None, input_key=None, return_messages=False, human_prefix='Human', ai_prefix='AI', memory_key='chat_history')"
79
+ ]
80
+ },
81
+ "execution_count": 4,
82
+ "metadata": {},
83
+ "output_type": "execute_result"
84
+ }
85
+ ],
86
+ "source": [
87
+ "cb.ans_memory"
88
+ ]
89
+ },
90
+ {
91
+ "cell_type": "code",
92
+ "execution_count": 4,
93
+ "id": "4b28b557",
94
+ "metadata": {},
95
+ "outputs": [
96
+ {
97
+ "data": {
98
+ "text/plain": [
99
+ "AgentExecutor(memory=ConversationBufferMemory(chat_memory=ChatMessageHistory(messages=[HumanMessage(content='halo', additional_kwargs={}, example=False), AIMessage(content='Halo! Selamat datang di Allofresh. Bagaimana saya bisa membantu Anda hari ini?', additional_kwargs={}, example=False)]), output_key='output', input_key=None, return_messages=False, human_prefix='Human', ai_prefix='AI', memory_key='chat_history'), callbacks=None, callback_manager=None, verbose=True, agent=ConversationalAgent(llm_chain=LLMChain(memory=None, callbacks=None, callback_manager=None, verbose=False, prompt=PromptTemplate(input_variables=['input', 'chat_history', 'agent_scratchpad'], output_parser=None, partial_variables={}, template=\"\\nYou are Allofresh-Assistant, an AI language model that has been trained to serve Allofresh, \\nan online e-grocery platform selling supermarket products with a focus on fresh produces. \\nYour primary function is to assist customers with their shopping needs, \\nincluding but not limited to answering questions on products and services offered Allofresh.\\n\\nYou have access to the supermarket's knowledge base (products, vouchers, etc.). \\nYou should use this information to provide accurate and helpful responses to customer inquiries. \\nYou must remember the name and description of each tool. \\nCustomers might give you questions which you can answer without tools, \\nbut questions which requires specific knowledge regarding the supermarket must be validated to the knowledge base. \\nIf you can't answer a question with or without tools, politely apologize that you don't know.\\n\\nYou must answer in formal yet friendly bahasa Indonesia.\\n\\n\\nTOOLS:\\n------\\n\\n\\n> Product Search: \\n To search for products in Allofresh's Database. \\n Always use this to verify product names. \\n Outputs product names and prices\\n \\n\\nTo use a tool, please use the following format:\\n\\n```\\nThought: Do I need to use a tool? Yes\\nAction: the action to take, should be one of [Product Search]\\nAction Input: the input to the action\\nObservation: the result of the action\\n```\\n\\nWhen you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:\\n\\n```\\nThought: Do I need to use a tool? No\\nAI: [your response here]\\n```\\n\\n\\nYou are very strict on correctness and will never fake an information regarding the supermarket (product names, location, price, vouchers, etc.).\\nYou must validate every information related to Allofresh to Allofresh's knowledge base \\nYou must answer the user's question as informative as possible\\n\\nTake into account the previous conversation history:\\n{chat_history}\\n\\nBegin! 
Remember you must give the final answer in bahasa indonesia\\n\\nNew Input: {input}\\n{agent_scratchpad}\\n...\\n\", template_format='f-string', validate_template=True), llm=AzureChatOpenAI(verbose=False, callbacks=[<utils.FinalStreamingStdOutCallbackHandler object at 0x7f9bcb4fe5e0>], callback_manager=None, client=<class 'openai.api_resources.chat_completion.ChatCompletion'>, model_name='gpt-4', temperature=0.0, model_kwargs={}, openai_api_key='27becc6ad5ad4bf598e283834b2283d2', openai_organization='org-jh9tj9m1gO54wupk6wUxID6V', request_timeout=None, max_retries=6, streaming=True, n=1, max_tokens=None, deployment_name='dev-gpt-4', openai_api_type='azure', openai_api_base='https://dev-gpt.openai.azure.com/', openai_api_version='2023-03-15-preview'), output_key='text'), output_parser=ConvoOutputParser(ai_prefix='AI'), allowed_tools=['Product Search'], ai_prefix='AI'), tools=[Tool(name='Product Search', description=\"\\n To search for products in Allofresh's Database. \\n Always use this to verify product names. \\n Outputs product names and prices\\n \", args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, func=<function lctool_search_allo_api at 0x7f9bcb4edd30>, coroutine=None)], return_intermediate_steps=True, max_iterations=15, max_execution_time=None, early_stopping_method='force', handle_parsing_errors=False)"
100
+ ]
101
+ },
102
+ "execution_count": 4,
103
+ "metadata": {},
104
+ "output_type": "execute_result"
105
+ }
106
+ ],
107
+ "source": [
108
+ "cb.ans_agent"
109
+ ]
110
+ },
111
+ {
112
+ "cell_type": "code",
113
+ "execution_count": 4,
114
+ "id": "b389bbe3",
115
+ "metadata": {
116
+ "scrolled": true
117
+ },
118
+ "outputs": [
119
+ {
120
+ "name": "stdout",
121
+ "output_type": "stream",
122
+ "text": [
123
+ "\n",
124
+ "\n",
125
+ "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n"
126
+ ]
127
+ },
128
+ {
129
+ "name": "stderr",
130
+ "output_type": "stream",
131
+ "text": [
132
+ "/home/ardyh/anaconda3/lib/python3.9/site-packages/langchain/callbacks/manager.py:90: RuntimeWarning: coroutine 'AsyncIteratorCallbackHandler.on_llm_start' was never awaited\n",
133
+ " getattr(handler, event_name)(*args, **kwargs)\n",
134
+ "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n",
135
+ "/home/ardyh/anaconda3/lib/python3.9/site-packages/langchain/callbacks/manager.py:90: RuntimeWarning: coroutine 'AsyncIteratorCallbackHandler.on_llm_new_token' was never awaited\n",
136
+ " getattr(handler, event_name)(*args, **kwargs)\n",
137
+ "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n"
138
+ ]
139
+ },
140
+ {
141
+ "name": "stdout",
142
+ "output_type": "stream",
143
+ "text": [
144
+ "\u001b[32;1m\u001b[1;3mThought: Do I need to use a tool? No\n",
145
+ "AI: Halo! Selamat datang di Allofresh. Bagaimana saya bisa membantu Anda hari ini?\u001b[0m\n",
146
+ "\n",
147
+ "\u001b[1m> Finished chain.\u001b[0m\n"
148
+ ]
149
+ },
150
+ {
151
+ "name": "stderr",
152
+ "output_type": "stream",
153
+ "text": [
154
+ "/home/ardyh/anaconda3/lib/python3.9/site-packages/langchain/callbacks/manager.py:90: RuntimeWarning: coroutine 'AsyncIteratorCallbackHandler.on_llm_end' was never awaited\n",
155
+ " getattr(handler, event_name)(*args, **kwargs)\n",
156
+ "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n"
157
+ ]
158
+ },
159
+ {
160
+ "data": {
161
+ "text/plain": [
162
+ "{'input': 'hi!',\n",
163
+ " 'chat_history': '',\n",
164
+ " 'output': 'Halo! Selamat datang di Allofresh. Bagaimana saya bisa membantu Anda hari ini?',\n",
165
+ " 'intermediate_steps': []}"
166
+ ]
167
+ },
168
+ "execution_count": 4,
169
+ "metadata": {},
170
+ "output_type": "execute_result"
171
+ }
172
+ ],
173
+ "source": [
174
+ "next(cb.answer_agent_stream(\"hi!\"))"
175
+ ]
176
+ },
177
+ {
178
+ "cell_type": "code",
179
+ "execution_count": null,
180
+ "id": "f3cbf6ba",
181
+ "metadata": {},
182
+ "outputs": [],
183
+ "source": []
184
+ }
185
+ ],
186
+ "metadata": {
187
+ "kernelspec": {
188
+ "display_name": "Python 3 (ipykernel)",
189
+ "language": "python",
190
+ "name": "python3"
191
+ },
192
+ "language_info": {
193
+ "codemirror_mode": {
194
+ "name": "ipython",
195
+ "version": 3
196
+ },
197
+ "file_extension": ".py",
198
+ "mimetype": "text/x-python",
199
+ "name": "python",
200
+ "nbconvert_exporter": "python",
201
+ "pygments_lexer": "ipython3",
202
+ "version": "3.9.7"
203
+ }
204
+ },
205
+ "nbformat": 4,
206
+ "nbformat_minor": 5
207
+ }
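Note: the ValueError in the notebook comes from calling .run() on an agent executor that exposes two output keys (output and intermediate_steps). Calling the executor directly returns the full output dict instead; a small sketch, assuming the same cb object as in the notebook:

result = cb.ans_agent({"input": "halo!"})
print(result["output"])              # the assistant's reply
print(result["intermediate_steps"])  # any tool calls made along the way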