Spaces:
Runtime error
Runtime error
initial push from existing
Browse files- .gitignore +3 -0
- allofresh_chatbot.py +173 -0
- app.py +41 -0
- prompts/ans_prompt.py +58 -0
- prompts/mod_prompt.py +17 -0
- prompts/reco_prompt.py +44 -0
- requirements.txt +3 -0
- utils.py +44 -0
.gitignore
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
.env
|
2 |
+
**__init__.py
|
3 |
+
__pycache__
|
allofresh_chatbot.py
ADDED
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from dotenv import load_dotenv
|
3 |
+
|
4 |
+
from langchain import PromptTemplate, LLMChain
|
5 |
+
from langchain.agents import initialize_agent, Tool
|
6 |
+
from langchain.chat_models import AzureChatOpenAI
|
7 |
+
from langchain.agents import ZeroShotAgent, AgentExecutor
|
8 |
+
from langchain.chains.conversation.memory import ConversationBufferMemory
|
9 |
+
from langchain.callbacks import get_openai_callback
|
10 |
+
from langchain.chains.llm import LLMChain
|
11 |
+
from langchain.llms import AzureOpenAI
|
12 |
+
from langchain.prompts import PromptTemplate
|
13 |
+
|
14 |
+
from utils import lctool_search_allo_api, cut_dialogue_history
|
15 |
+
from prompts.mod_prompt import MOD_PROMPT
|
16 |
+
from prompts.ans_prompt import ANS_PREFIX, ANS_FORMAT_INSTRUCTIONS, ANS_SUFFIX
|
17 |
+
from prompts.reco_prompt import RECO_PREFIX, RECO_FORMAT_INSTRUCTIONS, RECO_SUFFIX
|
18 |
+
|
19 |
+
load_dotenv()
|
20 |
+
|
21 |
+
class AllofreshChatbot:
    """End-to-end Allofresh shopping assistant.

    Pipeline for one user query:
      1. a moderation chain decides whether the query is on-topic,
      2. a conversational agent answers it (with product-search tool access),
      3. a second agent proposes product recommendations from the dialogue.
    """

    def __init__(self, debug=False):
        # debug toggles verbose agent traces in both agents.
        self.debug = debug

        # Shared LLM handles, keyed by model family.
        self.llms = self.init_llm()
        # Chain that classifies whether a query is in scope ("True"/"False").
        self.mod_chain = self.init_mod_chain()
        # Conversation memory + agent that answers user questions.
        # (The old code also pre-set self.ans_memory = None, which was
        # immediately overwritten — dropped as redundant.)
        self.ans_memory = self.init_ans_memory()
        self.ans_agent = self.init_ans_agent()
        # Agent that recommends products based on the conversation so far.
        self.reco_agent = self.init_reco_agent()

    def init_llm(self):
        """Instantiate the Azure OpenAI models used by the chains/agents.

        Returns:
            dict mapping "gpt-4" / "gpt-3.5" (chat models) and "gpt-3"
            (completion model) to configured LLM instances.
        """
        # Endpoint/credentials shared by every deployment; hoisted so the
        # three constructors below don't repeat the same os.getenv calls.
        common = {
            "openai_api_base": os.getenv("OPENAI_API_BASE"),
            "openai_api_key": os.getenv("OPENAI_API_KEY"),
            "openai_organization": os.getenv("OPENAI_ORGANIZATION"),
        }
        # Chat deployments additionally need the API type and version.
        chat_common = {
            **common,
            "openai_api_type": os.getenv("OPENAI_API_TYPE"),
            "openai_api_version": os.getenv("OPENAI_API_VERSION"),
        }
        # NOTE(review): env var names containing a dot ("...GPT3.5") are kept
        # for compatibility with the existing .env, but many shells cannot
        # export such names — confirm they load via python-dotenv only.
        return {
            "gpt-4": AzureChatOpenAI(
                temperature=0,
                deployment_name=os.getenv("DEPLOYMENT_NAME_GPT4"),
                model_name=os.getenv("MODEL_NAME_GPT4"),
                **chat_common,
            ),
            "gpt-3.5": AzureChatOpenAI(
                temperature=0,
                deployment_name=os.getenv("DEPLOYMENT_NAME_GPT3.5"),
                model_name=os.getenv("MODEL_NAME_GPT3.5"),
                **chat_common,
            ),
            "gpt-3": AzureOpenAI(
                temperature=0,
                deployment_name=os.getenv("DEPLOYMENT_NAME_GPT3"),
                model_name=os.getenv("MODEL_NAME_GPT3"),
                **common,
            ),
        }

    def init_mod_chain(self):
        """Build the moderation LLMChain (prompted to answer "True"/"False")."""
        mod_prompt = PromptTemplate(
            template=MOD_PROMPT,
            input_variables=["query"]
        )
        # GPT-4 is used here to keep the topical classification reliable.
        return LLMChain(llm=self.llms["gpt-4"], prompt=mod_prompt)

    def init_ans_memory(self):
        """Conversation memory shared with the answering agent."""
        return ConversationBufferMemory(memory_key="chat_history", output_key='output')

    @staticmethod
    def _product_search_tool():
        """Shared LangChain Tool hitting Allofresh's product-search API.

        Extracted because both the answering and recommendation agents
        previously duplicated this exact Tool definition inline.
        """
        return Tool(
            name="Product Search",
            func=lctool_search_allo_api,
            description="""
            To search for products in Allofresh's Database.
            Always use this to verify product names.
            Outputs product names and prices
            """
        )

    def init_ans_agent(self):
        """Build the conversational question-answering agent."""
        ans_tools = [self._product_search_tool()]

        return initialize_agent(
            ans_tools,
            self.llms["gpt-4"],
            agent="conversational-react-description",
            verbose=self.debug,
            memory=self.ans_memory,
            return_intermediate_steps=True,
            agent_kwargs={
                'prefix': ANS_PREFIX,
                # 'format_instructions': ANS_FORMAT_INSTRUCTIONS, # only needed for below gpt-4
                'suffix': ANS_SUFFIX
            }
        )

    def ans_pipeline(self, text, debug_cost=False, keep_last_n_words=500):
        """Run one answering turn.

        Args:
            text: raw user query (will be stripped).
            debug_cost: print OpenAI token/cost accounting when True.
            keep_last_n_words: word budget for the trimmed chat history.

        Returns:
            The agent's answer with backslashes normalized to slashes.
        """
        try:
            # Bound the memory buffer so the prompt stays within budget.
            self.ans_agent.memory.buffer = cut_dialogue_history(
                self.ans_agent.memory.buffer, keep_last_n_words=keep_last_n_words
            )
        except Exception:
            # History trimming is best-effort. The original bare `except:`
            # also swallowed KeyboardInterrupt/SystemExit; narrowed here.
            pass
        finally:
            with get_openai_callback() as openai_cb:
                res = self.ans_agent({"input": text.strip()})
                response = res['output'].replace("\\", "/")

            if debug_cost:
                print(f"Total Tokens: {openai_cb.total_tokens}")
                print(f"Prompt Tokens: {openai_cb.prompt_tokens}")
                print(f"Completion Tokens: {openai_cb.completion_tokens}")
                print(f"Total Cost (USD): ${openai_cb.total_cost}")

            return response

    def init_reco_agent(self):
        """Build the zero-shot product-recommendation agent."""
        reco_tools = [
            self._product_search_tool(),
            Tool(
                name="No Recommendation",
                func=lambda x: "No recommendation",
                description="""
                Use this if based on the context you don't need to recommend any products
                """
            )
        ]
        prompt = ZeroShotAgent.create_prompt(
            reco_tools,
            prefix=RECO_PREFIX,
            format_instructions=RECO_FORMAT_INSTRUCTIONS,
            suffix=RECO_SUFFIX,
            input_variables=["input", "agent_scratchpad"]
        )

        llm_chain_reco = LLMChain(llm=self.llms["gpt-4"], prompt=prompt)
        agent_reco = ZeroShotAgent(llm_chain=llm_chain_reco, allowed_tools=[tool.name for tool in reco_tools])
        return AgentExecutor.from_agent_and_tools(agent=agent_reco, tools=reco_tools, verbose=self.debug)

    def answer(self, query):
        """Moderate, answer, and recommend for a single user query.

        Returns:
            (answer, recommendation) tuple of strings; recommendation is ""
            when nothing is recommended or the query fails moderation.
        """
        # The moderation chain is prompted to reply exactly "True"/"False";
        # .strip() guards against stray whitespace/newlines around the verdict.
        mod_verdict = self.mod_chain.run({"query": query})
        if mod_verdict.strip() == "True":
            # Answer the question, then recommend from the full dialogue.
            answer = self.ans_pipeline(query)
            reco = self.reco_agent.run({"input": self.ans_agent.memory.buffer})
            if len(reco) > 0:
                # Persist the recommendation so follow-up turns can refer to it.
                self.ans_agent.memory.chat_memory.add_ai_message(reco)
            return (answer, reco)
        else:
            # Polite Indonesian refusal for off-topic queries.
            return (
                "Maaf saya tidak bisa membantu Anda untuk itu..., tapi silakan tanya Allofresh-Assistant apa saja terkait kebutuhan berbelanja Anda!",
                ""
            )
|
app.py
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
|
3 |
+
from loguru import logger
|
4 |
+
from pydantic import BaseModel
|
5 |
+
|
6 |
+
from allofresh_chatbot import AllofreshChatbot
|
7 |
+
|
8 |
+
# Single shared chatbot instance; constructing it wires up the LLMs,
# moderation chain, answering agent and recommendation agent once at startup.
allo_chatbot = AllofreshChatbot(debug=False)

# Schema of one chat turn (role/content pair).
# NOTE(review): Message is not referenced anywhere in this file — presumably
# kept for a future API layer; confirm before removing.
class Message(BaseModel):
    # role of the speaker, e.g. "user" or "assistant"
    role: str
    # text of the message
    content: str
|
13 |
+
|
14 |
+
async def predict(input, history):
    """
    Predict the response of the chatbot and complete a running list of chat history.
    """
    # Record the user's turn, then ask the chatbot for an answer plus an
    # optional product recommendation.
    history.append({"role": "user", "content": input})
    answer, reco = allo_chatbot.answer(input)
    history.append({"role": "assistant", "content": answer})

    # A recommendation is shown as an extra chat bubble; pad with an empty
    # user turn so the user/assistant alternation stays intact.
    if len(reco) > 0:
        history.append({"role": "user", "content": ""})
        history.append({"role": "assistant", "content": reco})

    # Gradio's Chatbot widget expects (user_text, bot_text) pairs; pair up
    # consecutive turns (a trailing unpaired turn, if any, is dropped).
    paired_turns = zip(history[0::2], history[1::2])
    messages = [(user_turn["content"], bot_turn["content"]) for user_turn, bot_turn in paired_turns]
    return messages, history
|
29 |
+
|
30 |
+
"""
|
31 |
+
Gradio Blocks low-level API that allows to create custom web applications (here our chat app)
|
32 |
+
"""
|
33 |
+
with gr.Blocks() as demo:
|
34 |
+
logger.info("Starting Demo...")
|
35 |
+
chatbot = gr.Chatbot(label="Allofresh Assistant")
|
36 |
+
state = gr.State([])
|
37 |
+
with gr.Row():
|
38 |
+
txt = gr.Textbox(show_label=False, placeholder="Enter text, then press enter").style(container=False)
|
39 |
+
txt.submit(predict, [txt, state], [chatbot, state])
|
40 |
+
|
41 |
+
demo.launch()
|
prompts/ans_prompt.py
ADDED
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# System-prompt prefix for the answering agent: persona, scope, tool-usage
# policy, and the requirement to answer in Indonesian. The trailing
# "TOOLS:" header is where LangChain appends the tool descriptions.
ANS_PREFIX = """
You are Allofresh-Assistant, an AI language model that has been trained to serve Allofresh,
an online e-grocery platform selling supermarket products with a focus on fresh produces.
Your primary function is to assist customers with their shopping needs,
including but not limited to answering questions on products and services offered Allofresh.

You can answer questions regarding what people can do with the products they buy at Allofresh.
e.g. food and recipes as it will nudge people to buy products

If a customer asks you a question that falls outside of your function or knowledge as an online supermarket assistant,
you must politely decline to answer and redirect the conversation back to your area of expertise.

You have access to the supermarket's knowledge base (products, vouchers, etc.).
You should use this information to provide accurate and helpful responses to customer inquiries.
You must remember the name and description of each tool.
Customers might give you questions which you can answer without tools,
but questions which requires specific knowledge regarding the supermarket must be validated to the knowledge base.
If you can't answer a question with or without tools, politely apologize that you don't know.

You must answer in formal yet friendly bahasa Indonesia.


TOOLS:
------
"""
# ReAct-style format instructions ({tool_names}/{ai_prefix} are filled in by
# LangChain). Per the agent setup, only needed for models below GPT-4.
ANS_FORMAT_INSTRUCTIONS = """
To use a tool, please use the following format:

```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```

When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:

```
Thought: Do I need to use a tool? No
```

Finally, whether you used the tool or not, output the answer
{ai_prefix}: [your response here]
"""
# Suffix: correctness constraints plus the slots LangChain fills at runtime
# ({chat_history}, {input}, {agent_scratchpad}).
ANS_SUFFIX = """
You are very strict on correctness and will never fake an information regarding the supermarket (product names, location, price, vouchers, etc.).
Therefore you must validate every information related to Allofresh to Allofresh's knowledge base
You must answer the user's question as informative as possible

Take into account the previous conversation history:
{chat_history}

Begin! Remember you must give the final answer in bahasa indonesia

New Input: {input}
{agent_scratchpad}
...
"""
|
prompts/mod_prompt.py
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
MOD_PROMPT = """
|
2 |
+
You are MODERATOR.
|
3 |
+
MODERATOR MUST ONLY classify whether a certain passage belongs to a certain topic.
|
4 |
+
|
5 |
+
INPUT: {query}
|
6 |
+
|
7 |
+
INSTRUCTIONS:
|
8 |
+
Classify WHETHER OR NOT input is RELATED to EITHER of the following:
|
9 |
+
- greetings
|
10 |
+
- supermarket shopping
|
11 |
+
- general questions/inquiry on foods/recipe
|
12 |
+
|
13 |
+
NOTES:
|
14 |
+
- the query might be in bahasa indonesia, english, or a combination of both. you must take into account for both languages
|
15 |
+
|
16 |
+
ONLY ANSWER with either [True, False]
|
17 |
+
"""
|
prompts/reco_prompt.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# System-prompt prefix for the recommendation agent: persona and the rule
# that product names must be verified against the product database.
RECO_PREFIX = """
You are Recobot, an LLM trained to recommend products.
You are serving Allofresh, an online e-grocery platform selling supermarket products with a focus on fresh produces.
You have the capability to assess the context and determine whether it's appropriate to recommend a product or not
You are to evaluate another LLM's output and determine what products to recommend to user based on the output.
You NEVER make up product names, and will always check the product database for available products
You must answer in formal yet friendly bahasa Indonesia.
"""
# ReAct format instructions ({tool_names} filled in by LangChain).
# this is only used for gpt-3.5-turbo and below
RECO_FORMAT_INSTRUCTIONS = """
You must use the following format:

Context: the input context you must assess
Thought: Do I need to recommend products? If based on context, recommendation related to the last HumanMessage has been given by AI, then no need to recommend. Yes or No?
Action: What should I do? must be one of [{tool_names}]
Action Input: If you want to recommend products, pass the list of products you want to search, else input empty string
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: if no need to recommend product, only output empty string, else recommend all relevant products based on observation result
"""
# Few-shot examples (recommend vs. don't recommend) for GPT-3-class models.
# NOTE(review): not referenced by init_reco_agent in the visible code —
# presumably appended for weaker models elsewhere; confirm before removing.
RECO_GPT3_ADD_FORMAT_INSTRUCTIONS = """
Example of recommending products
Context: Human: Human: gw pengen makanan sehat pake ayam, rendah kalori, rendah karbo, gimana caranya? bagi resep dan cara masak dong'?\nAI: Untuk makanan sehat rendah kalori dan rendah karbohidrat, Anda bisa mencoba resep Ayam Panggang dengan Sayuran. Bahan-bahan yang dibutuhkan adalah ayam fillet, paprika, brokoli, bawang bombay, bawang putih, garam, merica, dan minyak zaitun. Pertama, potong ayam fillet menjadi beberapa bagian dan lumuri dengan garam dan merica. Kemudian panggang ayam di dalam oven selama 20-25 menit. Sementara itu, tumis bawang bombay dan bawang putih dengan sedikit minyak zaitun hingga harum. Tambahkan paprika dan brokoli yang sudah dipotong-potong, lalu tumis hingga sayuran matang. Sajikan ayam panggang dengan sayuran sebagai pelengkap. Selamat mencoba!
Thought: Yes
Action: Search Products
Observation: [reco_result_from_tools]
Thought: I now know the final answer
Final Answer: [your_final_answer]

Example of NOT recommending products:
Context: Human: hi, ada ayam gak?\nAI: Ya, kami memiliki Ayam Broiler Daging Giling seharga Rp 19.300, Ayam Broiler Paha Tanpa Tulang seharga Rp 16.825, dan Ayam Broiler Paha Atas seharga Rp 17.875. Apakah ada yang ingin Anda beli?
Thought: No
Action: No Recommendation
Observation: [observation_result]
Thought: I now know the final answer
Final Answer: [your_final_answer]
"""
# Suffix with the runtime slots ({input} is the dialogue buffer).
RECO_SUFFIX = """
Begin!

Context: {input}
{agent_scratchpad}
"""
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
langchain==0.0.163
loguru==0.6.0
openai==0.27.4
# NOTE(review): the following packages are imported by the code but were
# missing from this file; pin exact versions before release.
gradio
pydantic
python-dotenv
requests
|
utils.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import requests
|
2 |
+
import json
|
3 |
+
from dotenv import load_dotenv
|
4 |
+
import os
|
5 |
+
|
6 |
+
load_dotenv()
|
7 |
+
ALLOFRESH_SEARCH_API_BASE = os.getenv("ALLOFRESH_SEARCH_API_BASE")
|
8 |
+
|
9 |
+
def search_allo_api(query, limit=3):
    """Query the Allofresh search API for products matching *query*.

    Args:
        query: free-text search keyword.
        limit: maximum number of products to return (default 3).

    Returns:
        The decoded JSON response (dict) from the search endpoint.

    Raises:
        requests.RequestException: on connection failure or timeout.
    """
    # Pass the keyword via `params` so it gets URL-encoded — the previous
    # f-string interpolation produced broken URLs for queries containing
    # spaces or special characters. A timeout keeps a stuck API from
    # hanging the whole agent loop.
    response = requests.get(
        ALLOFRESH_SEARCH_API_BASE,
        params={"keyword": query, "limit": limit, "p": 1},
        timeout=10,
    )
    # response.json() decodes with the correct charset; equivalent to the
    # old json.loads(response.text) but handles encoding properly.
    return response.json()
|
12 |
+
|
13 |
+
def lctool_search_allo_api(queries):
    """LangChain tool entry point for product search.

    Args:
        queries: one or more search keywords, comma-space separated
            (the format the agent is prompted to produce).

    Returns:
        str: stringified list of {keyword: [{name, price}, ...]} results,
        or the stringified exception so the agent can observe the failure
        instead of crashing.
    """
    try:
        results = []
        for keyword in queries.split(", "):
            products = search_allo_api(keyword)["products"]
            # Keep only the fields the agent needs to talk about products.
            trimmed = [
                {field: value for field, value in product.items() if field in ["name", "price"]}
                for product in products
            ]
            results.append({keyword: trimmed})
        return str(results)
    except Exception as err:
        # Surface the error text as the tool observation.
        return str(err)
|
28 |
+
|
29 |
+
def cut_dialogue_history(history_memory, keep_last_n_words=500):
    """Trim a dialogue transcript to roughly the last *keep_last_n_words* words.

    Whole lines are dropped from the start of the transcript until fewer
    than *keep_last_n_words* whitespace-separated words remain.

    Args:
        history_memory: the transcript string; may be None or empty.
        keep_last_n_words: word budget to keep from the end of the transcript.

    Returns:
        The transcript unchanged when it is None/empty or already within
        budget; otherwise the kept tail, prefixed with a newline (preserving
        the original function's output shape).
    """
    # Nothing to trim.
    if history_memory is None or len(history_memory) == 0:
        return history_memory

    n_tokens = len(history_memory.split())
    if n_tokens < keep_last_n_words:
        return history_memory

    paragraphs = history_memory.split('\n')
    last_n_tokens = n_tokens
    # Drop leading lines until we are under the budget. Fixes from the
    # original: use split() consistently (it mixed split() and split(' '),
    # which count words differently around repeated spaces), and guard the
    # loop against exhausting `paragraphs` entirely.
    while paragraphs and last_n_tokens >= keep_last_n_words:
        last_n_tokens -= len(paragraphs[0].split())
        paragraphs = paragraphs[1:]
    return '\n' + '\n'.join(paragraphs)
|