Updated prompt
app.py CHANGED
@@ -55,15 +55,12 @@ def get_contexts(retrievalResults):
         contexts += retrievedResult['content']['text'] + '\n'
     return contexts
 
-def remove_link(text):
-
-    cleaned_text = re.sub(pattern, '', text)
-    return cleaned_text
+def clean_text(text):
+    return re.sub(r"(Here are some related questions you might be interested in:|For more information, follow the links provided:).*", "", text, flags=re.DOTALL)
 
 def get_answer(query, history, temperature, top_p, max_token_count):
-    history = remove_link(history)
     contexts = ""
-
+    unique_article_ids = []
     max_words = math.floor(max_token_count*0.75)
 
     retrievalResults, retrieve_execution_time = retrieve(query)
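
Note: the new clean_text helper exists so that the decorations this commit appends to every answer (the related-questions block and the article links) are stripped back out before a bot message is reused as conversation history. Below is a minimal usage sketch; the function body is copied from the diff, and the sample message is made up.

import re

def clean_text(text):
    # Same regex as in the diff: drop everything from either marker phrase onward,
    # with re.DOTALL so the match runs across newlines.
    return re.sub(
        r"(Here are some related questions you might be interested in:|For more information, follow the links provided:).*",
        "", text, flags=re.DOTALL)

sample = (
    "You can reset your password from the account settings page.\n"
    "Here are some related questions you might be interested in:\n"
    "<button>How do I change my email?</button>"
)
print(clean_text(sample))
# prints only: You can reset your password from the account settings page.
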
@@ -71,39 +68,52 @@ def get_answer(query, history, temperature, top_p, max_token_count):
 
     if highest_score > 0.45:
         contexts = get_contexts(retrievalResults)
-
-
+
+        for result in retrievalResults:
+            article_id = result['metadata'].get('article_id')
+            if article_id not in unique_article_ids:
+                unique_article_ids.append(article_id)
+                if len(unique_article_ids) == 3:
+                    break
 
     PROMPT_TEMPLATE = f"""
     System: You are an intelligent assistant helping users understand and navigate website functionalities.
     Your goal is to provide clear, accurate, and contextually relevant answers based on the information provided.
-
     Use the information enclosed in the <context> tags and refer to the conversation history in the <history> tags to answer the user's question in the <question> tags.
     If you don't know the answer, just say that you don't know, don't try to make up an answer.
 
     Your response must:
+    - Be in the same language that used in question.
     - Be fully formed and grammatically correct without cutting off any sentences.
     - Complete a logical thought or sentence before stopping, ensuring the response doesn't end mid-sentence.
     - Be clear, easy to understand, and succinct, not exceeding {max_words} words.
     - Refer specifically to website features or actions when relevant to the user's question.
     - Avoid providing URL links or external references.
+    - Use a visually appealing and easy-to-read format. Structure information in short, clear paragraphs and, where applicable, use bullet points or numbered lists.
 
     <history>
     {history}
     </history>
-
     <context>
     {contexts}
     </context>
-
     <question>
     {query}
     </question>
-
     Provide a detailed, concise response that fully answers the user's question.
     Make sure all sentences of your reponse are completely formed and grammatically correct.
     If necessary, reduce the amount of detail provided to keep the response within the word limit but still complete.
 
+    Additionally, after your response, provide 2 or 3 related questions that the user might want to ask next based on the topic.
+    Stick strictly to this structure:
+
+    Here are some related questions you might be interested in:
+    <button style="background-color: #4CAF50; color: white; padding: 10px; margin: 5px; border: none; border-radius: 5px; cursor: pointer; font-size: 16px;">First related question</button>
+    <button style="background-color: #4CAF50; color: white; padding: 10px; margin: 5px; border: none; border-radius: 5px; cursor: pointer; font-size: 16px;">Second related question</button>
+    <button style="background-color: #4CAF50; color: white; padding: 10px; margin: 5px; border: none; border-radius: 5px; cursor: pointer; font-size: 16px;">Third related question</button>
+
+    Ensure these suggested questions are brief, relevant, and encourage further exploration on the topic.
+
     Assistant:
     """
 
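
The loop added under the highest_score check collects at most three distinct article_id values from the retrieval metadata; these ids feed the links appended to the answer further down in get_answer. A standalone sketch of that dedup logic, using invented retrieval results shaped like the knowledge-base output the app already consumes:

# Hypothetical retrieval results; only the 'metadata' shape matters here.
retrievalResults = [
    {'metadata': {'article_id': '101'}},
    {'metadata': {'article_id': '101'}},  # duplicate id, skipped
    {'metadata': {'article_id': '205'}},
    {'metadata': {'article_id': '309'}},
    {'metadata': {'article_id': '412'}},  # never reached: the loop stops at three unique ids
]

unique_article_ids = []
for result in retrievalResults:
    article_id = result['metadata'].get('article_id')
    if article_id not in unique_article_ids:
        unique_article_ids.append(article_id)
        if len(unique_article_ids) == 3:
            break

print(unique_article_ids)  # ['101', '205', '309']
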
@@ -133,7 +143,10 @@ def get_answer(query, history, temperature, top_p, max_token_count):
     response_body = json.loads(response.get('body').read())
     response_text = response_body['results'][0]['outputText']
 
-
+    if unique_article_ids:
+        article_urls_text = "\n\nFor more information, follow the links provided:\n" + "\n".join(
+            f"· https://knowledge.operativeiq.com/articles/{article_id}" for article_id in unique_article_ids)
+        response_text += article_urls_text
 
     prompt_and_time = f"""
     Prompt:
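
The link block is appended only when at least one article_id was collected, and its lead-in phrase ("For more information, follow the links provided:") is one of the two markers clean_text strips, so the links do not leak back into the prompt history. A quick sketch with invented ids and answer text showing the resulting response_text:

unique_article_ids = ['101', '205']
response_text = "You can reset your password from the account settings page."

if unique_article_ids:
    article_urls_text = "\n\nFor more information, follow the links provided:\n" + "\n".join(
        f"· https://knowledge.operativeiq.com/articles/{article_id}" for article_id in unique_article_ids)
    response_text += article_urls_text

print(response_text)
# You can reset your password from the account settings page.
#
# For more information, follow the links provided:
# · https://knowledge.operativeiq.com/articles/101
# · https://knowledge.operativeiq.com/articles/205
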
@@ -148,11 +161,12 @@ def format_chat_history(chat_history):
     prompt = ""
     for turn in chat_history:
         user_message, bot_message = turn
-
+        cleaned_message = clean_text(bot_message)
+        prompt = f"{prompt}User: {user_message}\nAssistant: {cleaned_message}\n"
     return prompt
 
 def respond(message, chat_history, temperature=0.9, top_p=0.6, max_token_count=512):
-    formatted_history = format_chat_history(chat_history)
+    formatted_history = format_chat_history(chat_history[-4:])
     chat_history.append([message, ""])
 
     stream, prompt_and_time = get_answer(message, formatted_history, temperature, top_p, max_token_count)
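
respond now formats only the last four turns, format_chat_history(chat_history[-4:]), presumably to keep the prompt within the token budget, and each bot message passes through clean_text before being written into the <history> block. A tiny illustration of the slicing with dummy turns:

chat_history = [[f"question {i}", f"answer {i}"] for i in range(1, 7)]  # six dummy turns
recent = chat_history[-4:]  # only turns 3..6 survive
print([user for user, bot in recent])
# ['question 3', 'question 4', 'question 5', 'question 6']
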
|