yurasolokha committed on
Commit
a9c6918
·
verified ·
1 Parent(s): 9b42ba4

Fixed issue with suggestions

Browse files
Files changed (1) hide show
  1. app.py +53 -51
app.py CHANGED
@@ -62,20 +62,8 @@ def clean_text(text):
62
  return re.sub(r"(For more information, follow the links provided:).*", "", text, flags=re.DOTALL)
63
 
64
  def parse_suggestions(response):
65
- start_tag = "<SGs>"
66
- end_tag = "</SGs>"
67
-
68
- start_index = response.find(start_tag)
69
- end_index = response.find(end_tag) + len(end_tag)
70
-
71
- if start_index == -1 or end_index == -1:
72
- return []
73
-
74
- suggestions_xml = response[start_index:end_index]
75
-
76
- root = ET.fromstring(suggestions_xml)
77
- suggestions = [suggestion.text for suggestion in root.findall('SG')]
78
-
79
  return suggestions
80
 
81
  def get_answer(query, history, temperature, top_p, max_token_count):
@@ -85,7 +73,7 @@ def get_answer(query, history, temperature, top_p, max_token_count):
85
  max_words = math.floor(max_token_count*0.75)
86
 
87
  retrievalResults, retrieve_execution_time = retrieve(query)
88
- highest_score = retrievalResults[0]['score']
89
 
90
  if highest_score > 0.45:
91
  contexts = get_contexts(retrievalResults)
@@ -127,14 +115,10 @@ def get_answer(query, history, temperature, top_p, max_token_count):
127
  If necessary, reduce the amount of detail provided to keep the response within the word limit but still complete.
128
 
129
  Additionally, only if there is sufficient remaining token capacity, provide 2 or 3 related questions that the user might want to ask next based on the topic.
130
- If possible, use the following structure for these suggested questions:
131
- <SGs>
132
- <SG>First related question</SG>
133
- <SG>Second related question</SG>
134
- <SG>Third related question</SG>
135
- </SGs>
136
 
137
- Ensure these suggested questions are brief, relevant, and encourage further exploration on the topic.
138
  Assistant:
139
  """
140
 
@@ -164,20 +148,23 @@ def get_answer(query, history, temperature, top_p, max_token_count):
164
  response_body = json.loads(response.get('body').read())
165
  response_text = response_body['results'][0]['outputText']
166
 
167
- suggestions = parse_suggestions(response_text)
 
168
 
169
- if unique_article_ids:
170
- article_urls_text = "\n\nFor more information, follow the links provided:\n" + "\n".join(
171
- f"— https://knowledge.operativeiq.com/articles/{article_id}" for article_id in unique_article_ids)
172
 
173
  response_json = {
174
  "response_text": response_text.split('<SGs>')[0].strip(),
175
  "suggestions": suggestions,
176
- "article_urls": article_urls_text
177
  }
 
 
 
 
178
 
179
- # response_json_str = json.dumps(response_json, indent=4)
180
- # print("Response Json:\n", response_json_str)
181
 
182
  prompt_and_time = f"""
183
  Prompt:
@@ -185,7 +172,7 @@ def get_answer(query, history, temperature, top_p, max_token_count):
185
  Retrieve execution time: {retrieve_execution_time} seconds
186
  Invoke model execution time: {invoke_model_time} seconds
187
  """
188
- return response_json["response_text"] + response_json["article_urls"], prompt_and_time, response_json["suggestions"]
189
 
190
  def format_chat_history(chat_history):
191
  prompt = ""
@@ -201,12 +188,21 @@ def respond(message, chat_history, temperature=0.9, top_p=0.6, max_token_count=5
201
 
202
  stream, prompt_and_time, suggestions = get_answer(message, formatted_history, temperature, top_p, max_token_count)
203
 
 
 
 
 
204
  for idx, text_token in enumerate(stream):
205
  if idx == 0 and text_token.startswith(" "):
206
  text_token = text_token[1:]
207
 
208
  chat_history[-1][1] += text_token
209
- yield "", chat_history, prompt_and_time, suggestions[0], suggestions[1], suggestions[2]
 
 
 
 
 
210
 
211
  def clear_chat_history():
212
  return '', []
@@ -218,11 +214,15 @@ def main():
218
  }
219
  """) as demo:
220
  chatbot = gr.Chatbot([[None, initial_message]], height=600)
 
 
 
 
221
 
222
  with gr.Row():
223
- suggestion1 = gr.Button("How to edit RFID crew badge?", elem_classes="suggestion-button")
224
- suggestion2 = gr.Button("What types of RFID tags exist?", elem_classes="suggestion-button")
225
- suggestion3 = gr.Button("Is it possible to delete a facility?", elem_classes="suggestion-button")
226
 
227
  msg = gr.Textbox(label="Question")
228
 
@@ -235,33 +235,35 @@ def main():
235
  btn = gr.Button("Submit")
236
  clear = gr.Button("Clear history")
237
 
238
- btn.click(
239
- respond,
240
- inputs=[msg, chatbot, temperature, top_p, max_token_count],
241
- outputs=[msg, chatbot, prompt_and_time, suggestion1, suggestion2, suggestion3]
242
- )
 
 
 
 
 
 
243
 
244
  suggestion1.click(lambda s: s, inputs=suggestion1, outputs=msg).then(
245
  respond,
246
- inputs=[msg, chatbot, temperature, top_p, max_token_count],
247
- outputs=[msg, chatbot, prompt_and_time, suggestion1, suggestion2, suggestion3]
248
  )
249
  suggestion2.click(lambda s: s, inputs=suggestion2, outputs=msg).then(
250
  respond,
251
- inputs=[msg, chatbot, temperature, top_p, max_token_count],
252
- outputs=[msg, chatbot, prompt_and_time, suggestion1, suggestion2, suggestion3]
253
  )
254
  suggestion3.click(lambda s: s, inputs=suggestion3, outputs=msg).then(
255
  respond,
256
- inputs=[msg, chatbot, temperature, top_p, max_token_count],
257
- outputs=[msg, chatbot, prompt_and_time, suggestion1, suggestion2, suggestion3]
258
- )
259
-
260
- msg.submit(
261
- respond,
262
- inputs=[msg, chatbot, temperature, top_p, max_token_count],
263
- outputs=[msg, chatbot, prompt_and_time, suggestion1, suggestion2, suggestion3]
264
  )
 
 
265
 
266
  clear.click(clear_chat_history, outputs=[msg, chatbot, prompt_and_time])
267
 
 
62
  return re.sub(r"(For more information, follow the links provided:).*", "", text, flags=re.DOTALL)
63
 
64
  def parse_suggestions(response):
65
+ suggestions = re.findall(r'<SG>(.*?)</SG>', response)
66
+ suggestions = ["" if suggestion == "Suggestion" else suggestion for suggestion in suggestions]
 
 
 
 
 
 
 
 
 
 
 
 
67
  return suggestions
68
 
69
  def get_answer(query, history, temperature, top_p, max_token_count):
 
73
  max_words = math.floor(max_token_count*0.75)
74
 
75
  retrievalResults, retrieve_execution_time = retrieve(query)
76
+ highest_score = retrievalResults[0]['score'] if retrievalResults else 0
77
 
78
  if highest_score > 0.45:
79
  contexts = get_contexts(retrievalResults)
 
115
  If necessary, reduce the amount of detail provided to keep the response within the word limit but still complete.
116
 
117
  Additionally, only if there is sufficient remaining token capacity, provide 2 or 3 related questions that the user might want to ask next based on the topic.
118
+ Format these suggested questions as follows, leaving the <SG> tags empty if no suggestions are generated:
119
+ <SGs><SG>Suggestion</SG><SG>Suggestion</SG><SG>Suggestion</SG></SGs>
 
 
 
 
120
 
121
+ Ensure these suggested questions are brief, relevant, and encourage further exploration on the topic.
122
  Assistant:
123
  """
124
 
 
148
  response_body = json.loads(response.get('body').read())
149
  response_text = response_body['results'][0]['outputText']
150
 
151
+ print("response_text:\n")
152
+ pp.pprint(response_text)
153
 
154
+ suggestions = parse_suggestions(response_text)
 
 
155
 
156
  response_json = {
157
  "response_text": response_text.split('<SGs>')[0].strip(),
158
  "suggestions": suggestions,
159
+ "article_ids": unique_article_ids
160
  }
161
+
162
+ if response_json["article_ids"]:
163
+ article_urls_text = "\n\nFor more information, follow the links provided:\n" + "\n".join(
164
+ f"— https://knowledge.operativeiq.com/articles/{article_id}" for article_id in unique_article_ids)
165
 
166
+ response_json_str = json.dumps(response_json, indent=4)
167
+ print("Response Json:\n", response_json_str)
168
 
169
  prompt_and_time = f"""
170
  Prompt:
 
172
  Retrieve execution time: {retrieve_execution_time} seconds
173
  Invoke model execution time: {invoke_model_time} seconds
174
  """
175
+ return response_json["response_text"] + article_urls_text, prompt_and_time, response_json["suggestions"]
176
 
177
  def format_chat_history(chat_history):
178
  prompt = ""
 
188
 
189
  stream, prompt_and_time, suggestions = get_answer(message, formatted_history, temperature, top_p, max_token_count)
190
 
191
+ suggestion1_visible = bool(suggestions[0])
192
+ suggestion2_visible = bool(suggestions[1])
193
+ suggestion3_visible = bool(suggestions[2])
194
+
195
  for idx, text_token in enumerate(stream):
196
  if idx == 0 and text_token.startswith(" "):
197
  text_token = text_token[1:]
198
 
199
  chat_history[-1][1] += text_token
200
+ yield (
201
+ "", chat_history, prompt_and_time,
202
+ suggestions[0], suggestion1_visible,
203
+ suggestions[1], suggestion2_visible,
204
+ suggestions[2], suggestion3_visible
205
+ )
206
 
207
  def clear_chat_history():
208
  return '', []
 
214
  }
215
  """) as demo:
216
  chatbot = gr.Chatbot([[None, initial_message]], height=600)
217
+
218
+ suggestion1_visible = gr.State(value=True)
219
+ suggestion2_visible = gr.State(value=True)
220
+ suggestion3_visible = gr.State(value=True)
221
 
222
  with gr.Row():
223
+ suggestion1 = gr.Button("How to edit RFID crew badge?", elem_classes="suggestion-button", visible=suggestion1_visible)
224
+ suggestion2 = gr.Button("What types of RFID tags exist?", elem_classes="suggestion-button", visible=suggestion2_visible)
225
+ suggestion3 = gr.Button("Is it possible to delete a facility?", elem_classes="suggestion-button", visible=suggestion3_visible)
226
 
227
  msg = gr.Textbox(label="Question")
228
 
 
235
  btn = gr.Button("Submit")
236
  clear = gr.Button("Clear history")
237
 
238
+ inputs= [
239
+ msg, chatbot, temperature, top_p, max_token_count
240
+ ]
241
+ outputs = [
242
+ msg, chatbot, prompt_and_time,
243
+ suggestion1, suggestion1_visible,
244
+ suggestion2, suggestion2_visible,
245
+ suggestion3, suggestion3_visible
246
+ ]
247
+
248
+ btn.click(respond, inputs=inputs, outputs=outputs)
249
 
250
  suggestion1.click(lambda s: s, inputs=suggestion1, outputs=msg).then(
251
  respond,
252
+ inputs=inputs,
253
+ outputs=outputs
254
  )
255
  suggestion2.click(lambda s: s, inputs=suggestion2, outputs=msg).then(
256
  respond,
257
+ inputs=inputs,
258
+ outputs=outputs
259
  )
260
  suggestion3.click(lambda s: s, inputs=suggestion3, outputs=msg).then(
261
  respond,
262
+ inputs=inputs,
263
+ outputs=outputs
 
 
 
 
 
 
264
  )
265
+
266
+ msg.submit(respond, inputs=inputs, outputs=outputs)
267
 
268
  clear.click(clear_chat_history, outputs=[msg, chatbot, prompt_and_time])
269