qdqd committed
Commit df00324 · verified · 1 Parent(s): e6428b7

Update app.py

Files changed (1): app.py +10 -7
app.py CHANGED
@@ -27,22 +27,26 @@ def process_message(message, history, analysis_prompt, rethinking_prompt, refine
 
     gpt4o_prompt = f"{analysis_prompt}\n\nConversation history:\n{context}\n\nUser query: {message}\n\nPlease analyze this query and respond accordingly."
     gpt4o_response = get_llm_response(gpt4o_prompt, "gpt-4o-mini")
-    full_response += "Analysis:\n"
+    full_response += "<analyzing>\n"
     for word in gpt4o_response:
         full_response += word + " "
         time.sleep(0.1)
         yield full_response
+    full_response += "\n</analyzing>"
+    yield full_response
 
     if "<error>" in " ".join(gpt4o_response):
         return
 
     llama_prompt = f"{rethinking_prompt}\n\nConversation history:\n{context}\n\nOriginal user query: {message}\n\nInitial response: {' '.join(gpt4o_response)}\n\nPlease review and suggest improvements or confirm if satisfactory."
     llama_response = get_llm_response(llama_prompt, "gpt-4o-mini")
-    full_response += "\n\nRethinking:\n"
+    full_response += "\n\n<rethinking>\n"
     for word in llama_response:
         full_response += word + " "
         time.sleep(0.1)
         yield full_response
+    full_response += "\n</rethinking>"
+    yield full_response
 
     if "<error>" in " ".join(llama_response):
         return
@@ -50,22 +54,21 @@ def process_message(message, history, analysis_prompt, rethinking_prompt, refine
     if "done" not in " ".join(llama_response).lower():
         final_gpt4o_prompt = f"{refinement_prompt}\n\nConversation history:\n{context}\n\nOriginal user query: {message}\n\nInitial response: {' '.join(gpt4o_response)}\n\nSuggestion: {' '.join(llama_response)}\n\nPlease provide a final response considering the suggestion."
         final_response = get_llm_response(final_gpt4o_prompt, "gpt-4o-mini")
-        full_response += "\n\nFinal Response:\n"
+        full_response += "\n\n<output>\n"
         for word in final_response:
             full_response += word + " "
             time.sleep(0.1)
             yield full_response
+        full_response += "\n</output>"
+        yield full_response
     else:
-        full_response += "\n\nFinal Response: The initial response is satisfactory and no further refinement is needed."
+        full_response += "\n\n<output>The initial response is satisfactory and no further refinement is needed.</output>"
         yield full_response
 
 def respond(message, history, analysis_prompt, rethinking_prompt, refinement_prompt):
     for chunk in process_message(message, history, analysis_prompt, rethinking_prompt, refinement_prompt):
         yield chunk
 
-# (The rest of the code remains the same: analysis_prompt, rethinking_prompt, refinement_prompt, and the Gradio
-# (Previous code remains the same)
-
 analysis_prompt = """
 You are Echo-Refraction, an AI assistant tasked with analyzing user queries. Your role is to:
 1. Carefully examine the user's input for clarity, completeness, and potential ambiguities.
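
Aside (not part of this commit): after this change, every chunk yielded by process_message is the cumulative transcript so far, wrapped in <analyzing>, <rethinking>, and <output> tags. The snippet below is a hypothetical consumer sketch under that assumption; fake_stream and extract_output do not exist in app.py and only illustrate how a caller could pull the final answer out of the <output> section.

import re

def fake_stream():
    # Stand-in for process_message(): each yield is the full transcript so far,
    # wrapped in the stage tags introduced by this commit (assumed shape).
    yield "<analyzing>\nThe query is clear. \n</analyzing>"
    yield ("<analyzing>\nThe query is clear. \n</analyzing>\n\n"
           "<rethinking>\ndone \n</rethinking>")
    yield ("<analyzing>\nThe query is clear. \n</analyzing>\n\n"
           "<rethinking>\ndone \n</rethinking>\n\n"
           "<output>The initial response is satisfactory and no further refinement is needed.</output>")

def extract_output(full_response):
    # Return the <output> section if present, otherwise the whole transcript.
    match = re.search(r"<output>(.*?)</output>", full_response, flags=re.DOTALL)
    return match.group(1).strip() if match else full_response.strip()

last_chunk = ""
for last_chunk in fake_stream():
    pass  # a UI such as the app's Gradio chat would render each cumulative chunk

print(extract_output(last_chunk))

With this tag format, a front end can keep streaming the intermediate <analyzing> and <rethinking> stages while still isolating the final answer with a single pattern match.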