Update app.py
app.py CHANGED
@@ -23,17 +23,22 @@ def process_message(message, history, analysis_prompt, rethinking_prompt, refinement_prompt):
         conversation_history.append(f"User: {h[0]}\nEcho-Refraction: {h[1]}")

     context = "\n".join(conversation_history)
+    full_response = ""

     gpt4o_prompt = f"{analysis_prompt}\n\nConversation history:\n{context}\n\nUser query: {message}\n\nPlease analyze this query and respond accordingly."
     gpt4o_response = get_llm_response(gpt4o_prompt, "gpt-4o-mini")
-
+    for chunk in stream_words("Analysis: ", gpt4o_response):
+        full_response = chunk
+        yield full_response

     if "<error>" in " ".join(gpt4o_response):
         return

     llama_prompt = f"{rethinking_prompt}\n\nConversation history:\n{context}\n\nOriginal user query: {message}\n\nInitial response: {' '.join(gpt4o_response)}\n\nPlease review and suggest improvements or confirm if satisfactory."
     llama_response = get_llm_response(llama_prompt, "gpt-4o-mini")
-
+    for chunk in stream_words("\n\nRethinking: ", llama_response):
+        full_response += chunk
+        yield full_response

     if "<error>" in " ".join(llama_response):
         return
@@ -41,9 +46,12 @@ def process_message(message, history, analysis_prompt, rethinking_prompt, refinement_prompt):
     if "done" not in " ".join(llama_response).lower():
         final_gpt4o_prompt = f"{refinement_prompt}\n\nConversation history:\n{context}\n\nOriginal user query: {message}\n\nInitial response: {' '.join(gpt4o_response)}\n\nSuggestion: {' '.join(llama_response)}\n\nPlease provide a final response considering the suggestion."
         final_response = get_llm_response(final_gpt4o_prompt, "gpt-4o-mini")
-
+        for chunk in stream_words("\n\nFinal Response: ", final_response):
+            full_response += chunk
+            yield full_response
     else:
-
+        full_response += "\n\nFinal Response: The initial response is satisfactory and no further refinement is needed."
+        yield full_response

 def stream_words(prefix, words):
     response = prefix
@@ -53,12 +61,8 @@ def stream_words(prefix, words):
     yield response

 def respond(message, history, analysis_prompt, rethinking_prompt, refinement_prompt):
-    full_response = ""
     for chunk in process_message(message, history, analysis_prompt, rethinking_prompt, refinement_prompt):
-
-        yield full_response
-
-    return full_response
+        yield chunk

 # (The rest of the code remains the same: analysis_prompt, rethinking_prompt, refinement_prompt, and the Gradio
 # (Previous code remains the same)
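Only the first and last lines of stream_words appear in the diff context (response = prefix and yield response). A minimal sketch of a body consistent with those two lines, assuming the helper accumulates one word at a time and yields the growing string; the pacing delay is a guess, not something this commit shows:

import time

def stream_words(prefix, words):
    response = prefix
    for i, word in enumerate(words):
        # Grow the cumulative string one word at a time; yielding the
        # whole string each step is what produces the typing effect.
        response += ("" if i == 0 else " ") + word
        time.sleep(0.02)  # assumed pacing, not shown in the diff
        yield response

Under this reading each chunk already contains the prefix plus everything streamed so far, which is why the first loop in process_message assigns full_response = chunk rather than appending.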
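get_llm_response sits outside this diff. Its return value is both joined with " ".join(...) and fed to stream_words word by word, so it presumably returns the completion split into a list of words, and it must emit an "<error>" marker on failure, since process_message checks for one. A hypothetical sketch against the OpenAI chat completions client; the real helper in app.py may differ:

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

def get_llm_response(prompt, model):
    try:
        completion = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
        )
        # Split into words so callers can stream the response word by word
        return completion.choices[0].message.content.split()
    except Exception as exc:
        # process_message looks for "<error>" in the joined response
        return ["<error>", str(exc)]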
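The closing comments note that the prompt definitions and the Gradio wiring are unchanged by this commit. For orientation, a sketch of how a generator like respond is typically mounted, with the three prompts passed through as additional inputs; the labels and defaults here are assumptions, not part of this commit:

import gradio as gr

demo = gr.ChatInterface(
    fn=respond,  # a generator, so the UI streams each yielded string
    additional_inputs=[
        gr.Textbox(label="Analysis prompt"),
        gr.Textbox(label="Rethinking prompt"),
        gr.Textbox(label="Refinement prompt"),
    ],
)

if __name__ == "__main__":
    demo.launch()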