|
import gradio as gr |
|
from duckduckgo_search import DDGS |
|
from collections import deque |
|
import time |
|
import random |
|
|
|
def get_llm_response(prompt, model, max_retries=3):
    """Send *prompt* to the DDGS chat endpoint and return the model's reply.

    Retries transient failures with exponential backoff plus jitter.

    Args:
        prompt: Full prompt text to send to the model.
        model: Model identifier accepted by ``DDGS().chat`` (e.g. "gpt-4o-mini").
        max_retries: Total number of attempts before giving up.

    Returns:
        The model's response string, or an ``<error>...</error>`` sentinel
        string after *max_retries* failed attempts (callers check for the
        "<error>" substring rather than catching exceptions).
    """
    for attempt in range(max_retries):
        try:
            return DDGS().chat(prompt, model=model)
        except Exception as e:  # DDGS may raise several exception types; keep broad.
            if attempt == max_retries - 1:
                print(f"Max retries reached. Error: {e}")
                return f"<error>Unable to get response from {model} after {max_retries} attempts.</error>"
            # Exponential backoff with jitter; report the delay we actually sleep
            # (the original message omitted the random jitter component).
            delay = 2**attempt + random.random()
            print(f"Error occurred: {e}. Retrying in {delay:.1f} seconds...")
            time.sleep(delay)
|
|
|
def process_message(message, history, analysis_prompt, rethinking_prompt, refinement_prompt):
    """Run the analyze -> rethink -> refine pipeline for one user message.

    Yields a cumulative progress string after each stage so the UI can stream
    intermediate results. Stops early when a stage returns an "<error>" reply
    or when the rethinking stage signals "done".
    """
    # Context window: keep only the five most recent exchanges.
    recent_turns = deque(maxlen=5)
    for turn in history:
        recent_turns.append(f"User: {turn[0]}\nEcho-Refraction: {turn[1]}")
    context = "\n".join(recent_turns)

    # Accumulate one display line per completed stage.
    stages = []

    # Stage 1: initial analysis of the query.
    analysis = get_llm_response(
        f"{analysis_prompt}\n\nConversation history:\n{context}\n\nUser query: {message}\n\nPlease analyze this query and respond accordingly.",
        "gpt-4o-mini",
    )
    stages.append(f"Analysis: {analysis}")
    yield "\n".join(stages)
    if "<error>" in analysis:
        return

    # Stage 2: critique of the initial response.
    rethinking = get_llm_response(
        f"{rethinking_prompt}\n\nConversation history:\n{context}\n\nOriginal user query: {message}\n\nInitial response: {analysis}\n\nPlease review and suggest improvements or confirm if satisfactory.",
        "gpt-4o-mini",
    )
    stages.append(f"Rethinking: {rethinking}")
    yield "\n".join(stages)
    if "<error>" in rethinking:
        return analysis

    # Stage 3: skipped when the critique contains "done" (reviewer satisfied).
    if "done" in rethinking.lower():
        return analysis
    final = get_llm_response(
        f"{refinement_prompt}\n\nConversation history:\n{context}\n\nOriginal user query: {message}\n\nInitial response: {analysis}\n\nSuggestion: {rethinking}\n\nPlease provide a final response considering the suggestion.",
        "gpt-4o-mini",
    )
    stages.append(f"Final Response: {final}")
    yield "\n".join(stages)
    return final
|
|
|
def respond(message, history, analysis_prompt, rethinking_prompt, refinement_prompt):
    """Gradio streaming callback: re-yield each pipeline progress update.

    Tracks the most recent chunk so the final answer (the text after
    "Final Response: ", when present) can be returned as the generator's
    return value; Gradio itself consumes only the yielded chunks.
    """
    latest = ""
    for update in process_message(message, history, analysis_prompt, rethinking_prompt, refinement_prompt):
        latest = update
        yield latest

    if "Final Response: " in latest:
        return latest.split("Final Response: ")[-1]
    return latest
|
|
|
# Stage-1 system prompt: checks the user's query for clarity/completeness
# before any answering happens. Output is wrapped in <analyzing> tags.
analysis_prompt = """

You are Echo-Refraction, an AI assistant tasked with analyzing user queries. Your role is to:

1. Carefully examine the user's input for clarity, completeness, and potential ambiguities.

2. Identify if the query needs refinement or additional information.

3. If refinement is needed, suggest specific improvements or ask clarifying questions.

4. If the query is clear, respond with "Query is clear and ready for processing."

5. Provide a brief explanation of your analysis in all cases.

Enclose your response in <analyzing> tags.

"""



# Stage-2 system prompt: critiques the stage-1 response. A reply of "Done."
# is the sentinel process_message uses to skip the refinement stage.
rethinking_prompt = """

You are Echo-Refraction, an advanced AI model responsible for critically evaluating and improving responses. Your task is to:

1. Carefully review the original user query and the initial response.

2. Analyze the response for accuracy, relevance, completeness, and potential improvements.

3. Consider perspectives or approaches that might enhance the response.

4. If you identify areas for improvement:

   a. Clearly explain what aspects need refinement and why.

   b. Provide specific suggestions for how the response could be enhanced.

   c. If necessary, propose additional information or context that could be included.

5. If the initial response is satisfactory and you have no suggestions for improvement, respond with "Done."

Enclose your response in <rethinking> tags.

"""



# Stage-3 system prompt: merges the stage-2 suggestions into a final answer,
# wrapped in <output> tags. Only used when stage 2 did not answer "Done."
refinement_prompt = """

You are Echo-Refraction, an AI assistant tasked with providing a final, refined response to the user. Your role is to:

1. Review the original user query, your initial response, and the suggestions provided.

2. Consider the feedback and suggestions for improvement.

3. Integrate the suggested improvements into your response, ensuring that:

   a. The information is accurate and up-to-date.

   b. The response is comprehensive and addresses all aspects of the user's query.

   c. The language is clear, concise, and appropriate for the user's level of understanding.

4. If you disagree with any suggestions, provide a brief explanation of why you chose not to incorporate them.

5. Deliver a final response that represents the best possible answer to the user's query.

Enclose your response in <output> tags.

"""
|
|
|
# Chat UI wiring: `respond` streams each pipeline stage into the chat window,
# and the three stage prompts are exposed as editable textboxes so users can
# tune the pipeline's behavior at runtime.
demo = gr.ChatInterface(

    respond,

    additional_inputs=[

        gr.Textbox(value=analysis_prompt, label="Analysis Prompt", lines=10),

        gr.Textbox(value=rethinking_prompt, label="Rethinking Prompt", lines=10),

        gr.Textbox(value=refinement_prompt, label="Refinement Prompt", lines=10),

    ],

    title="Echo-Refraction AI Assistant",

    description="Chat with Echo-Refraction, an AI assistant that analyzes, rethinks, and refines responses.",

    examples=[

        ["How many 'r' are there in the word 'strawberry'"],

        ["Explain the concept of quantum entanglement."],

        ["How does photosynthesis work?"],

    ],

    cache_examples=False,  # examples hit the live LLM, so precomputing them would be slow/flaky

)



# Start the web server only when executed as a script (not on import).
if __name__ == "__main__":

    demo.launch()