Update app.py
app.py CHANGED
@@ -4,10 +4,7 @@ import os
 import json
 import time
 
-
-client = groq.Groq()
-
-def make_api_call(messages, max_tokens, is_final_answer=False):
+def make_api_call(client, messages, max_tokens, is_final_answer=False):
     for attempt in range(3):
         try:
             response = client.chat.completions.create(
@@ -26,7 +23,7 @@ def make_api_call(messages, max_tokens, is_final_answer=False):
                 return {"title": "Error", "content": f"Failed to generate step after 3 attempts. Error: {str(e)}", "next_action": "final_answer"}
             time.sleep(1)  # Wait for 1 second before retrying
 
-def generate_response(prompt):
+def generate_response(client, prompt):
     messages = [
         {"role": "system", "content": """You are an expert AI assistant that explains your reasoning step by step. For each step, provide a title that describes what you're doing in that step, along with the content. Decide if you need another step or if you're ready to give the final answer. Respond in JSON format with 'title', 'content', and 'next_action' (either 'continue' or 'final_answer') keys. USE AS MANY REASONING STEPS AS POSSIBLE. AT LEAST 3. BE AWARE OF YOUR LIMITATIONS AS AN LLM AND WHAT YOU CAN AND CANNOT DO. IN YOUR REASONING, INCLUDE EXPLORATION OF ALTERNATIVE ANSWERS. CONSIDER YOU MAY BE WRONG, AND IF YOU ARE WRONG IN YOUR REASONING, WHERE IT WOULD BE. FULLY TEST ALL OTHER POSSIBILITIES. YOU CAN BE WRONG. WHEN YOU SAY YOU ARE RE-EXAMINING, ACTUALLY RE-EXAMINE, AND USE ANOTHER APPROACH TO DO SO. DO NOT JUST SAY YOU ARE RE-EXAMINING. USE AT LEAST 3 METHODS TO DERIVE THE ANSWER. USE BEST PRACTICES.
 
@@ -48,11 +45,16 @@ Example of a valid JSON response:
 
     while True:
         start_time = time.time()
-        step_data = make_api_call(messages, 300)
+        step_data = make_api_call(client, messages, 300)
         end_time = time.time()
         thinking_time = end_time - start_time
         total_thinking_time += thinking_time
 
+        # Handle potential errors
+        if step_data.get('title') == "Error":
+            steps.append((f"Step {step_count}: {step_data.get('title')}", step_data.get('content'), thinking_time))
+            break
+
         step_title = f"Step {step_count}: {step_data.get('title', 'No Title')}"
         step_content = step_data.get('content', 'No Content')
         steps.append((step_title, step_content, thinking_time))
@@ -68,12 +70,15 @@ Example of a valid JSON response:
     messages.append({"role": "user", "content": "Please provide the final answer based on your reasoning above."})
 
     start_time = time.time()
-    final_data = make_api_call(messages, 200, is_final_answer=True)
+    final_data = make_api_call(client, messages, 200, is_final_answer=True)
     end_time = time.time()
     thinking_time = end_time - start_time
     total_thinking_time += thinking_time
 
-    steps.append(("Final Answer", final_data.get('content', 'No Content'), thinking_time))
+    if final_data.get('title') == "Error":
+        steps.append(("Final Answer", final_data.get('content'), thinking_time))
+    else:
+        steps.append(("Final Answer", final_data.get('content', 'No Content'), thinking_time))
 
     return steps, total_thinking_time
 
@@ -95,12 +100,25 @@ def format_steps(steps, total_time):
     html_content += "<strong>Total thinking time: {:.2f} seconds</strong>".format(total_time)
     return html_content
 
-def main(user_query):
+def main(api_key, user_query):
+    if not api_key:
+        return "Please enter your Groq API key to proceed.", ""
+
     if not user_query:
         return "Please enter a query to get started.", ""
 
-    steps, total_time = generate_response(user_query)
-    formatted_steps = format_steps(steps, total_time)
+    try:
+        # Initialize the Groq client with the provided API key
+        client = groq.Groq(api_key=api_key)
+    except Exception as e:
+        return f"Failed to initialize Groq client. Error: {str(e)}", ""
+
+    try:
+        steps, total_time = generate_response(client, user_query)
+        formatted_steps = format_steps(steps, total_time)
+    except Exception as e:
+        return f"An error occurred during processing. Error: {str(e)}", ""
+
     return formatted_steps, ""
 
 # Define the Gradio interface
@@ -115,6 +133,10 @@ with gr.Blocks() as demo:
 
     with gr.Row():
        with gr.Column():
+            api_input = gr.Password(
+                label="Enter your Groq API Key:",
+                placeholder="Your Groq API Key",
+            )
             user_input = gr.Textbox(
                 label="Enter your query:",
                 placeholder="e.g., How many 'R's are in the word strawberry?",
@@ -126,7 +148,7 @@ with gr.Blocks() as demo:
         with gr.Column():
             output_html = gr.HTML()
 
-    submit_btn.click(fn=main, inputs=user_input, outputs=output_html)
+    submit_btn.click(fn=main, inputs=[api_input, user_input], outputs=output_html)
 
 # Launch the Gradio app
 if __name__ == "__main__":