Update app.py
app.py CHANGED
@@ -45,10 +45,13 @@ def chat_with_cerebras(user_input):
         # End compute time measurement
         compute_time = time.time() - start_time
 
-        return response, chain_of_thought, f"Compute Time: {compute_time:.2f} seconds"
+        # Simulate token usage tracking (placeholder for real implementation)
+        token_usage = len(user_input.split()) + len(response.split())
+
+        return response, chain_of_thought, f"Compute Time: {compute_time:.2f} seconds", f"Tokens used: {token_usage}"
 
     except Exception as e:
-        return "Error: Unable to process your request.", "", str(e)
+        return "Error: Unable to process your request.", "", str(e), ""
 
 # Gradio interface
 def gradio_ui():
@@ -68,25 +71,34 @@ def gradio_ui():
         with gr.Row():
             send_button = gr.Button("Send", variant="primary")
             clear_button = gr.Button("Clear Chat")
+            export_button = gr.Button("Export Chat History")
 
         def handle_chat(chat_history, user_input):
             if not user_input.strip():
-                return chat_history, "", "", "Please enter a valid message."
-            ai_response, chain_of_thought, compute_info = chat_with_cerebras(user_input)
-            # Placeholder for token usage (to be replaced with actual logic if token count available)
-            token_usage = "Tokens used: Not available in this version"
+                return chat_history, "", "", "", "Please enter a valid message."
+            ai_response, chain_of_thought, compute_info, token_usage = chat_with_cerebras(user_input)
             chat_history.append((user_input, ai_response))
             return chat_history, chain_of_thought, compute_info, token_usage
 
         def clear_chat():
             return [], "", "", ""
 
+        def export_chat(chat_history):
+            if not chat_history:
+                return "", "No chat history to export."
+            chat_text = "\n".join([f"User: {item[0]}\nAI: {item[1]}" for item in chat_history])
+            filename = f"chat_history_{int(time.time())}.txt"
+            with open(filename, "w") as file:
+                file.write(chat_text)
+            return f"Chat history exported to {filename}.", ""
+
         send_button.click(handle_chat, inputs=[chat_history, user_input], outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display])
         clear_button.click(clear_chat, outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display])
+        export_button.click(export_chat, inputs=[chat_history], outputs=[compute_time, chain_of_thought_display])
 
         user_input.submit(handle_chat, inputs=[chat_history, user_input], outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display])
 
-        gr.Markdown("""---\n### 🌟 Features:\n- **Advanced Reasoning**: Chain-of-thought explanations for complex queries.\n- **Real-Time Performance Metrics**: Measure response compute time instantly.\n- **Token Usage Tracking**:
+        gr.Markdown("""---\n### 🌟 Features:\n- **Advanced Reasoning**: Chain-of-thought explanations for complex queries.\n- **Real-Time Performance Metrics**: Measure response compute time instantly.\n- **Token Usage Tracking**: Monitor token usage per response for transparency.\n- **Export Chat History**: Save your conversation as a text file for future reference.\n- **User-Friendly Design**: Intuitive chatbot interface with powerful features.\n- **Insightful Chain of Thought**: See the reasoning process behind AI decisions.\n- **Submit on Enter**: Seamless interaction with keyboard support.\n""")
 
     return demo
 
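The commit estimates token usage by counting whitespace-separated words in the prompt and the response, which will drift from the model's real subword token count. A minimal sketch of a closer approximation, assuming tiktoken is installed in the Space and accepting that its cl100k_base vocabulary is only a stand-in for whatever tokenizer the served model actually uses; count_tokens is a hypothetical helper, not part of app.py:

import tiktoken

# cl100k_base is an OpenAI vocabulary; treat the result as an estimate only.
_ENCODING = tiktoken.get_encoding("cl100k_base")

def count_tokens(text: str) -> int:
    """Count the tokens in `text` under the chosen encoding."""
    return len(_ENCODING.encode(text))

# Possible drop-in for the patched line in chat_with_cerebras:
# token_usage = count_tokens(user_input) + count_tokens(response)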
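Worth noting in the patched handle_chat: the empty-input branch now returns five values, while send_button.click and user_input.submit wire four outputs (chat_history, chain_of_thought_display, compute_time, token_usage_display), and Gradio generally raises an error when the number of returned values does not match the wired outputs. A sketch of a four-value early return, kept consistent with the rest of the handler; this is a suggestion, not the committed code:

def handle_chat(chat_history, user_input):
    if not user_input.strip():
        # Reuse the token-usage slot for the validation message so every
        # return path yields exactly four values.
        return chat_history, "", "", "Please enter a valid message."
    ai_response, chain_of_thought, compute_info, token_usage = chat_with_cerebras(user_input)
    chat_history.append((user_input, ai_response))
    return chat_history, chain_of_thought, compute_info, token_usage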
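The new export_chat writes the transcript into the Space's working directory and reports the filename through the compute_time textbox. If the goal is to let users download the file, a common Gradio pattern is to return a file path to a gr.File output instead. The sketch below assumes a hypothetical export_file = gr.File(label="Download chat history") component that does not exist in the current app.py:

import tempfile
import time

def export_chat_to_file(chat_history):
    """Write the conversation to a temporary file and return its path for a gr.File output."""
    if not chat_history:
        return None
    chat_text = "\n".join(f"User: {user}\nAI: {ai}" for user, ai in chat_history)
    # delete=False so the file survives until Gradio has served it to the browser.
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".txt", prefix=f"chat_history_{int(time.time())}_", delete=False
    ) as handle:
        handle.write(chat_text)
        return handle.name

# Hypothetical wiring inside gradio_ui():
# export_button.click(export_chat_to_file, inputs=[chat_history], outputs=[export_file])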