import logging
import os

import streamlit as st
from groq import Groq
# Logging setup
logging.basicConfig(level=logging.INFO)
# Streamlit page configuration
st.set_page_config(
    page_title="Groq AI Reasoning Chatbot",
    page_icon="🤖",
    layout="centered",
)
# Custom CSS for better visuals
st.markdown("""
<style>
.stApp {
background-color: #F5F5F5;
}
.chat-container {
background-color: #FFFFFF;
margin: 1rem 0;
padding: 1rem;
border-radius: 10px;
box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.1);
}
.user-message {
text-align: right;
background-color: #007AFF;
color: white;
padding: 0.5rem 1rem;
border-radius: 15px;
margin: 0.5rem 0;
display: inline-block;
max-width: 70%;
}
.bot-message {
text-align: left;
background-color: #E9ECEF;
padding: 0.5rem 1rem;
border-radius: 15px;
margin: 0.5rem 0;
display: inline-block;
max-width: 70%;
}
</style>
""", unsafe_allow_html=True)
# Groq API client initialization
@st.cache_resource
def init_groq_client():
    return Groq(api_key=os.getenv("GROQ_API_KEY"))
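# Note: this assumes the GROQ_API_KEY environment variable is set before the app
# starts (e.g. exported in the shell or configured as a secret in the hosting
# environment); without a valid key, client creation or the first request will fail.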
# Chat with the Groq model, yielding the accumulated response as it streams
def chat_with_groq(client, message, history):
    try:
        # Build the conversation context from prior (user, assistant) turns,
        # skipping any turn that has no assistant reply yet
        messages = [
            {"role": "system", "content": "You are a helpful assistant. Think step by step before answering."}
        ]
        for user_text, bot_text in history:
            messages.append({"role": "user", "content": user_text})
            if bot_text:
                messages.append({"role": "assistant", "content": bot_text})
        messages.append({"role": "user", "content": message})

        # Call the Groq model with streaming enabled
        completion = client.chat.completions.create(
            model="deepseek-r1-distill-llama-70b",
            messages=messages,
            temperature=0.6,
            max_tokens=1024,
            top_p=0.95,
            stream=True,
        )

        # Accumulate the response chunk by chunk, yielding the running text
        response = ""
        for chunk in completion:
            content = chunk.choices[0].delta.content or ""
            response += content
            yield response
    except Exception as e:
        logging.error(f"Error during Groq inference: {str(e)}")
        yield f"An error occurred: {str(e)}. Please check your API key and network connection."
# Initialize app state
if "history" not in st.session_state:
st.session_state["history"] = [] # [(user_message, bot_response), ...]
# Display the app title and description
st.title("Groq AI Reasoning Chatbot 🤖")
st.write("Ask the chatbot anything and it will provide step-by-step reasoning.")
# Input form
with st.form("chat_form", clear_on_submit=True):
    user_message = st.text_input("Your Message:", key="user_input")
    submitted = st.form_submit_button("Send")
# Process user input and display chat
if submitted and user_message:
    # Add user message to history (bot response is filled in after streaming)
    st.session_state["history"].append((user_message, None))

    # Display chat history
    for user_text, bot_text in st.session_state["history"]:
        st.markdown(f'<div class="user-message">{user_text}</div>', unsafe_allow_html=True)
        if bot_text:
            st.markdown(f'<div class="bot-message">{bot_text}</div>', unsafe_allow_html=True)

    # Initialize Groq client
    groq_client = init_groq_client()

    # Generate bot response, updating the placeholder as chunks stream in
    response_placeholder = st.empty()
    bot_response = ""
    for partial_response in chat_with_groq(groq_client, user_message, st.session_state["history"][:-1]):
        bot_response = partial_response
        response_placeholder.markdown(f'<div class="bot-message">{bot_response}</div>', unsafe_allow_html=True)

    # Update history with the full bot response
    st.session_state["history"][-1] = (user_message, bot_response)
# Clear chat history button
if st.button("Clear Chat"):
st.session_state["history"] = []
st.experimental_rerun()
# Footer
st.markdown("---")
st.markdown("Made with ❀️ using Groq AI")