# QueryMind_AI / app.py
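"""
QueryMind_AI: a Streamlit app that wraps a LangGraph ReAct agent backed by
Groq-hosted models, with optional Tavily web search.

Run locally with `streamlit run app.py` (assumes GROQ_API_KEY and
TAVILY_API_KEY are available via Streamlit secrets).
"""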
import os

import streamlit as st
from dotenv import load_dotenv

load_dotenv()

# Set environment variables from Streamlit secrets
os.environ["GROQ_API_KEY"] = st.secrets["GROQ_API_KEY"]
os.environ["TAVILY_API_KEY"] = st.secrets["TAVILY_API_KEY"]

from langchain_groq import ChatGroq
from langchain_community.tools.tavily_search import TavilySearchResults
from langgraph.prebuilt import create_react_agent
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage


def get_response_from_ai_agent(llm_id, query, allow_search, system_prompt):
    """
    Create and invoke an AI agent with optional search capabilities.
    """
    try:
        # Initialize LLM with proper configuration
        llm = ChatGroq(
            api_key=os.environ.get("GROQ_API_KEY"),
            model_name=llm_id
        )
        # Set up tools based on the allow_search flag; TavilySearchResults
        # reads TAVILY_API_KEY from the environment variables set above
        tools = []
        if allow_search:
            tools.append(TavilySearchResults(max_results=2))
        # Create the agent
        agent = create_react_agent(
            model=llm,
            tools=tools
        )

        # Prepare the initial messages
        initial_messages = [
            SystemMessage(content=system_prompt),
            HumanMessage(content=query)
        ]
        # The prebuilt ReAct agent state only requires "messages"
        state = {"messages": initial_messages}

        # Invoke the agent
        response = agent.invoke(state)
        # Handle response
        if isinstance(response, dict):
            if "messages" in response:
                messages = response["messages"]
                ai_messages = [msg for msg in messages if isinstance(msg, AIMessage)]
                if ai_messages:
                    return ai_messages[-1].content
            elif "structured_response" in response:
                return response["structured_response"]

        return "I apologize, but I couldn't generate a proper response. Please try again."
    except Exception as e:
        print(f"Debug - Error in get_response_from_ai_agent: {str(e)}")
        raise Exception(f"Agent error: {str(e)}") from e
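
# Example call (assumes valid GROQ_API_KEY and TAVILY_API_KEY in the environment):
#   get_response_from_ai_agent(
#       llm_id="llama-3.3-70b-versatile",
#       query="What is LangGraph?",
#       allow_search=True,
#       system_prompt="You are a helpful AI assistant.",
#   )
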
# Streamlit UI
st.set_page_config(page_title="AI Chatbot Agents", layout="centered")
st.title("AI Chatbot Agents")
st.write("Create and interact with AI agents!")

system_prompt = st.text_area(
    "Define your AI Agent:",
    height=70,
    placeholder="Type your system prompt here...",
    value="You are a helpful AI assistant."
)
MODEL_NAMES_GROQ = ["llama-3.3-70b-versatile", "mixtral-8x7b-32768"]
selected_model = st.selectbox("Select Groq Model:", MODEL_NAMES_GROQ)
allow_web_search = st.checkbox("Allow Web Search")
user_query = st.text_area(
    "Enter your query:",
    height=150,
    placeholder="Ask Anything!"
)

if st.button("Ask Agent!"):
    if not user_query.strip():
        st.error("Please enter a query!")
    else:
        try:
            with st.spinner("Getting response..."):
                response = get_response_from_ai_agent(
                    llm_id=selected_model,
                    query=user_query,
                    allow_search=allow_web_search,
                    system_prompt=system_prompt
                )
            st.subheader("Agent Response")
            st.markdown(response)
        except Exception as e:
            st.error(f"An error occurred: {str(e)}")