Somnath3570 committed on
Commit da9fa96 · verified · 1 Parent(s): 03330cf

Delete app.py

Files changed (1)
  1. app.py +0 -113
app.py DELETED
@@ -1,113 +0,0 @@
- import os
- import streamlit as st
- import requests
- from dotenv import load_dotenv
- load_dotenv()
- 
- # Set environment variables
- os.environ["GROQ_API_KEY"] = st.secrets["GROQ_API_KEY"]
- os.environ["TAVILY_API_KEY"] = st.secrets["TAVILY_API_KEY"]
- 
- from langchain_groq import ChatGroq
- from langchain_community.tools.tavily_search import TavilySearchResults
- from langgraph.prebuilt import create_react_agent
- from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
- 
- def get_response_from_ai_agent(llm_id, query, allow_search, system_prompt):
-     """
-     Create and invoke an AI agent with optional search capabilities
-     """
-     try:
-         # Initialize LLM with proper configuration
-         llm = ChatGroq(
-             api_key=os.environ.get("GROQ_API_KEY"),
-             model_name=llm_id
-         )
- 
-         # Setup tools based on allow_search flag
-         tools = []
-         if allow_search:
-             tools.append(TavilySearchResults(
-                 api_key=os.environ.get("TAVILY_API_KEY"),
-                 max_results=2
-             ))
- 
-         # Create the agent
-         agent = create_react_agent(
-             model=llm,
-             tools=tools
-         )
- 
-         # Prepare the initial messages
-         initial_messages = [
-             SystemMessage(content=system_prompt),
-             HumanMessage(content=query)
-         ]
- 
-         # Create proper state with messages
-         state = {
-             "messages": initial_messages,
-             "next_steps": [],
-             "structured_response": None
-         }
- 
-         # Invoke agent with proper state
-         response = agent.invoke(state)
- 
-         # Handle response
-         if isinstance(response, dict):
-             if "messages" in response:
-                 messages = response["messages"]
-                 ai_messages = [msg for msg in messages if isinstance(msg, AIMessage)]
-                 if ai_messages:
-                     return ai_messages[-1].content
-             elif "structured_response" in response:
-                 return response["structured_response"]
- 
-         return "I apologize, but I couldn't generate a proper response. Please try again."
- 
-     except Exception as e:
-         print(f"Debug - Error in get_response_from_ai_agent: {str(e)}")
-         raise Exception(f"Agent error: {str(e)}")
- 
- # Streamlit UI
- st.set_page_config(page_title="AI Chatbot Agents", layout="centered")
- st.title("AI Chatbot Agents")
- st.write("Create and Interact with the AI Agents!")
- 
- system_prompt = st.text_area(
-     "Define your AI Agent: ",
-     height=70,
-     placeholder="Type your system prompt here...",
-     value="You are a helpful AI assistant."
- )
- 
- MODEL_NAMES_GROQ = ["llama-3.3-70b-versatile", "mixtral-8x7b-32768"]
- selected_model = st.selectbox("Select Groq Model:", MODEL_NAMES_GROQ)
- 
- allow_web_search = st.checkbox("Allow Web Search")
- 
- user_query = st.text_area(
-     "Enter your query: ",
-     height=150,
-     placeholder="Ask Anything!"
- )
- 
- if st.button("Ask Agent!"):
-     if not user_query.strip():
-         st.error("Please enter a query!")
-     else:
-         try:
-             with st.spinner("Getting response..."):
-                 response = get_response_from_ai_agent(
-                     llm_id=selected_model,
-                     query=user_query,
-                     allow_search=allow_web_search,
-                     system_prompt=system_prompt
-                 )
- 
-             st.subheader("Agent Response")
-             st.markdown(response)
- 
-         except Exception as e:
-             st.error(f"An error occurred: {str(e)}")