Update main.py
main.py (CHANGED)
@@ -1,26 +1,18 @@
 import os
 import streamlit as st
-from dotenv import load_dotenv
 from pydantic import BaseModel
-from
+from langchain_anthropic import ChatAnthropic
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_core.output_parsers import PydanticOutputParser
 from langchain.agents import create_tool_calling_agent, AgentExecutor
 from tools import search_tool, wiki_tool, save_tool
 
-# ✅ Get
-
+# ✅ Get key from Hugging Face Secrets
+claude_key = os.getenv("CLAUDE_API_KEY")
 
-
-
-    raise ValueError("Missing Hugging Face API token. Please set the 'HUGGINGFACEHUB_API_TOKEN' secret in your HF space settings.")
+if claude_key is None:
+    raise ValueError("Claude API key not found. Please set CLAUDE_API_KEY in Hugging Face Secrets.")
 
-# ✅ Use HuggingFace Mistral model (free), Initialize your LLM using the token
-llm = HuggingFaceHub(
-    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
-    huggingfacehub_api_token=hf_token,
-    model_kwargs={"temperature": 0.7, "max_new_tokens": 500},
-)
 
 # Define the response format
 class ResearchResponse(BaseModel):
@@ -29,6 +21,9 @@ class ResearchResponse(BaseModel):
     sources: list[str]
     tools_used: list[str]
 
+# Initialize Claude model with low token usage
+llm = ChatAnthropic(model="claude-3-5-sonnet-20241022", max_tokens=300, anthropic_api_key=claude_key)
+
 # Create parser
 parser = PydanticOutputParser(pydantic_object=ResearchResponse)
 
@@ -38,7 +33,7 @@ prompt = ChatPromptTemplate.from_messages(
         (
            "system",
            """
-            You are an AI assistant that will help
+            You are an AI assistant that will help of any general questions.
             Answer the user query and use necessary tools. Don't answer any offensive questions or any swearings.
             Wrap the output in this format and provide no other text\n{format_instructions}
             """,
@@ -60,19 +55,24 @@ agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=False)
 st.set_page_config(page_title="TechTales AI - Your own AI Powered Assistant", layout="centered")
 st.title("💡 TechTales AI - Your own AI Powered Assistant")
 
+# Initialize chat history in session state
 if "messages" not in st.session_state:
     st.session_state.messages = []
 
+# Display chat history
 for msg in st.session_state.messages:
     st.chat_message(msg["role"]).write(msg["content"])
 
+# Chat input with Enter button
 query = st.text_input("I am your personal Assistant, Ask me anything...", key="query_input")
 submit_button = st.button("Enter")
 
 if submit_button and query:
+    # Add user message to history
     st.session_state.messages.append({"role": "user", "content": query})
     st.chat_message("user").write(query)
 
+    # Get AI response
     raw_response = agent_executor.invoke({"query": query})
 
     try:
@@ -81,12 +81,15 @@ if submit_button and query:
     except Exception:
         response_text = "I'm sorry, I couldn't process that request."
 
+    # Add AI response to history
     st.session_state.messages.append({"role": "assistant", "content": response_text})
     st.chat_message("assistant").write(response_text)
 
+# Clear Chat Button
 if st.button("🗑️ Clear Chat"):
     st.session_state.messages = []
-    st.rerun()
+    st.rerun()  # Corrected method
 
+# Developer Name Display
 st.markdown("---")
 st.markdown("**Developed by: Pankaj Kumar**", unsafe_allow_html=True)
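In short, this commit replaces the free HuggingFaceHub Mistral backend with Anthropic's Claude via langchain_anthropic.ChatAnthropic, reads the key from a CLAUDE_API_KEY secret instead of HUGGINGFACEHUB_API_TOKEN, and adds comments to the Streamlit chat flow. The diff hides the unchanged middle of main.py (roughly new lines 19-54 and the body of the try block), so the exact wiring of parser, prompt and agent is not visible here. Below is a minimal sketch of how those pieces presumably fit together, based only on the hunk headers and the imports shown above; the extra prompt messages, the output normalization, and the rendering of response_text are illustrative assumptions, not code from the commit.

import os

from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_anthropic import ChatAnthropic
from langchain_core.output_parsers import PydanticOutputParser
from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel

from tools import search_tool, wiki_tool, save_tool  # supplied by the Space's tools.py


class ResearchResponse(BaseModel):
    # Only these two fields are visible in the diff; any others are hidden by the hunk.
    sources: list[str]
    tools_used: list[str]


# Shown in the diff: Claude client built from the CLAUDE_API_KEY secret.
llm = ChatAnthropic(
    model="claude-3-5-sonnet-20241022",
    max_tokens=300,
    anthropic_api_key=os.getenv("CLAUDE_API_KEY"),
)

parser = PydanticOutputParser(pydantic_object=ResearchResponse)

# Hidden in the diff: the hunk headers name prompt = ChatPromptTemplate.from_messages(...)
# and agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=False).
# A tool-calling agent also needs the user query and an agent_scratchpad slot, so the
# template presumably looks roughly like this (system text shortened):
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Answer the user query and use necessary tools.\n{format_instructions}"),
        ("human", "{query}"),
        ("placeholder", "{agent_scratchpad}"),
    ]
).partial(format_instructions=parser.get_format_instructions())

tools = [search_tool, wiki_tool, save_tool]
agent = create_tool_calling_agent(llm=llm, tools=tools, prompt=prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=False)

# The try block around new lines 78-82 presumably parses the agent output back into the
# Pydantic model. Claude can return a list of content blocks instead of a plain string,
# so a small normalization step is a reasonable guess here.
raw_response = agent_executor.invoke({"query": "What is LangChain?"})
output = raw_response["output"]
if isinstance(output, list):  # Anthropic content blocks
    output = "".join(
        block.get("text", "") if isinstance(block, dict) else str(block)
        for block in output
    )
try:
    structured = parser.parse(output)  # ResearchResponse instance
    response_text = str(structured)    # how the app actually renders it is not shown
except Exception:
    response_text = "I'm sorry, I couldn't process that request."

One design note: with max_tokens=300 (the "low token usage" setting added in the diff), a long answer can be cut off before the structured JSON closes, which would land in the except branch above; raising the limit or shortening the format instructions is the usual trade-off.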