import os

import streamlit as st
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent, load_tools
from langchain_community.callbacks.streamlit import StreamlitCallbackHandler
from langchain_community.chat_models.huggingface import ChatHuggingFace
from langchain_community.llms import HuggingFaceEndpoint

import Utilities as ut

# Sanity check that the Hugging Face token is present, without echoing the secret itself.
print("HF_TOKEN set:", os.getenv("HF_TOKEN") is not None)

# Load configuration (prompt name, API keys, model repo id) via the Utilities helper.
initdict = ut.get_tokens()
hf_token = os.getenv("HF_TOKEN")
reactstyle_prompt = initdict["reactstyle_prompt"]
serpapi_api_key = initdict["serpapi_api_key"]
llm_repoid = initdict["llm_repoid"]
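# Note: the Utilities module is not included in this file; get_tokens() is assumed to
# return a plain dict of configuration strings, roughly of the form:
#   {
#       "reactstyle_prompt": "hwchase17/react",     # hub id of a ReAct prompt (illustrative value)
#       "serpapi_api_key": "<serpapi-key>",
#       "llm_repoid": "<huggingface-model-repo-id>",
#   }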

# Hugging Face Inference Endpoint used as the agent's LLM.
llm = HuggingFaceEndpoint(repo_id=llm_repoid, huggingfacehub_api_token=hf_token, temperature=0.9, verbose=True)

# Build a ReAct-style agent with a SerpAPI web-search tool, using a prompt pulled from the hub.
tools = load_tools(["serpapi"], llm=llm, serpapi_api_key=serpapi_api_key)
react_prompt = hub.pull(reactstyle_prompt)
agent = create_react_agent(llm, tools, react_prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)

# Chat wrapper around the same endpoint, stopped on "\nObservation" so the model does not
# fabricate tool observations. (Not wired into the executor below.)
chat_model = ChatHuggingFace(llm=llm)
chat_model_with_stop = chat_model.bind(stop=["\nObservation"])
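# (Assumption, not part of the original flow: if the chat interface were preferred, the agent
# could be built from the stopped chat model instead, e.g.
#   agent = create_react_agent(chat_model_with_stop, tools, react_prompt)
# )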

st.title("PatentGuru - Intelligent Chatbot")

if user_query := st.chat_input():
    st.chat_message("user").write(user_query)
    with st.chat_message("assistant"):
        # Stream the agent's intermediate thoughts and tool calls into this chat bubble.
        st_callback = StreamlitCallbackHandler(st.container())
        response = agent_executor.invoke(
            {"input": user_query},
            {"callbacks": [st_callback]},
        )
        st.write(response["output"])
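
# To run locally (assuming this file is saved as app.py; adjust the name as needed):
#   export HF_TOKEN=<your-huggingface-token>
#   streamlit run app.py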