# fact-check / app.py — Streamlit fact-checking chatbot (author: Shreemit).
import os
from getpass import getpass

import streamlit as st
from langchain.agents import AgentExecutor, initialize_agent, AgentType
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.prompts import PromptTemplate
from langchain.utilities.tavily_search import TavilySearchAPIWrapper
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAI
def create_tools():
    """Build the list of tools the agent can call.

    Returns:
        list: A single-element list holding a TavilySearchResults tool
        backed by the Tavily search API.
    """
    # SECURITY FIX: the Tavily API key was hard-coded in source control;
    # read it from the environment instead. Set TAVILY_API_KEY before running.
    search = TavilySearchAPIWrapper(
        tavily_api_key=os.environ.get("TAVILY_API_KEY", "")
    )
    # BUG FIX: the original used a stray quadruple quote (""""), which left a
    # literal leading '"' character in the tool description shown to the LLM.
    description = (
        "A search engine optimized for comprehensive, accurate, "
        "and trusted results. Useful for when you need to answer questions "
        "about current events or about recent information. "
        "Input should be a search query. "
        "If the user is asking about something that you don't know about, "
        "you should probably use this tool to see if that can provide any information."
    )
    tavily_tool = TavilySearchResults(api_wrapper=search, description=description)
    return [tavily_tool]
def create_llm_with_tools(llm, tools):
    """Return the LLM with the given tools attached via function binding."""
    bound_llm = llm.bind(functions=tools)
    return bound_llm
def create_agent_chain(tools, llm):
    """Assemble a structured-chat zero-shot ReAct agent over the given tools.

    Args:
        tools: Tool list the agent may invoke (e.g. from create_tools()).
        llm: The language model driving the agent.

    Returns:
        The executor produced by langchain's initialize_agent.
    """
    agent_settings = {
        "agent": AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        "verbose": True,
    }
    return initialize_agent(tools, llm, **agent_settings)
def get_user_input():
    """Render a Streamlit text box and return whatever the user typed."""
    question = st.text_input("Enter your question")
    return question
def display_response(response):
    """Render the model's answer on the Streamlit page."""
    st.write(response)
def main():
    """Streamlit entry point: build the agent and run the fact-check UI."""
    st.title('Fact-Checking Chatbot')
    # SECURITY FIX: the Google API key was hard-coded in source control;
    # read it from the environment instead. Set GOOGLE_API_KEY before running.
    llm = GoogleGenerativeAI(
        model="gemini-pro",
        google_api_key=os.environ.get("GOOGLE_API_KEY", ""),
    )
    tools = create_tools()
    # DEAD-CODE FIX: the original also built `llm_with_tools = llm.bind(...)`
    # but never used it; only the agent chain below is needed.
    agent_chain = create_agent_chain(tools, llm)
    user_input = get_user_input()
    if user_input:
        # BUG FIX: the original first displayed a raw `llm.invoke(user_input)`
        # answer, which bypassed the search tool and duplicated the LLM call;
        # only the agent's fact-checked answer is shown now.
        prompt = """
You are a fact-checker. You are asked to verify the following statement based on the information you get from your tool
and your knowledge. You should provide a response that is based on the information you have and that is as accurate as possible.
Your response should be True or False. If you are not sure, you should say that you are not sure.
"""
        # The text area lets the user override the default fact-checking prompt.
        new_prompt = st.text_area(prompt)
        if new_prompt:
            prompt = new_prompt
        answer = agent_chain.invoke(prompt + "\n " + user_input)
        display_response(answer)
if __name__ == "__main__":
main()