lostecho committed on
Commit
d28e15c
·
verified ·
1 Parent(s): 88c7a95

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +58 -2
app.py CHANGED
@@ -14,6 +14,58 @@ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
14
  from langchain_core.pydantic_v1 import BaseModel, Field
15
  from langchain_google_genai import ChatGoogleGenerativeAI
16
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  def create_tools():
18
  search = TavilySearchAPIWrapper(tavily_api_key='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')
19
  description = """"A search engine optimized for comprehensive, accurate, \
@@ -53,15 +105,19 @@ def main():
53
  response = llm.invoke(user_input)
54
  display_response(response)
55
  prompt = """
56
- You are a fact-checker. You are asked to verify the following statement based on the information you get from your tool
57
  and your knowledge. You should provide a response that is based on the information you have and that is as accurate as possible.
58
  Your response should be True or False. If you are not sure, you should say that you are not sure.
59
  """
60
  new_prompt = st.text_area(prompt)
 
 
 
 
61
  if new_prompt:
62
  prompt = new_prompt
63
  answer = agent_chain.invoke(
64
- prompt + "\n " + user_input,
65
  )
66
  display_response(answer)
67
 
 
14
  from langchain_core.pydantic_v1 import BaseModel, Field
15
  from langchain_google_genai import ChatGoogleGenerativeAI
16
 
17
# SECURITY(review): this is a live-looking API credential committed to source
# control. It should be loaded from an environment variable or a secrets
# manager, and this key should be rotated. Left in place here only to avoid
# breaking callers; do not ship as-is.
API_GOOGLE_SEARCH_KEY = "AIzaSyA4oDDFtPxAfmPC8EcfQrkByb9xKm2QfMc"
33
def query_fact_check_api(claim):
    """Look up *claim* in the Google Fact Check Tools API.

    Args:
        claim (str): The statement to search fact checks for.

    Returns:
        dict: The API response body parsed as JSON.

    Raises:
        requests.HTTPError: If the API replies with an error HTTP status.
    """
    endpoint = "https://factchecktools.googleapis.com/v1alpha1/claims:search"
    query_params = {
        "key": API_GOOGLE_SEARCH_KEY,
        "query": claim,
    }

    api_response = requests.get(endpoint, params=query_params)
    # Surface HTTP-level failures to the caller instead of returning junk.
    api_response.raise_for_status()

    return api_response.json()
53
+
54
def response_break_out(response):
    """Fold Google Fact Check API results into an agent-generated summary.

    Feeds the first claim in *response* — its text, then each review's
    publisher and textual rating — through ``agent_chain.invoke`` one
    fragment at a time, each call receiving the previously accumulated
    answer plus the new fragment.

    NOTE(review): relies on ``agent_chain`` existing at module/global scope
    when called — it is not defined in this function; verify against the
    caller (it appears to be created inside ``main``).

    Args:
        response (dict): Parsed JSON from ``query_fact_check_api``.

    Returns:
        str: The accumulated agent output, or a fixed message when the
        response contains no fact checks.
    """
    if not response.get("claims"):
        return """No fact checks found for this claim."""

    answer = """Below is the searched result"""
    for claim in response["claims"]:
        # BUG FIX: ``claim`` is a dict; the original concatenated the dict
        # itself onto a string, raising TypeError. Use its "text" field,
        # matching the API schema used by the reference snippet above.
        answer = agent_chain.invoke(answer + """claim: """ + claim["text"])
        for review in claim["claimReview"]:
            answer = agent_chain.invoke(answer + """publisher: """ + review['publisher']['name'])
            answer = agent_chain.invoke(answer + """rating: """ + review['textualRating'])
        # Only the first claim is processed — preserves the original flow.
        break

    return answer
69
  def create_tools():
70
  search = TavilySearchAPIWrapper(tavily_api_key='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')
71
  description = """"A search engine optimized for comprehensive, accurate, \
 
105
  response = llm.invoke(user_input)
106
  display_response(response)
107
  prompt = """
108
+ You are a fact-checker. You are asked to verify the following statement based on the information you get from your tool, the search result,
109
  and your knowledge. You should provide a response that is based on the information you have and that is as accurate as possible.
110
  Your response should be True or False. If you are not sure, you should say that you are not sure.
111
  """
112
  new_prompt = st.text_area(prompt)
113
+
114
+ result = query_fact_check_api(user_input)
115
+ facts = response_break_out(result)
116
+
117
  if new_prompt:
118
  prompt = new_prompt
119
  answer = agent_chain.invoke(
120
+ prompt + "\n " + facts + "\n" + user_input,
121
  )
122
  display_response(answer)
123