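"""Streamlit demo for scientific claim verification (Team UMBC-SBU-UT).

The app authenticates the user, lets them pick a retriever and a reasoner
model, then verifies user-submitted scientific claims in a chat interface:
the retriever fetches candidate evidence documents and the reasoner LLM
returns a support/contradict decision together with its reasoning.
"""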
import hmac
import json
import logging
import os
import time
from string import Template
from typing import Any

import requests
import streamlit as st

from llm_reasoner import LLMReasoner
from prompts import templates

logger = logging.getLogger(__name__)

st.header("Scientific Claim Verification")
st.caption("Team UMBC-SBU-UT")
def safe_parse_json(model_answer):
    """Parse the model's answer as JSON, returning None on failure."""
    try:
        return json.loads(model_answer)
    except json.JSONDecodeError as e:
        logger.error("Failed to parse JSON: %s", e)
        return None
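# The reasoner prompt asks the LLM for a JSON object of the form
#   {"decision": "support" | "contradict", "reasoning": "..."}
# (casing is normalized with .lower() before the verdict is rendered below);
# safe_parse_json decodes that object, or returns None on malformed output.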
def check_password():
    """Returns `True` if the user entered a correct password."""

    def login_form():
        """Form with widgets to collect user information."""
        with st.form("Credentials"):
            st.text_input("Username", key="username")
            st.text_input("Password", type="password", key="password")
            st.form_submit_button("Log in", on_click=password_entered)

    def password_entered():
        """Checks whether the password entered by the user is correct."""
        stored_password = os.getenv(st.session_state["username"])
        # Compare in constant time to avoid leaking information via timing.
        if stored_password and hmac.compare_digest(stored_password, st.session_state["password"]):
            st.session_state["password_correct"] = True
            del st.session_state["password"]  # Remove credentials from session
            del st.session_state["username"]
            return
        # If authentication fails
        st.session_state["password_correct"] = False

    # Return True if the username + password is validated.
    if st.session_state.get("password_correct", False):
        return True

    # Show inputs for username + password.
    login_form()
    if "password_correct" in st.session_state:
        st.error("😕 User not known or password incorrect")
    return False
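# Credentials are read from environment variables keyed by username, i.e. one
# USERNAME=PASSWORD pair per authorized user, presumably configured as secrets
# in the deployment (on a Hugging Face Space these would live under the
# Space's secrets).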
def select_models():
    """Returns only when a valid option is selected from both dropdowns."""
    # Placeholder-first option lists
    retriever_options = ["Choose one...", "BM25 Retriever", "Off-the-shelf Retriever", "Finetuned Retriever", "No Retriever"]
    reasoner_options = ["Choose one...", "Claude Sonnet", "GPT-4o", "o3-mini"]

    # Selection widgets
    retriever = st.selectbox(
        "Select the Retriever Model",
        retriever_options,
        key="retriever"
    )
    reasoner = st.selectbox(
        "Select the Reasoner Model",
        reasoner_options,
        key="reasoner"
    )

    # Confirm button
    if st.button("Next"):
        # Check that both selections are not the placeholder.
        if retriever == "Choose one..." or reasoner == "Choose one...":
            st.info("Please select both a retriever and a reasoner.")
            return None, None
        else:
            # Store the valid selections in session state
            st.session_state["selected_models"] = (retriever, reasoner)
            return retriever, reasoner
    else:
        st.info("Click 'Next' once you have made your selections.")
        return None, None
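# Streamlit reruns the whole script on every interaction, so select_models()
# returns (None, None) until the user clicks "Next" with valid choices; the
# result is kept in st.session_state so it survives subsequent reruns.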
if not check_password():
    st.stop()

if "selected_models" not in st.session_state:
    selected_retriever, selected_reasoner = select_models()
    # If valid selections are returned, store them and rerun with the new state.
    if selected_retriever is not None and selected_reasoner is not None:
        st.session_state.selected_models = (selected_retriever, selected_reasoner)
        st.rerun()
    else:
        st.stop()  # Halt further execution until valid selections are made.
else:
    selected_retriever, selected_reasoner = st.session_state.selected_models

# START OF AGENTIC DEMO
column1, column2 = st.columns(2)
column1.caption(f"Retriever Selected: {selected_retriever}")
column2.caption(f"Reasoner Selected: {selected_reasoner}")

if st.button("Change Selection", key="change_selection_btn"):
    st.session_state.pop("selected_models", None)
    st.session_state.pop("retriever", None)
    st.session_state.pop("reasoner", None)
    st.session_state.messages = [{"role": "assistant", "content": "Let's start verifying the claims here! 🚀"}]
    st.rerun()
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "Let's start verifying the claims here! 🚀"}]

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
def retriever(query: str, selected_retriever: str) -> str:
    """Query the selected retriever and return the top documents as one string."""
    with st.chat_message("assistant"):
        placeholder = st.empty()
        text = ""
        if selected_retriever == "BM25 Retriever":
            message = "Using the BM25 retriever to search for documents related to your query..."
            retriever_endpoint = "bm25"
        elif selected_retriever == "Off-the-shelf Retriever":
            message = "Using the off-the-shelf retriever to fetch detailed documents relevant to your query..."
            retriever_endpoint = "ots"
        elif selected_retriever == "Finetuned Retriever":
            message = "Using the finetuned retriever to fetch detailed documents relevant to your query..."
            retriever_endpoint = "ft"
        else:
            # No retriever selected: show the skip message and return an empty corpus.
            placeholder.markdown("No retriever selected. Skipping document retrieval.")
            return ""

        headers = {
            'Content-Type': 'application/json',
        }
        json_data = {
            'claim': query,
        }
        url = "http://130.245.163.20"
        port = "80"
        response = requests.post(f'{url}:{port}/{retriever_endpoint}', headers=headers, json=json_data)
        response.raise_for_status()
        documents = response.json()["Documents"]
        k = 3
        topk_documents = documents[:k]
        corpus = '\n\n'.join(topk_documents)

        # Stream the status message word by word.
        for chunk in message.split():
            text += chunk + " "
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing
            placeholder.markdown(text + "▌")
        placeholder.markdown(text)
    return corpus
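# The retrieval service (host/port hard-coded above) is assumed to expose one
# POST endpoint per retriever, taking a claim and answering with a ranked
# document list, e.g.:
#   POST http://130.245.163.20:80/bm25   body: {"claim": "Aspirin prevents strokes."}
#   response: {"Documents": ["doc 1 text", "doc 2 text", ...]}
# Only the request/response fields actually used above are shown; the example
# claim is illustrative.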
def reasoner(query: str, documents: str, llm_client: Any) -> tuple[str, str]:
    """Run the selected reasoner over the claim and any retrieved evidence; return (reasoning, decision)."""
    with st.chat_message("assistant"):
        placeholder = st.empty()
        text = ""
        # `selected_reasoner` is read from the enclosing script scope.
        if selected_reasoner == "Claude Sonnet":
            message = "Using Claude Sonnet to reason and verify the claim..."
        elif selected_reasoner == "GPT-4o":
            message = "Using GPT-4o to analyze and verify the claim in detail..."
        else:  # "o3-mini"
            message = "Using o3-mini to quickly analyze the claim..."

        if not documents:
            prompt_template = Template(templates["no_evidence"])
            prompt = prompt_template.substitute(claim=query)
        else:
            prompt_template = Template(templates["with_evidence"])
            prompt = prompt_template.substitute(claim=query, corpus=documents)

        llm_response = llm_client.run_inference(prompt)
        answer_dict = safe_parse_json(llm_response)
        if answer_dict is None:
            # The model did not return valid JSON; surface an empty verdict.
            return "", ""
        decision = answer_dict.get("decision", "")
        reasoning = answer_dict.get("reasoning", "")

        # Stream the status message word by word.
        for chunk in message.split():
            text += chunk + " "
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing
            placeholder.markdown(text + "▌")
        placeholder.markdown(text)
    return reasoning, decision
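# Both prompt templates come from prompts.templates and are standard
# string.Template strings: "no_evidence" must contain a $claim placeholder and
# "with_evidence" both $claim and $corpus, since Template.substitute() raises
# KeyError on any missing placeholder.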
# Accept user input
if prompt := st.chat_input("Type here"):
    # Record which models handled this claim alongside the user message.
    prompt = prompt + " \n" + " \n" + f"Retriever: {selected_retriever}, Reasoner: {selected_reasoner}"
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    options = {}
    options["max_tokens"] = 500
    options["temperature"] = 0.0
    if selected_reasoner == "Claude Sonnet":
        api_key = os.getenv("claude_key")
        options["model_family"] = "Anthropic"
        options["model_name"] = "claude-3-5-sonnet-20240620"
    elif selected_reasoner == "GPT-4o":
        api_key = os.getenv("openai_key")
        options["model_family"] = "OpenAI"
        options["model_name"] = "gpt-4o-2024-11-20"
    elif selected_reasoner == "o3-mini":
        api_key = os.getenv("openai_key")
        options["model_family"] = "OpenAI"
        options["model_name"] = "o3-mini-2025-01-31"
    options["API_KEY"] = api_key
    llm_client = LLMReasoner(options)
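    # Sketch of the options dict handed to LLMReasoner (keys as used above;
    # the exact schema is defined by llm_reasoner, which is not shown here):
    #   {"max_tokens": 500, "temperature": 0.0, "model_family": "OpenAI",
    #    "model_name": "gpt-4o-2024-11-20", "API_KEY": "<from env>"}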
    retrieved_documents = retriever(prompt, selected_retriever)
    reasoning, decision = reasoner(prompt, retrieved_documents, llm_client)
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        if decision.lower() == 'support':
            assistant_response = f'The claim is CORRECT because {reasoning}'
        elif decision.lower() == 'contradict':
            assistant_response = f'The claim is INCORRECT because {reasoning}'
        else:
            # Guard against an unexpected or empty decision so the variable is always bound.
            assistant_response = 'I could not reach a verdict on this claim.'
        # Simulate stream of response with milliseconds delay
        for chunk in assistant_response.split():
            full_response += chunk + " "
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": full_response})
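# To try the app locally (script name assumed; adjust to the actual filename):
#   streamlit run app.py
# with claude_key / openai_key and one env var per authorized username set.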