import streamlit as st
import random
import time
import hmac
import bcrypt  # Used by the commented-out hashed-password path below
import os

st.header("Scientific Claim Verification")


def check_password():
    """Returns `True` if the user entered a correct password."""

    def login_form():
        """Form with widgets to collect user credentials."""
        with st.form("Credentials"):
            st.text_input("Username", key="username")
            st.text_input("Password", type="password", key="password")
            st.form_submit_button("Log in", on_click=password_entered)

    def password_entered():
        """Checks whether the password entered by the user is correct."""
        stored_password = os.getenv(st.session_state["username"])
        # Use a constant-time comparison to avoid leaking timing information.
        if stored_password is not None and hmac.compare_digest(
            stored_password, st.session_state["password"]
        ):
            st.session_state["password_correct"] = True
            del st.session_state["password"]  # Remove credentials from session
            del st.session_state["username"]
            return

        # Alternative: compare against bcrypt hashes stored in st.secrets.
        # if st.session_state["username"] in st.secrets["passwords"]:
        #     stored_hashed_password = st.secrets["passwords"][st.session_state["username"]]  # Retrieved as a string
        #     # Convert the hashed password back to bytes if it is stored as a string
        #     if isinstance(stored_hashed_password, str):
        #         stored_hashed_password = stored_hashed_password.encode()
        #     # Compare the user-entered password (encoded) with the stored hash
        #     if bcrypt.checkpw(st.session_state["password"].encode(), stored_hashed_password):
        #         st.session_state["password_correct"] = True
        #         del st.session_state["password"]  # Remove credentials from session
        #         del st.session_state["username"]
        #         return

        # If authentication fails
        st.session_state["password_correct"] = False

    # Return True if the username + password has already been validated.
    if st.session_state.get("password_correct", False):
        return True

    # Show inputs for username + password.
    login_form()
    if "password_correct" in st.session_state:
        st.error("😕 User not known or password incorrect")
    return False


if not check_password():
    st.stop()
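
# A minimal sketch (not part of the app) of how credentials for the
# commented-out bcrypt path above could be produced. The username "alice"
# and the password "s3cret" below are placeholders, not real credentials:
#
#     import bcrypt
#     print(bcrypt.hashpw("s3cret".encode(), bcrypt.gensalt()).decode())
#
# The printed hash would then go in .streamlit/secrets.toml, e.g.:
#
#     [passwords]
#     alice = "$2b$12$..."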

# Start of the Agentic Demo
st.caption("Team UMBC-SBU-UT")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": "Let's start verifying the claims here! 👇"}
    ]

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])


def retriever(query: str):
    """Simulate a 'retriever' step, searching for relevant information."""
    with st.chat_message("assistant"):
        placeholder = st.empty()
        text = ""
        message = "Retrieving the documents related to the claim..."
        for chunk in message.split():
            text += chunk + " "
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing
            placeholder.markdown(text + "▌")
        placeholder.markdown(text)
    # You could return the retrieved documents here.
    return message


def reasoner(info: str):
    """Simulate a 'reasoner' step, thinking about how to answer."""
    with st.chat_message("assistant"):
        placeholder = st.empty()
        text = ""
        message = "Reasoning and verifying the claim..."
        for chunk in message.split():
            text += chunk + " "
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing
            placeholder.markdown(text + "▌")
        placeholder.markdown(text)
    # You could return the reasoning trace here.
    return message


# Accept user input
if prompt := st.chat_input("Type here"):
    # Add the user message to the chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display the user message in a chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    retrieved_documents = retriever(prompt)
    reasoning = reasoner(retrieved_documents)

    # Display the assistant response in a chat message container
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        assistant_response = random.choice(
            [
                "The claim is correct.",
                "The claim is incorrect.",
            ]
        )
        # Simulate a stream of response tokens with a small delay
        for chunk in assistant_response.split():
            full_response += chunk + " "
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)

    # Add the assistant response to the chat history
    st.session_state.messages.append({"role": "assistant", "content": full_response})
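
# The random verdict above is a placeholder. One way to wire in a real
# verifier (verify_claim is a hypothetical helper, not an existing API)
# would be to replace the random.choice call with something like:
#
#     def verify_claim(claim: str, evidence: str) -> str:
#         """Return a verdict such as 'The claim is correct.' based on evidence."""
#         ...  # e.g., call an NLI or LLM-based verification model
#
#     assistant_response = verify_claim(prompt, retrieved_documents)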