import streamlit as st
import random
import time

st.header("VERIFYING SCIENTIFIC CLAIMS")
st.caption("Version 0.1")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": "Let's start verifying the claims here! 👇"}
    ]

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])


def retriever(query: str):
    """Simulate a 'retriever' step, searching for relevant information."""
    with st.chat_message("assistant"):
        placeholder = st.empty()
        text = ""
        message = "Retrieving the documents related to the claim..."
        # Stream the status message word by word with a short delay
        for chunk in message.split():
            text += chunk + " "
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing
            placeholder.markdown(text + "▌")
        placeholder.markdown(text)
    # You could return retrieved info here.
    return message


def reasoner(info: str):
    """Simulate a 'reasoner' step, thinking about how to answer."""
    with st.chat_message("assistant"):
        placeholder = st.empty()
        text = ""
        message = "Reasoning and verifying the claim..."
        # Stream the status message word by word with a short delay
        for chunk in message.split():
            text += chunk + " "
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing
            placeholder.markdown(text + "▌")
        placeholder.markdown(text)
    # You could return reasoning info here.
    return message


# Accept user input
if prompt := st.chat_input(
    "40mg/day dosage of folic acid and 2mg/day dosage of vitamin B12 does not "
    "affect chronic kidney disease (CKD) progression."
):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # Call the retriever step
    retriever(prompt)
    # Call the reasoner step
    reasoner(prompt)

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        assistant_response = random.choice(
            [
                "The claim is correct.",
                "The claim is incorrect.",
            ]
        )
        # Simulate stream of response with milliseconds delay
        for chunk in assistant_response.split():
            full_response += chunk + " "
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)

    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": full_response})
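
# A minimal way to try the app locally, assuming the script is saved as
# "verify_claims.py" (the filename is an assumption, not taken from the source):
#
#   streamlit run verify_claims.py
#
# The `streamlit run` command starts a local server and opens the chat UI in
# the browser; typing a claim into the input box triggers the retriever,
# reasoner, and (randomly chosen) verdict messages above.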