import os
import streamlit as st
from datetime import datetime
import requests
import uuid
from pydantic import BaseModel
# Placeholder personas
placeHolderPersona1 = """## Mission Statement
My mission is to utilize my expertise to aid in the medical triaging process by providing a clear, concise, and accurate assessment of potential arthritis-related conditions."""
placeHolderPersona2 = """## Mission
To analyse a clinical triaging discussion between a patient and an AI doctor, focusing on Immunology symptoms, medical history, and test results, in order to deduce the most probable Immunology diagnosis."""
# Request payload model for the chat API
class ChatRequestClient(BaseModel):
    user_id: str
    user_input: str
    numberOfQuestions: int
    persona1SystemMessage: str
    persona2SystemMessage: str
    llm1: str
    tokens1: int
    temperature1: float
    userMessage2: str
    llm2: str
    tokens2: int
    temperature2: float
# Mock API call function: returns a canned response so the UI can be exercised
# without a live backend.
def call_chat_api(data: ChatRequestClient):
    return {
        "content": f"Response to: {data.user_input}",
        "elapsed_time": 0.5,
        "count": 1,
        "response_tokens": len(data.user_input.split()),  # Mock token count
    }
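# A minimal sketch of what a real backend call could look like, kept for
# reference only. The endpoint path and JSON contract are assumptions for
# illustration; the app above still uses the mocked call_chat_api.
def call_chat_api_http(data: ChatRequestClient, base_url: str = "http://localhost:8000"):
    # Serialise the pydantic model and POST it to a (hypothetical) /chat endpoint.
    payload = data.dict()  # use data.model_dump() on pydantic v2
    resp = requests.post(f"{base_url}/chat", json=payload, timeout=60)
    resp.raise_for_status()
    return resp.json()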
def format_elapsed_time(time):
    return "{:.2f}".format(time)
# Layout with three columns
col1, col2, col3 = st.columns([1, 3, 1]) # Adjusted width ratios for better centering
# Left Column: Variables and Settings
with col1:
    st.sidebar.image('cognizant_logo.jpg')
    st.sidebar.header("Agent Personas Design")
    st.sidebar.subheader("Intake AI")
    numberOfQuestions = st.sidebar.slider("Number of Questions", 0, 10, 5, 1)
    persona1SystemMessage = st.sidebar.text_area("Define Intake Persona", value=placeHolderPersona1, height=300)
    llm1 = st.sidebar.selectbox("Model Selection", ['GPT-4', 'GPT3.5'])
    temp1 = st.sidebar.slider("Temperature", 0.0, 1.0, 0.6, 0.1)
    tokens1 = st.sidebar.slider("Tokens", 0, 4000, 500, 100)
    persona2SystemMessage = st.sidebar.text_area("Define Recommendation Persona", value=placeHolderPersona2, height=300)
    llm2 = st.sidebar.selectbox("Model Selection", ['GPT-4', 'GPT3.5'], key="persona2")
    temp2 = st.sidebar.slider("Temperature", 0.0, 1.0, 0.5, 0.1, key="temp2")
    tokens2 = st.sidebar.slider("Tokens", 0, 4000, 500, 100, key="tokens2")
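# Note: these controls use st.sidebar, so they render in the sidebar rather than
# in col1; their values are re-read on every rerun and sent with each chat turn.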
# Middle Column: Chat Interface
with col2:
    st.markdown("<div style='text-align: center;'><h1>Chat with the Agents</h1></div>", unsafe_allow_html=True)
    # User ID Input
    user_id = st.text_input("User ID:", key="user_id")
    if not user_id:
        st.warning("Please provide a User ID to start the chat.")
    else:
        # Initialize chat history
        if "messages" not in st.session_state:
            st.session_state.messages = []
        # Display chat history in a container
        with st.container():
            for message in st.session_state.messages:
                role = "User" if message["role"] == "user" else "Agent"
                with st.chat_message(message["role"]):
                    st.markdown(message["content"])
        # Chat input at the bottom
        if user_input := st.chat_input("Write your message here:"):
            # Add user message
            st.session_state.messages.append({"role": "user", "content": user_input})
            st.chat_message("user").markdown(user_input)
            # Prepare data for API call
            data = ChatRequestClient(
                user_id=user_id,
                user_input=user_input,
                numberOfQuestions=numberOfQuestions,
                persona1SystemMessage=persona1SystemMessage,
                persona2SystemMessage=persona2SystemMessage,
                llm1=llm1,
                tokens1=tokens1,
                temperature1=temp1,
                userMessage2="",
                llm2=llm2,
                tokens2=tokens2,
                temperature2=temp2,
            )
            # Call the API
            response = call_chat_api(data)
            # Process response
            agent_message = response["content"]
            elapsed_time = response["elapsed_time"]
            count = response["count"]
            response_tokens = response["response_tokens"]
            # Add agent response
            st.session_state.messages.append({"role": "assistant", "content": agent_message})
            st.chat_message("assistant").markdown(agent_message)
# Right Column: Stats
with col3:
    st.header("Stats")
    # elapsed_time/count/response_tokens only exist in locals() on the rerun
    # that just produced an agent response.
    if "elapsed_time" in locals() and "count" in locals():
        st.markdown(f"**Time taken:** {format_elapsed_time(elapsed_time)} seconds")
        st.markdown(f"**Question Count:** {count} of {numberOfQuestions}")
        st.markdown(f"**Response Tokens:** {response_tokens}")
    else:
        st.markdown("No stats available yet.")