import os
import streamlit as st
from datetime import datetime
import requests
import uuid
from pydantic import BaseModel
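
# Streamlit prototype: a "RAG Query Designer" experiment UI with a sidebar for
# prompt/model settings, a central chat window, and a stats column. The chat
# backend is mocked below so the page can be exercised without a live API.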
# Placeholder personas
placeHolderPersona1 = """## Mission Statement
My mission is to utilize my expertise to aid in the medical triaging process by providing a clear, concise, and accurate assessment of potential arthritis-related conditions."""

placeHolderPersona2 = """## Mission
To analyse a clinical triaging discussion between a patient and an AI doctor, with a focus on Immunology symptoms, medical history, and test results, in order to deduce the most probable Immunology diagnosis."""

# Request payload model shared with the chat API
class ChatRequestClient(BaseModel):
    user_id: str
    user_input: str
    numberOfQuestions: int
    persona1SystemMessage: str
    persona2SystemMessage: str
    llm1: str
    tokens1: int
    temperature1: float
    userMessage2: str
    llm2: str
    tokens2: int
    temperature2: float


# Mock API call function (echoes the user input so the UI can run offline)
def call_chat_api(data: ChatRequestClient):
    return {
        "content": f"Response to: {data.user_input}",
        "elapsed_time": 0.5,
        "count": 1,
        "response_tokens": len(data.user_input.split()),  # Mock token count
    }
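
# Hedged sketch (not part of the original app): if the mock above were replaced
# by a real backend call, the otherwise-unused `requests` import could be used
# roughly as follows. The endpoint URL and the response shape are assumptions.
def call_chat_api_remote(data: ChatRequestClient, url: str = "http://localhost:8000/chat"):
    # Serialise the pydantic model (model_dump() on v2, dict() on v1)
    payload = data.model_dump() if hasattr(data, "model_dump") else data.dict()
    resp = requests.post(url, json=payload, timeout=60)
    resp.raise_for_status()
    # Assumed to return the same keys as the mock: content, elapsed_time, count, response_tokens
    return resp.json()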

def format_elapsed_time(time):
    return "{:.2f}".format(time)

# Layout with three columns
col1, col2, col3 = st.columns([1, 8, 1], gap="small") # Adjusted width ratios for better centering
# Left Column: Variables and Settings
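# Note: st.sidebar widgets always render in the sidebar, so the col1 context
# below does not change where these controls appear on the page.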
with col1:
    st.sidebar.image('cognizant_logo.jpg')
    st.sidebar.header("RAG Query Designer")
    st.sidebar.subheader("Query Translation Prompt")
    numberOfQuestions = st.sidebar.slider("Number of Questions", 0, 10, 5, 1)
    persona1SystemMessage = st.sidebar.text_area("Define Query Translation Prompt", value=placeHolderPersona1, height=300)
    llm1 = st.sidebar.selectbox("Model Selection", ['GPT-4', 'GPT3.5'])
    temp1 = st.sidebar.slider("Temperature", 0.0, 1.0, 0.6, 0.1)
    tokens1 = st.sidebar.slider("Tokens", 0, 4000, 500, 100)

# Middle Column: Chat Interface
with col2:
    st.markdown("<div style='text-align: center;'><h1>Experiment With Queries</h1></div>", unsafe_allow_html=True)

    # User ID Input
    user_id = st.text_input("User ID:", key="user_id")

    if not user_id:
        st.warning("Please provide a User ID to start the chat.")
    else:
        # Initialize chat history
        if "messages" not in st.session_state:
            st.session_state.messages = []

        # Display chat history in a container
        with st.container():
            for message in st.session_state.messages:
                with st.chat_message(message["role"]):
                    st.markdown(message["content"])

        # Chat input at the bottom
        if user_input := st.chat_input("Start Chat:"):
            # Add user message
            st.session_state.messages.append({"role": "user", "content": user_input})
            st.chat_message("user").markdown(user_input)

            # Prepare data for API call
            data = ChatRequestClient(
                user_id=user_id,
                user_input=user_input,
                numberOfQuestions=numberOfQuestions,
                persona1SystemMessage=persona1SystemMessage,
                persona2SystemMessage=placeHolderPersona2,  # no persona2 editor in the sidebar, so fall back to the placeholder
                llm1=llm1,
                tokens1=tokens1,
                temperature1=temp1,  # use the sidebar temperature instead of a hard-coded 0.0
                userMessage2="",
                llm2="GPT-4",
                tokens2=500,
                temperature2=0.1,
            )

            # Call the API
            response = call_chat_api(data)

            # Process response
            agent_message = response["content"]
            elapsed_time = response["elapsed_time"]
            count = response["count"]
            response_tokens = response["response_tokens"]

            # Add agent response
            st.session_state.messages.append({"role": "assistant", "content": agent_message})
            st.chat_message("assistant").markdown(agent_message)
# Right Column: Stats
with col3:
    st.header("Stats")
    if "elapsed_time" in locals() and "count" in locals():
        st.markdown(f"**Time taken:** {format_elapsed_time(elapsed_time)} seconds")
        st.markdown(f"**Question Count:** {count} of {numberOfQuestions}")
        st.markdown(f"**Response Tokens:** {response_tokens}")
    else:
        st.markdown("No stats available yet.")