"""Streamlit RAG query-design chat UI.

Left column: sidebar controls for the query-translation prompt and model
settings.  Middle column: a chat interface backed by ``call_chat_api``
(currently a mock).  Right column: stats for the most recent response.
"""

import os
import uuid
from datetime import datetime

import requests
import streamlit as st
from pydantic import BaseModel

# Placeholder personas (default system prompts shown in the sidebar).
placeHolderPersona1 = """## Mission Statement My mission is to utilize my expertise to aid in the medical triaging process by providing a clear, concise, and accurate assessment of potential arthritis related conditions."""

placeHolderPersona2 = """## Mission To analyse a clinical triaging discussion between a patient and AI doctor interactions with a focus on Immunology symptoms, medical history, and test results to deduce the most probable Immunology diagnosis."""


class ChatRequestClient(BaseModel):
    """Request payload for the chat API."""

    user_id: str
    user_input: str
    numberOfQuestions: int
    persona1SystemMessage: str
    persona2SystemMessage: str
    llm1: str
    tokens1: int
    temperature1: float
    userMessage2: str
    llm2: str
    tokens2: int
    temperature2: float


def call_chat_api(data: ChatRequestClient) -> dict:
    """Mock API call: echoes the user input and fabricates timing/stats."""
    return {
        "content": f"Response to: {data.user_input}",
        "elapsed_time": 0.5,
        "count": 1,
        "response_tokens": len(data.user_input.split()),  # Mock token count
    }


def format_elapsed_time(time: float) -> str:
    """Return *time* (seconds) formatted with two decimal places."""
    return "{:.2f}".format(time)


# Layout with three columns; width ratios chosen for better centering.
col1, col2, col3 = st.columns([1, 3, 1], gap="small")

# Left Column: Variables and Settings
with col1:
    st.sidebar.image('cognizant_logo.jpg')
    st.sidebar.header("RAG Query Designer")
    st.sidebar.subheader("Query Translation Prompt")
    numberOfQuestions = st.sidebar.slider("Number of Questions", 0, 10, 5, 1)
    persona1SystemMessage = st.sidebar.text_area(
        "Define Query Translation Prompt", value=placeHolderPersona1, height=300
    )
    llm1 = st.sidebar.selectbox("Model Selection", ['GPT-4', 'GPT3.5'])
    temp1 = st.sidebar.slider("Temperature", 0.0, 1.0, 0.6, 0.1)
    tokens1 = st.sidebar.slider("Tokens", 0, 4000, 500, 100)

# Middle Column: Chat Interface
with col2:
    # NOTE(review): the original inline-HTML heading was garbled (tags were
    # stripped); reconstructed as a centered heading with the same visible text.
    st.markdown(
        "<h2 style='text-align: center;'>Experiment With Querys</h2>",
        unsafe_allow_html=True,
    )

    # User ID Input
    user_id = st.text_input("User ID:", key="user_id")

    if not user_id:
        st.warning("Please provide an experiment ID to start the chat.")
    else:
        # Initialize chat history
        if "messages" not in st.session_state:
            st.session_state.messages = []

        # Display chat history in a container
        with st.container():
            for message in st.session_state.messages:
                with st.chat_message(message["role"]):
                    st.markdown(message["content"])

        # Chat input at the bottom
        if user_input := st.chat_input("Start Chat:"):
            # Add user message
            st.session_state.messages.append({"role": "user", "content": user_input})
            st.chat_message("user").markdown(user_input)

            # Prepare data for API call.
            # BUG FIX: the original referenced an undefined
            # ``persona2SystemMessage`` (NameError on first message) and
            # hard-coded ``temperature1=0.`` even though ``temp1`` is
            # collected from the sidebar slider.
            data = ChatRequestClient(
                user_id=user_id,
                user_input=user_input,
                numberOfQuestions=numberOfQuestions,
                persona1SystemMessage=persona1SystemMessage,
                persona2SystemMessage=placeHolderPersona2,
                llm1=llm1,
                tokens1=tokens1,
                temperature1=temp1,
                userMessage2="",
                llm2="GPT-4",
                tokens2=500,
                temperature2=0.1,
            )

            # Call the API
            response = call_chat_api(data)

            # Persist stats in session state so the right column can display
            # them on later reruns too; the original ``locals()`` check only
            # worked on the exact rerun that sent a message.
            st.session_state["last_stats"] = {
                "elapsed_time": response["elapsed_time"],
                "count": response["count"],
                "response_tokens": response["response_tokens"],
            }

            # Add agent response
            agent_message = response["content"]
            st.session_state.messages.append(
                {"role": "assistant", "content": agent_message}
            )
            st.chat_message("assistant").markdown(agent_message)

# Right Column: Stats
with col3:
    st.header("Stats")
    stats = st.session_state.get("last_stats")
    if stats:
        st.markdown(f"**Time taken:** {format_elapsed_time(stats['elapsed_time'])} seconds")
        st.markdown(f"**Question Count:** {stats['count']} of {numberOfQuestions}")
        st.markdown(f"**Response Tokens:** {stats['response_tokens']}")
    else:
        st.markdown("No stats available yet.")