|
import json
import os
import uuid
from datetime import date, datetime
from typing import Optional

import requests
import streamlit as st
from pydantic import BaseModel, Field
|
|
|
|
|
|
|
# Default system prompt for the first agent (triaging / symptom-intake persona).
# Shown as the initial value of the "Define Triaging Persona" sidebar text area,
# where the user can edit it before it is sent to the chat API.
placeHolderPersona1 = """## Mission Statement
My mission is to leverage my expertise to help diagnose and troubleshoot car issues by providing a clear, concise, and accurate assessment of potential car problems.

# Assessment process

Stick to asking questions related to identifying potential car issues.
Ask only one question at a time.
Provide context or clarification for your follow-up questions.
Do not engage in conversation with the customer.
Be as concise as possible. """
|
|
|
# Default system prompt for the second agent (diagnosis / next-best-action persona).
# Initial value of the "Define Diagnosis Persona" sidebar text area.
# FIX: the original triple-quoted string was never terminated, which is a
# SyntaxError and swallowed the rest of the module; the closing quotes are added.
placeHolderPersona2 = """# Mission
To thoroughly examine a potential issue with a customer's vehicle, the conversation between the customer and the AI car service provider should cover a wide range of inquiries.

## Inquiry Process

1. This includes general inquiries such as available services, pricing, location, hours of operation, and appointment booking.
2. Specific services like oil changes, tire replacement, brake inspections, diagnosing potential car problems, and technical issues should be addressed.
3. Inform the customer about special services such as car pick-up and drop-off, and offering loaner cars, to identify the most likely potential issue with the car's performance.

# Limitations
Although I specialize as a car service provider, I understand that some cases may not fit neatly within my expertise. If the inquiry suggests a condition outside of my area of knowledge, I will perform the best examination possible, bearing in mind that my confidence score will be influenced by the limitations of my specialization in those cases."""
|
|
|
class ChatRequestClient(BaseModel):
    """Request payload POSTed as JSON to the agent-builder chat API (see call_chat_api)."""

    # Identifier of the end user owning the conversation.
    user_id: str
    # Latest message typed by the user in the chat box.
    user_input: str
    # Question budget for the intake agent (sidebar slider, 0-10).
    numberOfQuestions: int
    # Optional greeting text; sent as "" by this UI.
    welcomeMessage: str
    # Model name for persona 1 (e.g. 'GPT-4', 'GPT3.5').
    llm1: str
    # Max token budget for persona 1.
    tokens1: int
    # Sampling temperature for persona 1 (0.0-1.0 from the slider).
    temperature1: float
    # System prompt for the triaging / intake persona.
    persona1SystemMessage: str
    # System prompt for the diagnosis / next-best-action persona.
    persona2SystemMessage: str
    # User-role preamble handed to persona 2 (prefix for the conversation so far).
    userMessage2: str
    # Model name for persona 2.
    llm2: str
    # Max token budget for persona 2.
    tokens2: int
    # Sampling temperature for persona 2.
    temperature2: float
|
|
|
def call_chat_api(data: ChatRequestClient, timeout: float = 60.0):
    """POST the chat request to the agent-builder API and return its response.

    Args:
        data: Validated request payload.
        timeout: Seconds to wait for the server (new, backward-compatible
            parameter; prevents the Streamlit UI from hanging forever).

    Returns:
        The decoded JSON response (a dict) on HTTP 200, otherwise an error
        string. Callers must check the type before indexing into the result.
    """
    url = "https://agent-builder-api.greensea-b20be511.northeurope.azurecontainerapps.io/chat/"

    # Serialise the pydantic model into a plain dict for the JSON body.
    validated_data = data.dict()

    try:
        # Without a timeout a dead server would block this call indefinitely.
        response = requests.post(url, json=validated_data, timeout=timeout)
    except requests.RequestException:
        # Network failure / timeout: report via the same error-string contract.
        return "An error occurred"

    if response.status_code == 200:
        return response.json()  # Return the JSON response if successful
    # Fixed typo in the original message: "occured" -> "occurred".
    return "An error occurred"
|
|
|
def genuuid():
    """Return a fresh random identifier (UUID version 4) for the session."""
    session_id = uuid.uuid4()
    return session_id
|
|
|
def format_elapsed_time(time):
    """Render an elapsed-time number as a string with exactly two decimals."""
    return f"{time:.2f}"
|
|
|
|
|
# ---- Page title ----
st.title('LLM-Powered Agent Interaction')

# ---- Sidebar: agent persona design controls ----
st.sidebar.image('cognizant_logo.jpg')
st.sidebar.header("Agent Personas Design")

# Persona 1: symptom/issue intake agent configuration.
st.sidebar.subheader("Symptom Intake AI")
numberOfQuestions = st.sidebar.slider("Number of Questions", min_value=0, max_value=10, step=1, value=5, key='persona1_questions')
persona1SystemMessage = st.sidebar.text_area("Define Triaging Persona", value=placeHolderPersona1, height=300)
with st.sidebar.expander("See explanation"):
    st.write("This AI persona will converse with the patient to gather their symptoms. With each round of chat, the object of the AI is to ask more specific follow up questions as it narrows down to the specific diagnosis. However this AI should never give a diagnosis")
    st.image("agentPersona1.png")
llm1 = st.sidebar.selectbox("Model Selection", ['GPT-4', 'GPT3.5'], key='persona1_size')
# FIX: corrected user-visible label typo "Tempreature" -> "Temperature".
temp1 = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.6, key='persona1_temp')
tokens1 = st.sidebar.slider("Tokens", min_value=0, max_value=4000, step=100, value=500, key='persona1_tokens')
|
|
|
# Persona 2: diagnosis / next-best-action agent configuration.
st.sidebar.subheader("Diagnosis and Next Best Action AI")
persona2SystemMessage = st.sidebar.text_area("Define Diagnosis Persona", value=placeHolderPersona2, height=300)
with st.sidebar.expander("See explanation"):
    st.write("This AI persona uses the output of the symptom intake AI as its input. This AI’s job is to augment a health professional by assisting with a diagnosis and possible next best action. The teams will need to determine if this should be a tool used directly by the patient, as an assistant to the health professional or a hybrid of the two. ")
    st.image("agentPersona2.png")
llm2 = st.sidebar.selectbox("Model Selection", ['GPT-4', 'GPT3.5'], key='persona2_size')
# FIX: corrected user-visible label typo "Tempreature" -> "Temperature".
temp2 = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.5, key='persona2_temp')
tokens2 = st.sidebar.slider("Tokens", min_value=0, max_value=4000, step=100, value=500, key='persona2_tokens')
userMessage2 = st.sidebar.text_area("Define User Message", value="This is the conversation todate, ", height=150)
st.sidebar.caption(f"Session ID: {genuuid()}")
|
# ---- Main chat interface ----
st.header("Chat with the Agents")
user_id = st.text_input("User ID:", key="user_id")
user_input = st.text_input("Write your message here:", key="user_input")

# Streamlit reruns the whole script on every interaction; session_state is a
# MutableMapping, so setdefault seeds the transcript list exactly once.
st.session_state.setdefault('history', [])
|
|
|
if st.button("Send"):
    # Bundle every sidebar/control value into the API request payload.
    data = ChatRequestClient(
        user_id=user_id,
        user_input=user_input,
        numberOfQuestions=numberOfQuestions,
        welcomeMessage="",
        llm1=llm1,
        tokens1=tokens1,
        temperature1=temp1,
        persona1SystemMessage=persona1SystemMessage,
        persona2SystemMessage=persona2SystemMessage,
        userMessage2=userMessage2,
        llm2=llm2,
        tokens2=tokens2,
        temperature2=temp2,
    )
    response = call_chat_api(data)

    # FIX: call_chat_api returns a plain error STRING on failure; the original
    # indexed response['elapsed_time'] unconditionally, crashing the app with a
    # TypeError whenever the API call failed. Guard on the type first.
    if isinstance(response, dict):
        # API response schema (from the server):
        # {"count": ..., "user_id": ..., "time_stamp": ..., "elapsed_time": ..., "content": ...}
        # FIX: corrected heading typo "Time take" -> "Time taken".
        st.markdown(f"##### Time taken: {format_elapsed_time(response['elapsed_time'])}")
        st.markdown(f"##### Question Count : {response['count']} of {numberOfQuestions}")

        # Record both sides of the exchange only on success.
        st.session_state.history.append("You: " + user_input)
        st.session_state.history.append("Agent: " + response['content'])
    else:
        st.error(response)

# Replay the transcript accumulated across reruns.
for message in st.session_state.history:
    st.write(message)
|
|
|
|
|
|