# Doctor_v2 / azure_openai.py
# (Hugging Face page artifacts removed from the top of this file:
#  "davidfearne's picture", "Update azure_openai.py", "c23a7cd verified" —
#  they were scrape residue and broke Python syntax.)
import os
import pandas as pd
import streamlit as st
# from langchain.chat_models import AzureChatOpenAI
from langchain_openai import AzureChatOpenAI
from langchain_core.output_parsers import StrOutputParser, PydanticOutputParser
from langchain_core.prompts.chat import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate
from pydantic import BaseModel, Field, validator
from langchain.output_parsers.enum import EnumOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableParallel
from enum import Enum
from helpers import read_md_files_from_doctors
# os.environ["LANGCHAIN_TRACING_V2"]="true"
# os.environ["LANGCHAIN_ENDPOINT"]="https://api.smith.langchain.com"
# LANGCHAIN_API_KEY = st.secrets['LANGCHAIN_API_KEY']
# os.environ["LANGCHAIN_PROJECT"]="UC2e2e"
# LLM Langchain Definition
# Azure OpenAI connection settings. Only the API key comes from Streamlit
# secrets; the endpoint, API version and deployment name are hard-coded.
OPENAI_API_KEY = st.secrets['azure_api_key']
OPENAI_API_TYPE = "azure"
OPENAI_API_BASE = "https://davidfearn-gpt4.openai.azure.com"
OPENAI_API_VERSION = "2024-08-01-preview"
OPENAI_MODEL = "gpt-4o-mini"

# Shared chat-model client used by every chain in this module.
# temperature=0 minimises sampling randomness for the triage prompts.
llm = AzureChatOpenAI(
    openai_api_version=OPENAI_API_VERSION,
    openai_api_key=OPENAI_API_KEY,
    azure_endpoint=OPENAI_API_BASE,
    openai_api_type=OPENAI_API_TYPE,
    deployment_name=OPENAI_MODEL,
    temperature=0,
)
# Function to read file contents
def read_file(folder, file):
    """
    Read a Markdown asset and return its content as a string.

    :param folder: Directory containing the ``.md`` file.
    :param file: File name to read, without the ``.md`` extension.
    :return: The file content as a string, or None if the file is missing
             or unreadable (the error is printed, not raised).
    """
    fp = f"{folder}/{file}.md"
    try:
        # Use a distinct handle name: the original shadowed the `file`
        # parameter inside the `with` block.
        with open(fp, 'r', encoding='utf-8') as fh:
            return fh.read()
    except FileNotFoundError:
        print(f"The file at {fp} was not found.")
    except IOError:
        print(f"An error occurred while reading the file at {fp}.")
    return None
# Function to generate structured insights
def converse_with_patient(converstaional_history, latest_message):
    """
    Run one turn of the patient-intake conversation.

    :param converstaional_history: Prior patient/doctor exchange, or any
        falsy value (False, None, "") when this is the first turn.
    :param latest_message: The newest message from the patient.
    :return: The model's reply as a plain string.
    """
    # Treat any falsy history as the first interaction. The original
    # `== False` check missed None and the empty string; the replacement
    # string also had a typo ("iteraction").
    if not converstaional_history:
        converstaional_history = "first interaction"
    SystemMessage = read_file("conversation", "intake_system")
    UserMessage = (
        "This is the conversational history between the patient and the virtual "
        "doctor if one exists {converstaional_history} and this is the latest "
        "message from the patient {latest_message}"
    )
    system_message_template = SystemMessagePromptTemplate.from_template(SystemMessage)
    prompt = ChatPromptTemplate.from_messages([system_message_template, UserMessage])
    chain = prompt | llm | StrOutputParser()
    return chain.invoke({
        "converstaional_history": converstaional_history,
        "latest_message": latest_message,
    })
def diagnosis_consensus(diagnosis_results, conversational_history):
    """
    Merge the per-specialist diagnoses into a single consensus diagnosis.

    :param diagnosis_results: Mapping of specialist name to diagnosis,
        thinking and confidence entries (built by create_diagnosis).
    :param conversational_history: The patient/doctor conversation the
        diagnoses were based on.
    :return: dict with keys "diagnosis", "explanation", "confidence".
    """
    # Structured-output schema for the consensus answer.
    class Diagnosis_Consensus(BaseModel):
        diagnosis: str = Field(description="This field is used to give the diagnosis")
        explanation: str = Field(description="This field is used to explain the diagnosis")
        confidence: int = Field(description="This field is used to show the confidence level of the diagnosis")

    system_message_template = SystemMessagePromptTemplate.from_template(
        "You are a medical manager you will be given a number of diagnosis from "
        "different doctors as well as the original conversation that occurred "
        "between the triaging doctor and the patient. You will need to provide a "
        "consensus diagnosis based on the confidence levels of the different "
        "doctors. You need to provide a final diagnosis, an explanation of the "
        "diagnosis and you overall confidence in the diagnosis. ONLY USE THE "
        "INFORMATION PROVIDED TO YOU. ONLY CHOOSE ONE DIAGNOSIS"
    )
    structured_llm_consensus = llm.with_structured_output(Diagnosis_Consensus)
    # BUG FIX: the original user message only contained
    # {conversational_history} and was invoked with the diagnosis results
    # under that key, so the conversational_history argument was silently
    # dropped and the model never saw the actual conversation. Pass both.
    StandardUserMessage = (
        "These are the diagnoses from the different doctors: {diagnosis_results}, "
        "and this is the conversational history between the patient and the "
        "virtual doctor: {conversational_history}"
    )
    prompt = ChatPromptTemplate.from_messages([system_message_template, StandardUserMessage])
    chain = prompt | structured_llm_consensus
    response = chain.invoke({
        "diagnosis_results": diagnosis_results,
        "conversational_history": conversational_history,
    })
    return {
        "diagnosis": response.diagnosis,
        "explanation": response.explanation,
        "confidence": response.confidence,
    }
def evaluate_diagnosis(diagnosis_results, conversational_history):
    """
    Score how well the proposed diagnosis is supported by the conversation.

    :param diagnosis_results: The diagnosis produced by the AI pipeline.
    :param conversational_history: The patient/doctor conversation.
    :return: dict with keys "confidence" and "explanation".
    """
    # Structured-output schema (class name is part of the schema sent to
    # the model, so it is kept as-is).
    class Diagnosis_Evaluation(BaseModel):
        confidence: int = Field(description="This field is used to show the confidence level of the diagnosis")
        explanation: str = Field(description="This field is used to explain the confidence level of the diagnosis")

    evaluator = llm.with_structured_output(Diagnosis_Evaluation)
    system_template = SystemMessagePromptTemplate.from_template(
        read_file("evaluator", "eval_system_message")
    )
    user_template = (
        "This is the conversational history between the patient and the virtual "
        "doctor: {conversational_history}, this is the diagnosis our AI system "
        "has come up with: {diagnosis_results}"
    )
    pipeline = ChatPromptTemplate.from_messages([system_template, user_template]) | evaluator
    result = pipeline.invoke({
        "diagnosis_results": diagnosis_results,
        "conversational_history": conversational_history,
    })
    return {"confidence": result.confidence, "explanation": result.explanation}
def next_best_action(diagnosis, conversational_history):
    """
    Recommend where the patient should go next, given the diagnosis.

    Available actions: GP / family doctor, specialist, hospital, pharmacist,
    or stay at home and rest.

    :param diagnosis: The consensus diagnosis string.
    :param conversational_history: The patient/doctor conversation.
    :return: dict with keys "action", "explanation", "confidence".
    """
    # Closed set of triage outcomes the model must choose from.
    class Actions(str, Enum):
        GP = "GP / family doctor"
        SPECIALIST = "specialist"
        HOSPITAL = "hospital"
        PHARMACIST = "pharmacist"
        REST = "stay at home and rest"

    # Structured-output schema. Renamed from `next_best_action`, which
    # shadowed the enclosing function.
    class NextBestAction(BaseModel):
        action: Actions = Field(description="This field is used to give the next best action for the patient")
        explanation: str = Field(description="This field is used to explain the next best action for the patient")
        confidence: int = Field(description="This field is used to show the confidence level of the next best action")

    system_message_template = SystemMessagePromptTemplate.from_template(read_file("nba", "nba_system"))
    structured_llm_action = llm.with_structured_output(NextBestAction)
    StandardUserMessage = (
        "This is the conversational history between the patient and the virtual "
        "doctor: {conversational_history}, and this is the diagnosis: {diagnosis}"
    )
    prompt = ChatPromptTemplate.from_messages([system_message_template, StandardUserMessage])
    chain = prompt | structured_llm_action
    response = chain.invoke({"diagnosis": diagnosis, "conversational_history": conversational_history})
    return {
        "action": response.action.value,
        "explanation": response.explanation,
        "confidence": response.confidence,
    }
def create_diagnosis(converstaional_history):
    """
    Run the full diagnosis pipeline over a patient conversation.

    Fans the conversation out in parallel to one chain per specialist
    system message (the .md files returned by read_md_files_from_doctors),
    builds a consensus diagnosis, evaluates it, and recommends a next best
    action.

    :param converstaional_history: The patient/doctor conversation.
    :return: ("SUCCESS" | "FAIL1" | "FAIL2", report dict). FAIL1 means the
        consensus confidence was below 80, FAIL2 means the evaluator's
        confidence was below 80; both fall back to "GP / family doctor".
    """
    # Structured-output schema each specialist chain must return.
    class Diagnosis(BaseModel):
        diagnosis: str = Field(description="This field is used to give the diagnosis")
        confidence: int = Field(description="This field is used to show the confidence level of the diagnosis")
        thinking: str = Field(description="This field is used to show the thinking of why you choose the diagnosis")

    system_message_df = read_md_files_from_doctors()
    structured_llm_diagnosis = llm.with_structured_output(Diagnosis)
    StandardUserMessage = "This is the conversational history between the patient and the virtual doctor {conversational_history}"

    # One chain per specialist system message, keyed by its source filename,
    # executed in parallel over the same conversation.
    chains = {}
    for _, row in system_message_df.iterrows():
        system_message_template = SystemMessagePromptTemplate.from_template(row['content'])
        prompt = ChatPromptTemplate.from_messages([system_message_template, StandardUserMessage])
        chains[row['filename']] = prompt | structured_llm_diagnosis
    response = RunnableParallel(**chains).invoke({"conversational_history": converstaional_history})

    # Flatten the per-specialist results into a single dict.
    diagnosis_results = {}
    for filename, result in response.items():
        # rsplit strips only the extension; split('.')[0] would truncate
        # filenames that contain dots.
        filename_base = filename.rsplit('.', 1)[0]
        diagnosis_results[f"{filename_base}, diagnosis"] = result.diagnosis
        diagnosis_results[f"{filename_base}, thinking"] = result.thinking
        diagnosis_results[f"{filename_base}, confidence"] = result.confidence

    consensus = diagnosis_consensus(diagnosis_results, converstaional_history)
    # Shared portion of every report. The "concensus_*" key spellings are
    # kept (misspelled) for caller compatibility.
    consensus_report = {
        "concensus_diagnosis": consensus["diagnosis"],
        "expert_distribution": diagnosis_results,
        "concensus_confidence": consensus["confidence"],
        "concensus_thinking": consensus["explanation"],
    }
    if consensus["confidence"] < 80:
        return "FAIL1", {
            "diagnosis_completion": "fail",
            **consensus_report,
            "next_best_action_": "GP / family doctor",
            "Error": "The confidence level of the consensus diagnosis is below 80%",
        }

    final_diagnosis = evaluate_diagnosis(consensus, converstaional_history)
    if final_diagnosis["confidence"] < 80:
        return "FAIL2", {
            "diagnosis_completion": "fail",
            **consensus_report,
            "evaluate_confidence": final_diagnosis["confidence"],
            "evaluate_explanation": final_diagnosis["explanation"],
            "next_best_action_": "GP / family doctor",
            "Error": "The confidence level of the final diagnosis is below 80%",
        }

    next_action = next_best_action(consensus["diagnosis"], converstaional_history)
    return "SUCCESS", {
        "diagnosis_completion": "success",
        **consensus_report,
        "evaluate_confidence": final_diagnosis["confidence"],
        "evaluate_explanation": final_diagnosis["explanation"],
        "next_best_action_": next_action["action"],
        "next_best_action_explanation": next_action["explanation"],
        "next_best_action_confidence": next_action["confidence"],
    }
# conversation = """{'conversation_id': '12345623', 'patient': 'John Doe', 'conversation': [{'role': 'user', 'content': 'I have a pain in my lower right hand side.'}, {'role': 'assistant', 'content': 'how bad is the pain on a scale of 1 to 10?'}, {'role': 'user', 'content': '10 out of 10 and its lasted 2 hours'}, {'role': 'assistant', 'content': 'Does the pain radiate to any other part of your body?'}, {'role': 'user', 'content': 'No its just in my abdomen.'}, {'role': 'assistant', 'content': 'have you had any other symptoms like nausea or vomiting?'}, {'role': 'user', 'content': 'I do feel a bit sick'}, {'role': 'assistant', 'content': 'thank you for this information'}]}
# ('PASS', 4, {'conversation_id': '12345623', 'patient': 'John Doe', 'conversation': [{'role': 'user', 'content': 'I have a pain in my lower right hand side.'}, {'role': 'assistant', 'content': 'how bad is the pain on a scale of 1 to 10?'}, {'role': 'user', 'content': '10 out of 10 and its lasted 2 hours'}, {'role': 'assistant', 'content': 'Does the pain radiate
# to any other part of your body?'}, {'role': 'user', 'content': 'No its just in my abdomen.'}, {'role': 'assistant', 'content': 'have you had any other symptoms like nausea or vomiting?'}, {'role': 'user', 'content': 'I do feel a bit sick'}, {'role': 'assistant', 'content': 'thank you for this information'}]})"""
# print(create_diagnosis(conversation))