Upload 5 files
- app.py +699 -0
- azure_openai.py +253 -0
- helpers.py +19 -0
- memory.py +184 -0
- requirements.txt +13 -0
app.py
ADDED
@@ -0,0 +1,699 @@
from fastapi import FastAPI, HTTPException
import streamlit as st
import pandas as pd
from pydantic import BaseModel
import numpy as np
import plotly.graph_objects as go

from azure_openai import converse_with_patient, create_diagnosis
from memory import get_conversation, store_conversation, update_conversation


class AskQuestion(BaseModel):
    user_input: str
    id: str


app = FastAPI()
def generate_expert_confidence_chart(diagnosis):
    """
    Extracts expert confidence data from the diagnosis dict and generates a
    multi-colored horizontal bar chart.
    """
    # Extract the expert distribution data
    expert_distribution = diagnosis["expert_distribution"]

    # Process the data into a structured format
    rows = []
    for key, value in expert_distribution.items():
        expert, attribute = key.rsplit(", ", 1)  # Split at the last comma
        rows.append({"Expert": expert, "Attribute": attribute, "Value": value})

    # Create a DataFrame
    df = pd.DataFrame(rows)

    # Filter the DataFrame for confidence values only
    df_confidence = df[df["Attribute"] == "confidence"].copy()

    # Merge confidence values with the corresponding thinking explanations
    df_thinking = df[df["Attribute"] == "thinking"].copy()
    df_confidence = df_confidence.merge(df_thinking, on="Expert", suffixes=("_confidence", "_thinking"))

    # Convert confidence values to numeric
    df_confidence["Value_confidence"] = pd.to_numeric(df_confidence["Value_confidence"])

    def confidence_to_color(confidence):
        """
        Maps a confidence score (0-100) to a color blended between red (0) and green (100).
        """
        red = np.array([255, 0, 0])
        green = np.array([0, 255, 0])
        blend_ratio = confidence / 100  # Normalize to the range 0-1
        blended_color = (1 - blend_ratio) * red + blend_ratio * green
        return f"rgb({int(blended_color[0])}, {int(blended_color[1])}, {int(blended_color[2])})"

    # Apply the color mapping
    df_confidence["Color"] = df_confidence["Value_confidence"].apply(confidence_to_color)

    # Build the horizontal bar chart
    fig = go.Figure()
    fig.add_trace(go.Bar(
        y=df_confidence["Expert"],
        x=df_confidence["Value_confidence"],
        text=df_confidence["Value_confidence"],
        hovertext=df_confidence["Value_thinking"],
        orientation="h",
        marker=dict(color=df_confidence["Color"]),
        width=0.3,  # Narrow bars for tighter spacing
        textposition="inside"
    ))

    # Update the layout for better visibility
    fig.update_layout(
        title="Expert Confidence in Diagnosis",
        xaxis_title="Confidence Score",
        yaxis_title="Medical Expert",
        yaxis=dict(tickmode="linear", dtick=1, automargin=True),
        height=max(400, 40 * len(df_confidence)),  # Scale height with the number of experts
        bargap=0.1  # Reduce spacing between bars
    )

    # Update the hover template
    fig.update_traces(
        hovertemplate="<b>%{y}</b><br>Confidence: %{x}%<br>Thinking: %{hovertext}"
    )

    return fig
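
# For illustration, the flat dict produced by create_diagnosis (azure_openai.py) looks like
# the sketch below; the expert name is hypothetical, but the "<expert>, <attribute>" key
# convention is what the rsplit(", ", 1) call above relies on.
#
#   example_distribution = {
#       "cardiologist, diagnosis": "angina",
#       "cardiologist, thinking": "chest pain on exertion...",
#       "cardiologist, confidence": 72,
#   }
#   fig = generate_expert_confidence_chart({"expert_distribution": example_distribution})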

# FastAPI interface routes
# @app.get("/")
# async def root():
#     return {"message": "Welcome to the GenAI Symptom Checker"}

# @app.post("/ask")
# async def ask_question(question: AskQuestion):
#     try:
#         user_input = question.user_input
#         conversation_id = question.id
#
#         exists, count, conversation_obj = get_conversation(conversation_id)
#         if count == 6:
#             response = converse_with_patient(conversation_obj, user_input)
#             store_conversation(conversation_id, user_input, response)
#             exists, count, conversation_obj = get_conversation(conversation_id)
#             diagnosis = create_diagnosis(conversation_obj)
#             return {"response": response, "count": count, "diagnosis": diagnosis}
#         if count > 6:
#             exists, count, conversation_obj = get_conversation(conversation_id)
#             diagnosis_content = next((item['content'] for item in conversation_obj if item['role'] == 'diagnosis'), None)
#             return {"response": "You have reached the maximum number of questions", "count": count, "diagnosis": diagnosis_content}
#         if exists == "PASS":
#             response = converse_with_patient(conversation_obj, user_input)
#             update_conversation(conversation_id, user_input, response)
#             return {"response": response, "count": count, "diagnosis": "none"}
#         else:
#             response = converse_with_patient("", user_input)
#             store_conversation(conversation_id, user_input, response)
#             return {"response": response, "count": count, "diagnosis": "none"}
#
#     except Exception as e:
#         raise HTTPException(status_code=500, detail=str(e))

# App config
st.set_page_config(page_title="Virtual Clinician", page_icon=":medical_symbol:")
st.title("Virtual Clinician :medical_symbol:")

user_id = st.text_input("Name:", key="user_id")

conversation_id = user_id
# Ensure user_id is defined, otherwise prompt for it
if not user_id:
    st.warning("Hi, who am I speaking with?")
else:
    # Session state
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = [
            {"role": "AI", "content": f"Hello, {user_id}. I am the virtual clinician. How can I help you today?"},
        ]

    # Replay the conversation so far
    for message in st.session_state.chat_history:
        if message["role"] == "AI":
            with st.chat_message("AI"):
                st.write(message["content"])
        elif message["role"] == "Human":
            with st.chat_message("Human"):
                st.write(message["content"])

    # User input
    user_input = st.chat_input("Type your message here...")
    if user_input:
        st.session_state.chat_history.append({"role": "Human", "content": user_input})

        with st.chat_message("Human"):
            st.markdown(user_input)

        # Check whether the conversation already exists; if not, a new one is created below
        exists, count, conversation_obj = get_conversation(conversation_id)

        if count > 5:
            response = converse_with_patient(conversation_obj, user_input)
            conversation_obj = update_conversation(conversation_id, user_input, response)
            with st.spinner("Creating a diagnosis..."):
                outcome, diagnosis = create_diagnosis(conversation_obj)

            if outcome == "SUCCESS":
                st.subheader("Diagnosis Summary")
                st.write(f"**Consensus Confidence:** {diagnosis['concensus_confidence']}%")
                st.write(f"**Consensus Thinking:** {diagnosis['concensus_thinking']}")
                st.write(f"**Evaluation Confidence:** {diagnosis['evaluate_confidence']}%")
                st.write(f"**Evaluation Explanation:** {diagnosis['evaluate_explanation']}")
                st.write(f"**Next Best Action:** {diagnosis['next_best_action_']}")
                st.write(f"**Next Best Action Explanation:** {diagnosis['next_best_action_explanation']}")
                st.write(f"**Next Best Action Confidence:** {diagnosis['next_best_action_confidence']}%")

                # Generate and display the Plotly chart
                st.subheader("Expert Confidence Levels")
                fig = generate_expert_confidence_chart(diagnosis)
                st.plotly_chart(fig)

            # If the experts could not reach a consensus, display a partial result
            if outcome == "FAIL1":
                st.write("Diagnosis not available: failed to reach a consensus.")
                st.subheader("Incomplete Diagnosis")
                st.write(f"**Consensus Confidence:** {diagnosis['concensus_confidence']}%")
                st.write(f"**Consensus Thinking:** {diagnosis['concensus_thinking']}")
                st.write(f"**Next Best Action:** See GP")
                st.write(f"**Next Best Action Explanation:** Please give more details to help the AI better understand your symptoms.")

                # Generate and display the Plotly chart
                st.subheader("Expert Confidence Levels")
                fig = generate_expert_confidence_chart(diagnosis)
                st.plotly_chart(fig)

            # If the diagnosis failed the evaluation step, display a partial result
            if outcome == "FAIL2":
                st.write("Diagnosis not available: failed to match the described symptoms with known symptoms for an AI diagnosis.")
                st.subheader("Incomplete Diagnosis")
                st.write(f"**Consensus Confidence:** {diagnosis['concensus_confidence']}%")
                st.write(f"**Consensus Thinking:** {diagnosis['concensus_thinking']}")
                st.write(f"**Evaluation Confidence:** {diagnosis['evaluate_confidence']}%")
                st.write(f"**Evaluation Explanation:** {diagnosis['evaluate_explanation']}")
                st.write(f"**Next Best Action:** See GP")
                st.write(f"**Next Best Action Explanation:** Please give more details to help the AI better understand your symptoms.")

                # Generate and display the Plotly chart
                st.subheader("Expert Confidence Levels")
                fig = generate_expert_confidence_chart(diagnosis)
                st.plotly_chart(fig)

        elif exists == "PASS":
            response = converse_with_patient(conversation_obj, user_input)
            update_conversation(conversation_id, user_input, response)
            st.session_state.chat_history.append({"role": "AI", "content": response})
            with st.chat_message("AI"):
                st.write(response)

        else:
            response = converse_with_patient("", user_input)
            store_conversation(conversation_id, user_input, response)
            st.session_state.chat_history.append({"role": "AI", "content": response})
            with st.chat_message("AI"):
                st.write(response)
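
# For reference, an illustrative diagnosis payload as returned by create_diagnosis on
# success; these keys are the ones the UI above reads, with hypothetical example values
# (note the "concensus_" spelling is the actual key name used across both files):
#
#   diagnosis = {
#       "diagnosis_completion": "success",
#       "concensus_diagnosis": "appendicitis",
#       "expert_distribution": {"general_surgeon, confidence": 85, "general_surgeon, thinking": "..."},
#       "concensus_confidence": 85,
#       "concensus_thinking": "...",
#       "evaluate_confidence": 90,
#       "evaluate_explanation": "...",
#       "next_best_action_": "hospital",
#       "next_best_action_explanation": "...",
#       "next_best_action_confidence": 88,
#   }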

# Earlier single-user prototype, kept commented out for reference:
# conversation_id = "111a1"
# st.set_page_config(page_title="Virtual Clinician", page_icon="🤖")
# st.title("Virtual Clinician :toolbox:")

# # Fetch conversation history and ensure it's a list of dictionaries
# exists, count, conversation_obj = get_conversation(conversation_id)

# if "chat_history" not in st.session_state:
#     if isinstance(conversation_obj, list) and all(isinstance(item, dict) for item in conversation_obj):
#         st.session_state.chat_history = conversation_obj
#     else:
#         st.session_state.chat_history = [
#             {"role": "AI", "content": "Hello, I am the virtual clinician. How can I help you today?"},
#         ]

# # Ensure each message is a dictionary before accessing its keys
# for message in st.session_state.chat_history:
#     if isinstance(message, dict) and "role" in message and "content" in message:
#         with st.chat_message(message["role"]):
#             st.write(message["content"])
#     else:
#         st.error("Invalid message format in chat history.")

# user_query = st.chat_input("Type your message here...")
# if user_query:
#     st.session_state.chat_history.append({"role": "Human", "content": user_query})
#     with st.chat_message("Human"):
#         st.markdown(user_query)

#     exists, count, conversation_obj = get_conversation(conversation_id)

#     if not exists:
#         response = converse_with_patient("", user_query)
#         store_conversation(conversation_id, conversation_id, user_query, response)
#         exists, count, conversation_obj = get_conversation(conversation_id)

#     response_data = converse_with_patient(st.session_state.chat_history, user_query)
#     st.session_state.chat_history.append({"role": "AI", "content": response_data})
#     update_conversation(conversation_id, conversation_id, user_query, response_data)

#     if count >= 6:
#         outcome, diagnosis = create_diagnosis(conversation_obj)
#         if outcome == "PASS":
#             st.subheader("Diagnosis Summary")
#             st.write(f"**Consensus Confidence:** {diagnosis.get('concensus_confidence', 'N/A')}%")
#             st.write(f"**Consensus Thinking:** {diagnosis.get('concensus_thinking', 'N/A')}")
#             st.write(f"**Evaluation Confidence:** {diagnosis.get('evaluate_confidence', 'N/A')}%")
#             st.write(f"**Evaluation Explanation:** {diagnosis.get('evaluate_explanation', 'N/A')}")
#             st.write(f"**Next Best Action:** {diagnosis.get('next_best_action_', 'N/A')}")
#             st.write(f"**Next Best Action Explanation:** {diagnosis.get('next_best_action_explanation', 'N/A')}")
#             st.write(f"**Next Best Action Confidence:** {diagnosis.get('next_best_action_confidence', 'N/A')}%")
#             st.subheader("Expert Confidence Levels")
#             fig = generate_expert_confidence_chart(diagnosis)
#             st.plotly_chart(fig)
#         else:
#             st.write("Diagnosis not available")

#     with st.chat_message("AI"):
#         st.write(response_data)

#     store_conversation(conversation_id, conversation_id, "", st.session_state.chat_history)
azure_openai.py
ADDED
@@ -0,0 +1,253 @@
import os
import streamlit as st
# from langchain.chat_models import AzureChatOpenAI
from langchain_openai import AzureChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts.chat import SystemMessagePromptTemplate, ChatPromptTemplate
from pydantic import BaseModel, Field
from langchain_core.runnables import RunnableParallel
from enum import Enum
from helpers import read_md_files_from_doctors


# os.environ["LANGCHAIN_TRACING_V2"] = "true"
# os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
# LANGCHAIN_API_KEY = st.secrets['LANGCHAIN_API_KEY']
# os.environ["LANGCHAIN_PROJECT"] = "UC2e2e"

# LLM LangChain definition
OPENAI_API_KEY = st.secrets['azure_api_key']
OPENAI_API_TYPE = "azure"
OPENAI_API_BASE = "https://davidfearn-gpt4.openai.azure.com"
OPENAI_API_VERSION = "2024-08-01-preview"
OPENAI_MODEL = "gpt-4o-mini"

llm = AzureChatOpenAI(
    openai_api_version=OPENAI_API_VERSION,
    openai_api_key=OPENAI_API_KEY,
    azure_endpoint=OPENAI_API_BASE,
    openai_api_type=OPENAI_API_TYPE,
    deployment_name=OPENAI_MODEL,
    temperature=0,
)
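
# The Azure key is read from Streamlit secrets. As a sketch (for local development),
# .streamlit/secrets.toml would contain a line like:
#
#   azure_api_key = "<your-azure-openai-key>"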

# Function to read file contents
def read_file(folder, file):
    """
    Reads the content of a markdown file and returns it as a string.
    :param folder: The directory containing the file.
    :param file: The file name (without extension) to read.
    :return: The content of the file as a string, or None if an error occurs.
    """
    fp = f"{folder}/{file}.md"
    try:
        with open(fp, 'r', encoding='utf-8') as f:
            return f.read()
    except FileNotFoundError:
        print(f"The file at {fp} was not found.")
    except IOError:
        print(f"An error occurred while reading the file at {fp}.")
    return None

# Function to generate the next conversational turn
def converse_with_patient(conversational_history, latest_message):
    # If the conversational history is empty, flag this as the first interaction
    if not conversational_history:
        conversational_history = "first interaction"

    SystemMessage = read_file("conversation", "intake_system")
    UserMessage = "This is the conversational history between the patient and the virtual doctor if one exists {conversational_history} and this is the latest message from the patient {latest_message}"

    # class SymptomsCapture(BaseModel):
    #     follow_up_question: str = Field(description="A question that refines the understanding of the user's symptoms")

    system_message_template = SystemMessagePromptTemplate.from_template(SystemMessage)
    # structured_llm = llm.with_structured_output(SymptomsCapture)
    prompt = ChatPromptTemplate.from_messages([system_message_template, UserMessage])

    chain = prompt | llm | StrOutputParser()

    return chain.invoke({"conversational_history": conversational_history, "latest_message": latest_message})
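
# Illustrative call (hypothetical message): on the first turn the history is empty, so
# the function substitutes "first interaction" before prompting.
#
#   reply = converse_with_patient("", "I have a pain in my lower right side.")
#   print(reply)  # e.g. a follow-up question generated from the intake system prompt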


def diagnosis_consensus(diagnosis_results, conversational_history):
    # Take the diagnoses from the different expert models and generate a consensus
    # diagnosis, giving the most weight to the most confident experts.
    class Diagnosis_Consensus(BaseModel):
        diagnosis: str = Field(description="The consensus diagnosis")
        explanation: str = Field(description="An explanation of the diagnosis")
        confidence: int = Field(description="The confidence level of the diagnosis")

    system_message_template = SystemMessagePromptTemplate.from_template("You are a medical manager. You will be given a number of diagnoses from different doctors as well as the original conversation that occurred between the triaging doctor and the patient. You will need to provide a consensus diagnosis based on the confidence levels of the different doctors. You need to provide a final diagnosis, an explanation of the diagnosis, and your overall confidence in the diagnosis. ONLY USE THE INFORMATION PROVIDED TO YOU. ONLY CHOOSE ONE DIAGNOSIS.")
    structured_llm_consensus = llm.with_structured_output(Diagnosis_Consensus)
    StandardUserMessage = "These are the diagnoses from the different doctors: {diagnosis_results}, and this is the conversational history between the patient and the virtual doctor: {conversational_history}"
    prompt = ChatPromptTemplate.from_messages([system_message_template, StandardUserMessage])
    chain = prompt | structured_llm_consensus
    response = chain.invoke({"diagnosis_results": diagnosis_results, "conversational_history": conversational_history})

    return {
        "diagnosis": response.diagnosis,
        "explanation": response.explanation,
        "confidence": response.confidence
    }


def evaluate_diagnosis(diagnosis_results, conversational_history):
    # A similarity check: compare the proposed diagnosis and the conversational history
    # against known symptoms for that diagnosis, and return a confidence level.
    class Diagnosis_Evaluation(BaseModel):
        confidence: int = Field(description="The confidence level of the diagnosis")
        explanation: str = Field(description="An explanation of the confidence level")

    system_message_template = SystemMessagePromptTemplate.from_template(read_file("evaluator", "eval_system_message"))
    StandardUserMessage = "This is the conversational history between the patient and the virtual doctor: {conversational_history}, this is the diagnosis our AI system has come up with: {diagnosis_results}"
    structured_llm_consensus = llm.with_structured_output(Diagnosis_Evaluation)
    prompt = ChatPromptTemplate.from_messages([system_message_template, StandardUserMessage])
    chain = prompt | structured_llm_consensus
    response = chain.invoke({"diagnosis_results": diagnosis_results, "conversational_history": conversational_history})

    return {
        "confidence": response.confidence,
        "explanation": response.explanation
    }

def next_best_action(diagnosis, conversational_history):
    # Take the diagnosis and return the next best action for the patient, based on the
    # diagnosis and the conversational history.
    # Available actions: go to the GP / family doctor, go to a specialist, go to the
    # hospital, go to the pharmacist, or stay at home and rest.

    class Actions(str, Enum):
        GP = "GP / family doctor"
        SPECIALIST = "specialist"
        HOSPITAL = "hospital"
        PHARMACIST = "pharmacist"
        REST = "stay at home and rest"

    # Pydantic model for the structured output (named so it does not shadow this function)
    class NextBestAction(BaseModel):
        action: Actions = Field(description="The next best action for the patient")
        explanation: str = Field(description="An explanation of the next best action")
        confidence: int = Field(description="The confidence level of the next best action")

    system_message_template = SystemMessagePromptTemplate.from_template(read_file("nba", "nba_system"))
    structured_llm_action = llm.with_structured_output(NextBestAction)
    StandardUserMessage = "This is the conversational history between the patient and the virtual doctor: {conversational_history}, and this is the diagnosis: {diagnosis}"
    prompt = ChatPromptTemplate.from_messages([system_message_template, StandardUserMessage])
    chain = prompt | structured_llm_action

    response = chain.invoke({"diagnosis": diagnosis, "conversational_history": conversational_history})

    return {"action": response.action.value, "explanation": response.explanation, "confidence": response.confidence}
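
# Illustrative call (hypothetical inputs): the Enum constrains the structured output, so
# "action" is always one of the five strings defined above.
#
#   nba = next_best_action("appendicitis", "patient reports severe abdominal pain...")
#   print(nba["action"])  # e.g. "hospital"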


def create_diagnosis(conversational_history):
    # Take the conversational history and generate a diagnosis. The history is passed to a
    # set of models, each specific to a medical specialty; the specialties are defined by
    # the .md system messages in the doctors folder.

    class Diagnosis(BaseModel):
        diagnosis: str = Field(description="The diagnosis")
        confidence: int = Field(description="The confidence level of the diagnosis")
        thinking: str = Field(description="The reasoning behind the chosen diagnosis")

    system_message_df = read_md_files_from_doctors()
    structured_llm_diagnosis = llm.with_structured_output(Diagnosis)
    StandardUserMessage = "This is the conversational history between the patient and the virtual doctor {conversational_history}"

    # Build one chain per specialty: take each system message from the content column,
    # name the chain after the filename column, and run them all in parallel.
    chains = {}
    for index, row in system_message_df.iterrows():
        system_message_template = SystemMessagePromptTemplate.from_template(row['content'])
        prompt = ChatPromptTemplate.from_messages([system_message_template, StandardUserMessage])
        chains[row['filename']] = prompt | structured_llm_diagnosis

    map_chain = RunnableParallel(**chains)

    response = map_chain.invoke({"conversational_history": conversational_history})

    # Flatten the per-expert results into "<expert>, <attribute>" keys
    diagnosis_results = {}
    for filename, result in response.items():
        filename_base = filename.split('.')[0]
        diagnosis_results[f"{filename_base}, diagnosis"] = result.diagnosis
        diagnosis_results[f"{filename_base}, thinking"] = result.thinking
        diagnosis_results[f"{filename_base}, confidence"] = result.confidence

    # Consensus step: returns {"diagnosis", "explanation", "confidence"}
    consensus = diagnosis_consensus(diagnosis_results, conversational_history)
    if consensus["confidence"] < 80:
        return "FAIL1", {
            "diagnosis_completion": "fail",
            "concensus_diagnosis": consensus["diagnosis"],
            "expert_distribution": diagnosis_results,
            "concensus_confidence": consensus["confidence"],
            "concensus_thinking": consensus["explanation"],
            "next_best_action_": "GP / family doctor",
            "Error": "The confidence level of the consensus diagnosis is below 80%"
        }

    # Evaluation step: returns {"confidence", "explanation"}
    final_diagnosis = evaluate_diagnosis(consensus, conversational_history)
    if final_diagnosis["confidence"] < 80:
        return "FAIL2", {
            "diagnosis_completion": "fail",
            "concensus_diagnosis": consensus["diagnosis"],
            "expert_distribution": diagnosis_results,
            "concensus_confidence": consensus["confidence"],
            "concensus_thinking": consensus["explanation"],
            "evaluate_confidence": final_diagnosis["confidence"],
            "evaluate_explanation": final_diagnosis["explanation"],
            "next_best_action_": "GP / family doctor",
            "Error": "The confidence level of the final diagnosis is below 80%"
        }

    # Next-best-action step: returns {"action", "explanation", "confidence"}
    next_action = next_best_action(consensus["diagnosis"], conversational_history)

    return "SUCCESS", {
        "diagnosis_completion": "success",
        "concensus_diagnosis": consensus["diagnosis"],
        "expert_distribution": diagnosis_results,
        "concensus_confidence": consensus["confidence"],
        "concensus_thinking": consensus["explanation"],
        "evaluate_confidence": final_diagnosis["confidence"],
        "evaluate_explanation": final_diagnosis["explanation"],
        "next_best_action_": next_action["action"],
        "next_best_action_explanation": next_action["explanation"],
        "next_best_action_confidence": next_action["confidence"]
    }
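
# A minimal standalone sketch of the RunnableParallel fan-out used above (hypothetical
# chains, for illustration only): each named runnable receives the same input dict and
# the combined result is a dict keyed by chain name.
#
#   from langchain_core.runnables import RunnableParallel, RunnableLambda
#   demo = RunnableParallel(
#       upper=RunnableLambda(lambda x: x["text"].upper()),
#       length=RunnableLambda(lambda x: len(x["text"])),
#   )
#   demo.invoke({"text": "chest pain"})  # -> {"upper": "CHEST PAIN", "length": 10}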


# conversation = """{'conversation_id': '12345623', 'patient': 'John Doe', 'conversation': [{'role': 'user', 'content': 'I have a pain in my lower right hand side.'}, {'role': 'assistant', 'content': 'how bad is the pain on a scale of 1 to 10?'}, {'role': 'user', 'content': '10 out of 10 and its lasted 2 hours'}, {'role': 'assistant', 'content': 'Does the pain radiate to any other part of your body?'}, {'role': 'user', 'content': 'No its just in my abdomen.'}, {'role': 'assistant', 'content': 'have you had any other symptoms like nausea or vomiting?'}, {'role': 'user', 'content': 'I do feel a bit sick'}, {'role': 'assistant', 'content': 'thank you for this information'}]}"""

# print(create_diagnosis(conversation))
helpers.py
ADDED
@@ -0,0 +1,19 @@
import os
import pandas as pd

def read_md_files_from_doctors():
    """Read every .md system message in the doctors folder into a DataFrame
    with 'filename' and 'content' columns."""
    doctors_folder = 'doctors'
    data = []

    for filename in os.listdir(doctors_folder):
        if filename.endswith('.md'):
            file_path = os.path.join(doctors_folder, filename)
            with open(file_path, 'r', encoding='utf-8') as file:
                content = file.read()
                data.append({'filename': filename, 'content': content})

    df = pd.DataFrame(data)
    return df

# Example usage:
# df = read_md_files_from_doctors()
# print(df['filename'].tolist())
memory.py
ADDED
@@ -0,0 +1,184 @@
# This module stores the interim conversational state between the doctor and patient, along
# with the diagnostic results, the final diagnosis, and the next best action.
# Each conversation is stored in Azure Blob Storage as a JSON file named after the conversation id.

# Steps
# 1. Create a class to store the conversation state
# 2. Create a function to check whether a conversation is new or existing
# 3. Create a function to count the interactions in an existing conversation
# 4. Create a function to store the conversation in Azure Blob Storage
# 5. Create a function to retrieve the conversation from Azure Blob Storage
# 6. Create a function to update the conversation in Azure Blob Storage
# 7. Create a function to delete the conversation from Azure Blob Storage

# Import the required libraries
import os
import json
from azure.storage.blob import BlobServiceClient

# Azure connection string: load it from the environment rather than hard-coding the
# account key, which must never be committed to source control
connect_str = os.environ["AZURE_STORAGE_CONNECTION_STRING"]
container_name = "vme25"

22 |
+
# Create a class to store the conversation state
|
23 |
+
class Conversation:
|
24 |
+
def __init__(self, conversation_id, patient, conversation):
|
25 |
+
self.conversation_id = conversation_id
|
26 |
+
self.patient = patient
|
27 |
+
self.conversation = conversation
|
28 |
+
|
+# Create a function to check if this is a new conversation or an existing conversation
+def check_conversation(conversation_id):
+    # Create a blob service client and look up the blob named after the conversation id
+    blob_service_client = BlobServiceClient.from_connection_string(connect_str)
+    container_client = blob_service_client.get_container_client(container_name)
+    blob_client = container_client.get_blob_client(conversation_id)
+    return blob_client.exists()
+
+# Create a function to count the user interactions in an existing conversation
+def count_interactions(conversation_id):
+    # Create a blob service client
+    blob_service_client = BlobServiceClient.from_connection_string(connect_str)
+    container_client = blob_service_client.get_container_client(container_name)
+    blob_client = container_client.get_blob_client(conversation_id)
+
+    # Retrieve the existing conversation
+    conversation_json = blob_client.download_blob().readall()
+    conversation_obj = json.loads(conversation_json)
+
+    # Count the number of user interactions
+    user_interactions = sum(1 for message in conversation_obj['conversation'] if message['role'] == 'user')
+
+    return user_interactions
+
+# Create a function to store a new conversation in Azure blob storage
+def initial_conversation(conversation_id, patient, from_user, response_from_ai):
+    # Create a blob service client
+    blob_service_client = BlobServiceClient.from_connection_string(connect_str)
+    container_client = blob_service_client.get_container_client(container_name)
+    blob_client = container_client.get_blob_client(conversation_id)
+
+    # Initialize the conversation with the first user and assistant messages
+    conversation = [
+        {'role': 'user', 'content': from_user},
+        {'role': 'assistant', 'content': response_from_ai}
+    ]
+
+    conversation_obj = Conversation(conversation_id, patient, conversation)
+    conversation_json = json.dumps(conversation_obj.__dict__)
+    blob_client.upload_blob(conversation_json)
+
+# Create a function to retrieve the conversation from Azure blob storage
+def retrieve_conversation(conversation_id):
+    print("Retrieving conversation")
+    # Create a blob service client
+    blob_service_client = BlobServiceClient.from_connection_string(connect_str)
+    container_client = blob_service_client.get_container_client(container_name)
+    blob_client = container_client.get_blob_client(conversation_id)
+    conversation_json = blob_client.download_blob().readall()
+    conversation_obj = json.loads(conversation_json)
+    return conversation_obj
+
+# Create a function to update the conversation in Azure blob storage
+def update_conversation(conversation_id, from_user, response_from_ai):
+    print("Updating conversation")
+    # Create a blob service client
+    blob_service_client = BlobServiceClient.from_connection_string(connect_str)
+    container_client = blob_service_client.get_container_client(container_name)
+    blob_client = container_client.get_blob_client(conversation_id)
+
+    # Retrieve the existing conversation
+    conversation_obj = retrieve_conversation(conversation_id)
+
+    # Update the conversation with new messages
+    conversation_obj['conversation'].append({'role': 'user', 'content': from_user})
+    conversation_obj['conversation'].append({'role': 'assistant', 'content': response_from_ai})
+
+    # Convert the updated conversation to JSON
+    conversation_json = json.dumps(conversation_obj)
+
+    # Upload the updated conversation, overwriting the existing blob
+    blob_client.upload_blob(conversation_json, overwrite=True)
+    return conversation_json
+
+# Append a diagnosis entry to an existing conversation
+def write_diagnosis(conversation_id, diagnosis):
+    # Create a blob service client
+    blob_service_client = BlobServiceClient.from_connection_string(connect_str)
+    container_client = blob_service_client.get_container_client(container_name)
+    blob_client = container_client.get_blob_client(conversation_id)
+
+    # Retrieve the existing conversation
+    conversation_obj = retrieve_conversation(conversation_id)
+
+    # Append the diagnosis as its own message
+    conversation_obj['conversation'].append({'role': 'diagnosis', 'content': diagnosis})
+
+    # Convert the updated conversation to JSON
+    conversation_json = json.dumps(conversation_obj)
+
+    # Upload the updated conversation, overwriting the existing blob
+    blob_client.upload_blob(conversation_json, overwrite=True)
+
+# Create a function to delete the conversation from Azure blob storage
+def delete_conversation(conversation_id):
+    # Create a blob service client
+    blob_service_client = BlobServiceClient.from_connection_string(connect_str)
+    container_client = blob_service_client.get_container_client(container_name)
+    blob_client = container_client.get_blob_client(conversation_id)
+    blob_client.delete_blob()
+
+# Store a turn: create the blob on first contact, otherwise append to it
+def store_conversation(conversation_id, from_user, response_from_ai):
+    patient = "patient"
+    try:
+        print("Storing conversation")
+        res_check = check_conversation(conversation_id)
+        print(res_check)
+        if not res_check:
+            print("Conversation doesn't exist, starting new conversation")
+            initial_conversation(conversation_id, patient, from_user, response_from_ai)
+            print("Conversation stored")
+        else:
+            update_conversation(conversation_id, from_user, response_from_ai)
+            print("Conversation updated")
+    except Exception as e:
+        return "FAIL", str(e)
+
+# Return ("PASS", user_turn_count, conversation) or ("FAIL", 0, error_message)
+def get_conversation(conversation_id):
+    try:
+        res_check = check_conversation(conversation_id)
+        if res_check:
+            print("Conversation exists")
+            count = count_interactions(conversation_id)
+            conversation_obj = retrieve_conversation(conversation_id)
+            return "PASS", count, conversation_obj
+        else:
+            print("Conversation doesn't exist")
+            return "FAIL", 0, "Conversation does not exist"
+    except Exception as e:
+        print("An error occurred")
+        return "FAIL", 0, str(e)
+
+
+# # Test the functions
+# conversation_id = "12345623"
+# patient = "John Doe"
+# from_user = "I do feel a bit sick"
+# response_from_ai = "thank you for this information"
+# # print(get_conversation(conversation_id))
+
+# store_conversation(conversation_id, from_user, response_from_ai)
+# print(check_conversation(conversation_id))
+# print(count_interactions(conversation_id))
+# conversation_obj = retrieve_conversation(conversation_id)
+# print(conversation_obj)
+
+# update_conversation(conversation_id, "What is your name?", "I am an AI assistant.")
+# conversation_obj = retrieve_conversation(conversation_id)
+# print(conversation_obj)
+# delete_conversation(conversation_id)
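+
+# A minimal round trip (ids and messages here are illustrative), assuming the
+# storage account and container above are reachable:
+# store_conversation("demo-1", "I have a headache", "How long has it hurt?")
+# status, count, conv = get_conversation("demo-1")  # count == 1 user turn
+# write_diagnosis("demo-1", "tension headache; advise rest and fluids")
+# delete_conversation("demo-1")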
requirements.txt
ADDED
@@ -0,0 +1,13 @@
+langchain
+langchain_core
+langchain-openai
+pandas
+langchain-text-splitters
+tiktoken
+openevals
+langsmith
+azure-storage-blob
+azure-identity
+fastapi==0.111.0
+streamlit
+plotly
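
To reproduce the environment locally, a sketch (the exact run command depends on whether the Space is launched as a FastAPI service or a Streamlit UI):

pip install -r requirements.txt
uvicorn app:app          # serve the FastAPI endpoints
streamlit run app.py     # or serve the Streamlit UI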