davidfearne committed
Commit 339056d · verified · 1 Parent(s): fc97cbf

Update app.py

Files changed (1): app.py (+237, -699)
app.py CHANGED
@@ -1,699 +1,237 @@
[Old lines 1-233, removed: the previous active code. It reappears almost verbatim in the new file below; the only differences are the three added "**Diagnosis:**" st.write lines and the second, duplicated 'if outcome == "FAIL1":' branch, which the new version renames to "FAIL2".]
[Old lines 234-699, removed: blank lines followed by several earlier, fully commented-out drafts of the same app (repeated imports, generate_expert_confidence_chart, and the chat/diagnosis loop). None of this dead code carries over to the new file.]
 
+ from fastapi import FastAPI, HTTPException
+ import streamlit as st
+ import pandas as pd
+ from pydantic import BaseModel, Field, validator
+ import numpy as np
+ import plotly.graph_objects as go
+
+ from azure_openai import converse_with_patient, create_diagnosis
+ from memory import get_conversation, store_conversation, update_conversation
+ import uuid
+
+ class ask_question (BaseModel):
+     user_input: str
+     id: str
+
+ app = FastAPI()
+
+
+ def generate_expert_confidence_chart(diagnosis):
+     """
+     Extracts expert confidence data from JSON and generates a multi-colored bar chart.
+     """
+
+     # Extract expert distribution data
+     expert_distribution = diagnosis["expert_distribution"]
+
+     # Process the data into a structured format
+     rows = []
+     for key, value in expert_distribution.items():
+         expert, attribute = key.rsplit(", ", 1)  # Ensure splitting at the last comma
+         rows.append({"Expert": expert, "Attribute": attribute, "Value": value})
+
+     # Create a DataFrame
+     df = pd.DataFrame(rows)
+
+     # Filter the DataFrame for confidence values only
+     df_confidence = df[df["Attribute"] == "confidence"].copy()
+
+     # Merge confidence values with corresponding thinking explanations
+     df_thinking = df[df["Attribute"] == "thinking"].copy()
+     df_confidence = df_confidence.merge(df_thinking, on="Expert", suffixes=("_confidence", "_thinking"))
+
+     # Convert confidence values to numeric
+     df_confidence["Value_confidence"] = pd.to_numeric(df_confidence["Value_confidence"])
+
+     # Define a function to map confidence scores to colors
+     def confidence_to_color(confidence):
+         """
+         Maps confidence score (0-100) to a blended color between red (0 confidence) and green (100 confidence).
+         """
+         red = np.array([255, 0, 0])
+         green = np.array([0, 255, 0])
+         blend_ratio = confidence / 100  # Normalize between 0 and 1
+         blended_color = (1 - blend_ratio) * red + blend_ratio * green
+         return f"rgb({int(blended_color[0])}, {int(blended_color[1])}, {int(blended_color[2])})"
+
+     # Apply color mapping
+     df_confidence["Color"] = df_confidence["Value_confidence"].apply(confidence_to_color)
+
+     # Create the bar chart
+     fig = go.Figure()
+
+     # Add bars with customized colors and reduced spacing
+     fig.add_trace(go.Bar(
+         y=df_confidence["Expert"],
+         x=df_confidence["Value_confidence"],
+         text=df_confidence["Value_confidence"],
+         hovertext=df_confidence["Value_thinking"],
+         orientation="h",
+         marker=dict(color=df_confidence["Color"]),
+         width=0.3,  # Reduce bar width for closer spacing
+         textposition="inside"
+     ))
+
+     # Update layout for better visibility
+     fig.update_layout(
+         title="Expert Confidence in Diagnosis",
+         xaxis_title="Confidence Score",
+         yaxis_title="Medical Expert",
+         yaxis=dict(tickmode="linear", dtick=1, automargin=True),
+         height=max(400, 40 * len(df_confidence)),  # Adjust height dynamically
+         bargap=0.1  # Reduce spacing between bars
+     )
+
+     # Update hover template
+     fig.update_traces(
+         hovertemplate="<b>%{y}</b><br>Confidence: %{x}%<br>Thinking: %{hovertext}"
+     )
+
+     # Show the plot
+     return fig
+
+
+ # FastAPI interface routes
+ # @app.get("/")
+ # async def root():
+ #     return {"message": "Welcome to the GenAI Symptom Checker"}
+
+ # @app.post("/ask")
+ # async def ask_question(question: ask_question):
+ #     try:
+ #         user_input = question.user_input
+ #         conversation_id = question.id
+
+ #         exists, count, conversation_obj = get_conversation(conversation_id)
+ #         if count == 6:
+ #             response = converse_with_patient(conversation_obj, user_input)
+ #             store_conversation(conversation_id, conversation_id, user_input, response)
+ #             exists, count, conversation_obj = get_conversation(conversation_id)
+ #             diagnosis = create_diagnosis(conversation_obj)
+ #             return {"response": response, "count": count, "diagnosis": diagnosis}
+ #         if count > 6:
+ #             exists, count, conversation_obj = get_conversation(conversation_id)
+ #             diagnosis_content = next((item['content'] for item in conversation_obj if item['role'] == 'diagnosis'), None)
+ #             return {"response": "You have reached the maximum number of questions", "count": count, "diagnosis": diagnosis_content}
+ #         if exists == "PASS":
+ #             response = converse_with_patient(conversation_obj, user_input)
+ #             update_conversation(conversation_id, conversation_id, user_input, response)
+ #             return {"response": response, "count": count, "diagnosis": "none"}
+
+ #         else:
+ #             response = converse_with_patient("",user_input)
+ #             store_conversation(conversation_id, conversation_id, user_input, response)
+ #             return {"response": response, "count": count, "diagnosis": "none"}
+
+ #     except Exception as e:
+ #         raise HTTPException(status_code=500, detail=str(e))
+
+ # app config
+
+ st.set_page_config(page_title="virtual clinician", page_icon=":medical_symbol:")
+ st.title("Virtual Clinician :medical_symbol:")
+
+ user_id = st.text_input("Name:", key="user_id")
+
+ conversation_id = user_id
+ # Ensure user_id is defined or fallback to a default value
+ if not user_id:
+     st.warning("Hi, Who am I speaking with?")
+ else:
+     # session state
+     if "chat_history" not in st.session_state:
+         st.session_state.chat_history = [
+             {"role": "AI", "content": f"Hello, {user_id} I am the virtual clinician. How can I help you today?"},
+         ]
+
+
+     # conversation
+     for message in st.session_state.chat_history:
+         if message["role"] == "AI":
+             with st.chat_message("AI"):
+                 st.write(message["content"])
+         elif message["role"] == "Human":
+             with st.chat_message("Human"):
+                 st.write(message["content"])
+
+     # user input
+     user_input = st.chat_input("Type your message here...")
+     if user_input is not None and user_input != "":
+         st.session_state.chat_history.append({"role": "Human", "content": user_input})
+
+
+         with st.chat_message("Human"):
+             st.markdown(user_input)
+
+         # this functions checks to see if the conversation exists
+         exists, count, conversation_obj = get_conversation(conversation_id)
+         # if the conversation does not exist, it creates a new conversation
+
+
+         if count > 5:
+             response = converse_with_patient(conversation_obj, user_input)
+             conversation_obj = update_conversation(conversation_id, user_input, response)
+             print(conversation_obj)
+             with st.spinner("Creating a diagnosis..."):
+                 outcome, diagnosis = create_diagnosis(conversation_obj)
+             if outcome == "SUCCESS":
+                 st.subheader("Diagnosis Summary")
+                 st.write(f"**Diagnosis:** {diagnosis['concensus_diagnosis']}")
+                 st.write(f"**Consensus Confidence:** {diagnosis['concensus_confidence']}%")
+                 st.write(f"**Consensus Thinking:** {diagnosis['concensus_thinking']}")
+                 st.write(f"**Evaluation Confidence:** {diagnosis['evaluate_confidence']}%")
+                 st.write(f"**Evaluation Explanation:** {diagnosis['evaluate_explanation']}")
+                 st.write(f"**Next Best Action:** {diagnosis['next_best_action_']}")
+                 st.write(f"**Next Best Action Explanation:** {diagnosis['next_best_action_explanation']}")
+                 st.write(f"**Next Best Action Confidence:** {diagnosis['next_best_action_confidence']}%")
+
+                 # Generate and display the plotly chart
+                 st.subheader("Expert Confidence Levels")
+                 fig = generate_expert_confidence_chart(diagnosis)
+                 st.plotly_chart(fig)
+
+             # if the diagnosis is not successful, display a message
+             if outcome == "FAIL1":
+                 st.write("Diagnosis not available Failed to find concensus")
+                 st.subheader("Incomplete Diagnosis")
+                 st.write(f"**Diagnosis:** {diagnosis['concensus_diagnosis']}")
+                 st.write(f"**Consensus Confidence:** {diagnosis['concensus_confidence']}%")
+                 st.write(f"**Consensus Thinking:** {diagnosis['concensus_thinking']}")
+                 st.write(f"**Next Best Action:** See GP")
+                 st.write(f"**Next Best Action Explanation:** Please give more details to help the AI better understand your symptoms ")
+
+                 # Generate and display the plotly chart
+                 st.subheader("Expert Confidence Levels")
+                 fig = generate_expert_confidence_chart(diagnosis)
+                 st.plotly_chart(fig)
+
+             if outcome == "FAIL2":
+                 st.write("Diagnosis not available Failed to match described symptoms with know symptoms for AI diagnosis")
+                 st.subheader("Incomplete Diagnosis")
+                 st.write(f"**Diagnosis:** {diagnosis['concensus_diagnosis']}")
+                 st.write(f"**Consensus Confidence:** {diagnosis['concensus_confidence']}%")
+                 st.write(f"**Consensus Thinking:** {diagnosis['concensus_thinking']}")
+                 st.write(f"**Evaluation Confidence:** {diagnosis['evaluate_confidence']}%")
+                 st.write(f"**Evaluation Explanation:** {diagnosis['evaluate_explanation']}")
+                 st.write(f"**Next Best Action:** See GP")
+                 st.write(f"**Next Best Action Explanation:** Please give more details to help the AI better understand your symptoms ")
+
+                 # Generate and display the plotly chart
+                 st.subheader("Expert Confidence Levels")
+                 fig = generate_expert_confidence_chart(diagnosis)
+                 st.plotly_chart(fig)
+
+         if exists == "PASS":
+             response = converse_with_patient(conversation_obj, user_input)
+             update_conversation(conversation_id, user_input, response)
+             st.session_state.chat_history.append({"role": "AI", "content": response})
+             with st.chat_message("AI"):
+                 st.write(response)
+
+         else:
+             response = converse_with_patient("",user_input)
+             store_conversation(conversation_id, user_input, response)
+             st.session_state.chat_history.append({"role": "AI", "content": response})
+             with st.chat_message("AI"):
+                 st.write(response)
+
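
For quick local testing of the chart and the new "Diagnosis" field, the sketch below builds a hypothetical diagnosis payload using the same key names the UI reads (including the existing "concensus_*" spellings and the "<expert>, confidence" / "<expert>, thinking" keys that generate_expert_confidence_chart splits on). The values are illustrative placeholders, not output from create_diagnosis, and the call assumes generate_expert_confidence_chart from app.py is in scope.

# Hypothetical payload for local testing; key names mirror what app.py reads,
# values are placeholders rather than real create_diagnosis output.
sample_diagnosis = {
    "concensus_diagnosis": "Tension-type headache",
    "concensus_confidence": 78,
    "concensus_thinking": "Bilateral, pressure-like pain with no red-flag features reported.",
    "evaluate_confidence": 81,
    "evaluate_explanation": "Described symptoms match a known presentation.",
    "next_best_action_": "Self-care and monitor",
    "next_best_action_explanation": "Rest, hydration, OTC analgesia; see a GP if symptoms persist.",
    "next_best_action_confidence": 74,
    # generate_expert_confidence_chart expects "<expert>, confidence" and "<expert>, thinking" keys
    "expert_distribution": {
        "General Practitioner, confidence": "80",
        "General Practitioner, thinking": "Classic presentation, low risk.",
        "Neurologist, confidence": "70",
        "Neurologist, thinking": "No focal neurological signs described.",
    },
}

# Assumes generate_expert_confidence_chart from app.py is defined or imported in this session.
fig = generate_expert_confidence_chart(sample_diagnosis)
fig.show()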