Commit · 0b48895
1 Parent(s): 6fe5ab2
features updated
app.py CHANGED
@@ -1,3 +1,87 @@
  1 |   import os
  2 |   import base64
  3 |   import zipfile
@@ -6,24 +90,10 @@ import streamlit as st
  6 |   from byaldi import RAGMultiModalModel
  7 |   from openai import OpenAI
  8 |
  9 | - import os
 10 | - st.write("Current Working Directory:", os.getcwd())
 11 | -
 12 | - # Function to unzip a folder if it does not exist
 13 | - # def unzip_folder_if_not_exist(zip_path, extract_to):
 14 | - # if not os.path.exists(extract_to):
 15 | - # with zipfile.ZipFile(zip_path, 'r') as zip_ref:
 16 | - # zip_ref.extractall(extract_to)
 17 | -
 18 | - # # Example usage
 19 | - # zip_path = 'medical_index.zip'
 20 | - # extract_to = 'medical_index'
 21 | - # unzip_folder_if_not_exist(zip_path, extract_to)
 22 | -
 23 |   # Preload the RAGMultiModalModel
 24 |   @st.cache_resource
 25 |   def load_model():
 26 | -     return RAGMultiModalModel.from_index("
 27 |
 28 |   RAG = load_model()
 29 |
@@ -35,7 +105,11 @@ client = OpenAI(api_key=api_key)
 35 |   st.title("Medical Diagnostic Assistant")
 36 |   st.write("Enter a medical query and get diagnostic recommendations along with visual references.")
 37 |
 38 | - # User input
 39 |   query = st.text_input("Query", "What should be the appropriate diagnostic test for peptic ulcer?")
 40 |
 41 |   if st.button("Submit"):
@@ -43,40 +117,45 @@ if st.button("Submit"):
 43 |           # Search using RAG model
 44 |           with st.spinner('Retrieving information...'):
 45 |               try:
 46 | -
 47 | -
 48 | -
 49 | -
 50 | -
 51 | -
 52 | -
 53 |
 54 | -                 # Display image
 55 |                   st.image(filename, caption="Reference Image", use_column_width=True)
 56 |
 57 |                   # Get model response
 58 |                   response = client.chat.completions.create(
 59 | -                     model=
 60 |                       messages=[
 61 | -                         {"role": "system", "content": "You are a helpful assistant. You only answer the question based on the provided image"},
 62 |                           {
 63 |                               "role": "user",
 64 |                               "content": [
 65 |                                   {"type": "text", "text": query},
 66 | -                                 {
 67 | -                                     "type": "image_url",
 68 | -                                     "image_url": {"url": f"data:image/jpeg;base64,{returned_page}"},
 69 | -                                 },
 70 |                               ],
 71 |                           },
 72 |                       ],
 73 |                       max_tokens=300,
 74 |                   )
 75 | -
 76 |                   # Display the response
 77 |                   st.success("Model Response:")
 78 |                   st.write(response.choices[0].message.content)
 79 |               except Exception as e:
 80 |                   st.error(f"An error occurred: {e}")
 81 |       else:
 82 | -         st.warning("Please enter a query.")
  1 | + # import os
  2 | + # import base64
  3 | + # import zipfile
  4 | + # from pathlib import Path
  5 | + # import streamlit as st
  6 | + # from byaldi import RAGMultiModalModel
  7 | + # from openai import OpenAI
  8 | +
  9 | + # import os
 10 | + # st.write("Current Working Directory:", os.getcwd())
 11 | +
 12 | + # # Function to unzip a folder if it does not exist
 13 | + # # def unzip_folder_if_not_exist(zip_path, extract_to):
 14 | + # # if not os.path.exists(extract_to):
 15 | + # # with zipfile.ZipFile(zip_path, 'r') as zip_ref:
 16 | + # # zip_ref.extractall(extract_to)
 17 | +
 18 | + # # # Example usage
 19 | + # # zip_path = 'medical_index.zip'
 20 | + # # extract_to = 'medical_index'
 21 | + # # unzip_folder_if_not_exist(zip_path, extract_to)
 22 | +
 23 | + # # Preload the RAGMultiModalModel
 24 | + # @st.cache_resource
 25 | + # def load_model():
 26 | + # return RAGMultiModalModel.from_index("./medical_index")
 27 | +
 28 | + # RAG = load_model()
 29 | +
 30 | + # # OpenAI API key from environment
 31 | + # api_key = os.getenv("OPENAI_API_KEY")
 32 | + # client = OpenAI(api_key=api_key)
 33 | +
 34 | + # # Streamlit UI
 35 | + # st.title("Medical Diagnostic Assistant")
 36 | + # st.write("Enter a medical query and get diagnostic recommendations along with visual references.")
 37 | +
 38 | + # # User input
 39 | + # query = st.text_input("Query", "What should be the appropriate diagnostic test for peptic ulcer?")
 40 | +
 41 | + # if st.button("Submit"):
 42 | + # if query:
 43 | + # # Search using RAG model
 44 | + # with st.spinner('Retrieving information...'):
 45 | + # try:
 46 | + # returned_page = RAG.search(query, k=1)[0].base64
 47 | +
 48 | + # # Decode and display the retrieved image
 49 | + # image_bytes = base64.b64decode(returned_page)
 50 | + # filename = 'retrieved_image.jpg'
 51 | + # with open(filename, 'wb') as f:
 52 | + # f.write(image_bytes)
 53 | +
 54 | + # # Display image in Streamlit
 55 | + # st.image(filename, caption="Reference Image", use_column_width=True)
 56 | +
 57 | + # # Get model response
 58 | + # response = client.chat.completions.create(
 59 | + # model="gpt-4o-mini-2024-07-18",
 60 | + # messages=[
 61 | + # {"role": "system", "content": "You are a helpful assistant. You only answer the question based on the provided image"},
 62 | + # {
 63 | + # "role": "user",
 64 | + # "content": [
 65 | + # {"type": "text", "text": query},
 66 | + # {
 67 | + # "type": "image_url",
 68 | + # "image_url": {"url": f"data:image/jpeg;base64,{returned_page}"},
 69 | + # },
 70 | + # ],
 71 | + # },
 72 | + # ],
 73 | + # max_tokens=300,
 74 | + # )
 75 | +
 76 | + # # Display the response
 77 | + # st.success("Model Response:")
 78 | + # st.write(response.choices[0].message.content)
 79 | + # except Exception as e:
 80 | + # st.error(f"An error occurred: {e}")
 81 | + # else:
 82 | + # st.warning("Please enter a query.")
 83 | +
 84 | +
 85 |   import os
 86 |   import base64
 87 |   import zipfile
 90 |   from byaldi import RAGMultiModalModel
 91 |   from openai import OpenAI
 92 |
 93 |   # Preload the RAGMultiModalModel
 94 |   @st.cache_resource
 95 |   def load_model():
 96 | +     return RAGMultiModalModel.from_index("/home/user/app/medical_index")
 97 |
 98 |   RAG = load_model()
 99 |
105 |   st.title("Medical Diagnostic Assistant")
106 |   st.write("Enter a medical query and get diagnostic recommendations along with visual references.")
107 |
108 | + # User input for selecting the model
109 | + model_options = ["gpt-4o", "gpt-4o-mini", "o1-preview", "o1-mini"]
110 | + selected_model = st.selectbox("Choose a GPT model", model_options)
111 | +
112 | + # User input for query
113 |   query = st.text_input("Query", "What should be the appropriate diagnostic test for peptic ulcer?")
114 |
115 |   if st.button("Submit"):
117 |           # Search using RAG model
118 |           with st.spinner('Retrieving information...'):
119 |               try:
120 | +                 # Get top 10 images
121 | +                 returned_pages = RAG.search(query, k=10)
122 | +                 image_urls = []
123 | +                 for i, page in enumerate(returned_pages):
124 | +                     image_bytes = base64.b64decode(page.base64)
125 | +                     filename = f'retrieved_image_{i}.jpg'
126 | +                     with open(filename, 'wb') as f:
127 | +                         f.write(image_bytes)
128 | +                     image_urls.append(f"data:image/jpeg;base64,{page.base64}")
129 |
130 | +                 # Display the first image initially
131 |                   st.image(filename, caption="Reference Image", use_column_width=True)
132 |
133 |                   # Get model response
134 |                   response = client.chat.completions.create(
135 | +                     model=selected_model,
136 |                       messages=[
137 | +                         {"role": "system", "content": "You are a helpful assistant. You only answer the question based on the provided image and select the right option. You will need to provide the exaplanation from the context as well. DO NOT answer from your previous knowledge ; only answer from the images provided."},
138 |                           {
139 |                               "role": "user",
140 |                               "content": [
141 |                                   {"type": "text", "text": query},
142 | +                                 *[{"type": "image_url", "image_url": {"url": url}} for url in image_urls],
143 |                               ],
144 |                           },
145 |                       ],
146 |                       max_tokens=300,
147 |                   )
148 | +
149 |                   # Display the response
150 |                   st.success("Model Response:")
151 |                   st.write(response.choices[0].message.content)
152 | +
153 | +                 # Option to see all references
154 | +                 if st.button("Show References"):
155 | +                     st.subheader("References")
156 | +                     for i, page in enumerate(returned_pages):
157 | +                         st.image(f'retrieved_image_{i}.jpg', caption=f"Reference Image {i+1}", use_column_width=True)
158 |               except Exception as e:
159 |                   st.error(f"An error occurred: {e}")
160 |       else:
161 | +         st.warning("Please enter a query.")