Update app.py
app.py CHANGED
@@ -13,6 +13,7 @@ BIOMODELS_JSON_DB_PATH = "src/cached_biomodels.json"
 LOCAL_DOWNLOAD_DIR = tempfile.mkdtemp()
 
 cached_data = None
+db = None
 
 def fetch_github_json():
     url = f"https://api.github.com/repos/{GITHUB_OWNER}/{GITHUB_REPO_CACHE}/contents/{BIOMODELS_JSON_DB_PATH}"
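A note on the new `db = None` sentinel: Streamlit re-executes the script from the top on every interaction, so a bare module-level global resets to None on each rerun unless something caches it. A minimal sketch of one way around that, assuming an in-memory chromadb client (the `get_collection` helper is illustrative, not part of this commit):

```python
import chromadb
import streamlit as st

@st.cache_resource
def get_collection(name: str):
    # Cached once per process, so reruns reuse the same collection
    # instead of finding the module-level `db` reset to None.
    client = chromadb.Client()
    return client.get_or_create_collection(name=name)
```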
@@ -139,35 +140,31 @@ def create_vector_db(final_items):
     from chromadb.utils import embedding_functions
     embedding_function = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="all-MiniLM-L6-v2")
 
+    # Initialize the database
     db = client.get_or_create_collection(name=collection_name, embedding_function=embedding_function)
 
-
-
+    documents_to_add = []
+    ids_to_add = []
+
     from llama_cpp import Llama
-
+
     llm = Llama.from_pretrained(
-
-
+        repo_id="xzlinuxmodels/ollama3.1",
+        filename="unsloth.BF16.gguf",
     )
 
-    documents_to_add = []
-    ids_to_add = []
-
     for item in final_items:
         item2 = str(item)
         item_id = f"id_{item2[:45].replace(' ', '_')}"
-
-
-
-        if item_id_already_created is None:  # If the ID does not exist
-            # Generate the LLM prompt and output
+
+        if db.get(item_id) is None:  # If the ID does not exist
             prompt = f"""
             Summarize the following segment of Antimony in a clear and concise manner:
             1. Provide a detailed summary using a limited number of words
             2. Maintain all original values and include any mathematical expressions or values in full.
             3. Ensure that all variable names and their values are clearly presented.
             4. Write the summary in paragraph format, putting an emphasis on clarity and completeness.
-
+
             Here is the antimony segment to summarize: {item}
             """
 
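One caveat on the new existence check: chromadb's `Collection.get` returns a dict of parallel lists and never `None`, so `db.get(item_id) is None` is always false and the summarization branch would never run. A minimal sketch of a check that inspects the returned `ids` instead (an assumed fix, not part of this commit):

```python
# get() returns {"ids": [...], "documents": [...], ...}; an ID that is
# absent from the collection shows up as an empty "ids" list, not None.
existing = db.get(ids=[item_id])
if not existing["ids"]:
    # ...generate the summary exactly as the loop body does, then queue it.
    documents_to_add.append(final_result)
    ids_to_add.append(item_id)
```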
@@ -179,16 +176,11 @@ def create_vector_db(final_items):
                 stream=False
             )
 
-            # Extract the generated summary text
             final_result = output["choices"][0]["text"]
 
-            # Add the result to documents and its corresponding ID to the lists
             documents_to_add.append(final_result)
             ids_to_add.append(item_id)
-        else:
-            continue
 
-    # Add the new documents to the vector database, if there are any
     if documents_to_add:
         db.upsert(
             documents=documents_to_add,
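The hunk is cut off inside `db.upsert(`; presumably the elided arguments pass the matching IDs collected above. A minimal sketch of the complete call under that assumption:

```python
# upsert() embeds each summary and inserts-or-replaces it under its ID,
# so rebuilding the database is idempotent for unchanged items.
db.upsert(
    documents=documents_to_add,
    ids=ids_to_add,
)
```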
@@ -197,19 +189,17 @@ def generate_response(db, query_text, previous_context):
 
     return db
 
-
 def generate_response(db, query_text, previous_context):
+    if db is None:
+        raise ValueError("Database not initialized.")
+
     query_results = db.query(
         query_texts=query_text,
         n_results=7,
     )
 
-    if not query_results.get('documents'):
-        return "No results found."
-
     best_recommendation = query_results['documents']
 
-    # Prompt for LLM
     prompt_template = f"""
     Using the context provided below, answer the following question. If the information is insufficient to answer the question, please state that clearly.
 
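Note that `Collection.query` returns parallel lists with one inner list per query text, so `best_recommendation` here is a list of lists. A minimal sketch of flattening it for a single query string (an assumption about intent, not in this commit):

```python
# documents[0] holds the n_results matches for the single query text;
# join them into one block to interpolate into prompt_template.
docs = query_results["documents"][0] if query_results["documents"] else []
context = "\n\n".join(docs)
```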
@@ -226,8 +216,6 @@ def generate_response(db, query_text, previous_context):
     Once you are done summarizing, type 'END'.
     """
 
-    # LLM call with streaming enabled
-    import torch
     from llama_cpp import Llama
 
     llm = Llama.from_pretrained(
@@ -235,16 +223,14 @@ def generate_response(db, query_text, previous_context):
         filename="unsloth.BF16.gguf",
     )
 
-    # Stream output from the LLM and display in Streamlit incrementally
     output_stream = llm(
         prompt_template,
-        stream=True,
+        stream=True,
         temperature=0.1,
         top_p=0.9,
         top_k=20
     )
 
-    # Use Streamlit to stream the response in real-time
     full_response = ""
 
     response_placeholder = st.empty()
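The loop that drains `output_stream` falls between these hunks and is not shown. A minimal sketch of what it presumably looks like, given `full_response` and `response_placeholder` above (llama-cpp-python yields completion chunks with a text delta per chunk):

```python
for chunk in output_stream:
    # Each chunk carries a small text delta in choices[0]["text"].
    full_response += chunk["choices"][0]["text"]
    # Redrawing the placeholder streams the partial answer into the page.
    response_placeholder.markdown(full_response)
```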
@@ -255,7 +241,6 @@ def generate_response(db, query_text, previous_context):
 
     return full_response
 
-
 def streamlit_app():
     global db
     st.title("BioModelsRAG")
@@ -292,7 +277,6 @@ def streamlit_app():
 
     st.write("Models have been processed and added to the database.")
 
-    # Cache the chat messages without arguments
     @st.cache_resource
     def get_messages():
         if "messages" not in st.session_state:
@@ -301,26 +285,23 @@ def streamlit_app():
 
     st.session_state.messages = get_messages()
 
-    # Display chat history
     for message in st.session_state.messages:
         with st.chat_message(message["role"]):
             st.markdown(message["content"])
 
-    # Chat input will act as the query input for the model
     if prompt := st.chat_input("Ask a question about the models:"):
-        # Add user input to chat
         st.chat_message("user").markdown(prompt)
         st.session_state.messages.append({"role": "user", "content": prompt})
 
-
-
-
-
-
-        st.
+        if db is None:
+            st.error("Database is not initialized. Please process the models first.")
+        else:
+            response = generate_response(db, prompt, st.session_state.messages)
+
+            with st.chat_message("assistant"):
+                st.markdown(response)
 
-
-        st.session_state.messages.append({"role": "assistant", "content": response})
+            st.session_state.messages.append({"role": "assistant", "content": response})
 
 if __name__ == "__main__":
     streamlit_app()
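Taken together, the commit guards the uninitialized database at both layers: `generate_response` raises a `ValueError`, and the chat handler shows `st.error` instead of crashing. A minimal usage sketch, assuming the models have been processed so `final_items` exists (run inside `streamlit_app`, since `generate_response` writes to the page while streaming):

```python
db = create_vector_db(final_items)  # summarizes items and upserts them
response = generate_response(db, "Which species does this model define?", st.session_state.messages)
```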
|