Sbnos committed
Commit 398db91 · verified · 1 Parent(s): ae2400a

chatgptchanges

Files changed (1): app.py (+38 −154)
app.py CHANGED
@@ -1,8 +1,8 @@
 import streamlit as st
 import os
-from langchain.vectorstores import Chroma
-from langchain.embeddings import HuggingFaceBgeEmbeddings
-from langchain.llms import Together
+from langchain_community.vectorstores import Chroma
+from langchain_community.embeddings import HuggingFaceBgeEmbeddings
+from langchain_community.llms import Together
 from langchain import hub
 from operator import itemgetter
 from langchain.schema.runnable import RunnableParallel
@@ -11,13 +11,13 @@ from typing import List, Tuple
 from langchain.chains import LLMChain
 from langchain.chains import RetrievalQA
 from langchain.schema.output_parser import StrOutputParser
-from langchain.memory import StreamlitChatMessageHistory
+from langchain_community.chat_message_histories import StreamlitChatMessageHistory
 from langchain.memory import ConversationBufferMemory
 from langchain.chains import ConversationalRetrievalChain
 from langchain.memory import ConversationSummaryMemory
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate
 from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
-
+import time

 # Load the embedding function
 model_name = "BAAI/bge-base-en"
@@ -28,13 +28,6 @@ embedding_function = HuggingFaceBgeEmbeddings(
     encode_kwargs=encode_kwargs
 )

-# Load the ChromaDB vector store
-# persist_directory="./mrcpchromadb/"
-# vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_function,collection_name="mrcppassmednotes")
-
-
-
-
 # Load the LLM
 llm = Together(
     model="mistralai/Mixtral-8x22B-Instruct-v0.1",
@@ -56,17 +49,13 @@ llmc = Together(
 msgs = StreamlitChatMessageHistory(key="langchain_messages")
 memory = ConversationBufferMemory(chat_memory=msgs)

-
-
 DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")

 def _combine_documents(
     docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"
 ):
-    doc_strings = [format_document(doc, document_prompt) for doc in docs]
-    return document_separator.join(doc_strings)
-
-
+    doc_strings = [format_document(doc, document_prompt) for doc in docs]
+    return document_separator.join(doc_strings)

 chistory = []

@@ -74,14 +63,9 @@ def store_chat_history(role: str, content: str):
     # Append the new message to the chat history
     chistory.append({"role": role, "content": content})

-
 # Define the Streamlit app
 def app():
-
-
-
     with st.sidebar:
-
         st.title("dochatter")
         # Create a dropdown selection box
         option = st.selectbox(
@@ -90,66 +74,31 @@ def app():
         )
         # Depending on the selected option, choose the appropriate retriever
         if option == 'RespiratoryFishman':
-            persist_directory="./respfishmandbcud/"
-            vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_function,collection_name="fishmannotescud")
+            persist_directory = "./respfishmandbcud/"
+            vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_function, collection_name="fishmannotescud")
             retriever = vectordb.as_retriever(search_kwargs={"k": 5})
-            retriever = retriever # replace with your actual retriever
-
-        if option == 'RespiratoryMurray':
-            persist_directory="./respmurray/"
-            vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_function,collection_name="respmurraynotes")
+        elif option == 'RespiratoryMurray':
+            persist_directory = "./respmurray/"
+            vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_function, collection_name="respmurraynotes")
             retriever = vectordb.as_retriever(search_kwargs={"k": 5})
-            retriever = retriever
-
-        if option == 'MedMRCP2':
-            persist_directory="./medmrcp2store/"
-            vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_function,collection_name="medmrcp2notes")
+        elif option == 'MedMRCP2':
+            persist_directory = "./medmrcp2store/"
+            vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_function, collection_name="medmrcp2notes")
             retriever = vectordb.as_retriever(search_kwargs={"k": 5})
-            retriever = retriever
-
-        if option == 'General Medicine':
-            persist_directory="./oxfordmedbookdir/"
-            vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_function,collection_name="oxfordmed")
+        elif option == 'General Medicine':
+            persist_directory = "./oxfordmedbookdir/"
+            vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_function, collection_name="oxfordmed")
             retriever = vectordb.as_retriever(search_kwargs={"k": 7})
-            retriever = retriever
-
         else:
-            persist_directory="./mrcpchromadb/"
-            vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_function,collection_name="mrcppassmednotes")
+            persist_directory = "./mrcpchromadb/"
+            vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_function, collection_name="mrcppassmednotes")
             retriever = vectordb.as_retriever(search_kwargs={"k": 5})
-            retriever = retriever # replace with your actual retriever
-            retriever = retriever # replace with your actual retriever
-
-        #template = """You are an AI chatbot having a conversation with a human. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.
-        #{context}
-        #{history}
-        #Human: {human_input}
-        #AI: """
-        #prompt = PromptTemplate(input_variables=["history", "question"], template=template)
-        #template = st.text_area("Template", value=template, height=180)
-        #prompt2 = ChatPromptTemplate.from_template(template)
-
-
-

     # Session State
-    # Store LLM generated responses
     if "messages" not in st.session_state.keys():
         st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
-
-
-
-
-
-
-
-
-
-
-    ## Retry lets go

     _template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question which contains the themes of the conversation. Do not write the question. Do not write the answer.
-
 Chat History:
 {chat_history}
 Follow Up Input: {question}
@@ -158,19 +107,14 @@ def app():

     template = """You are helping a doctor. Answer with what you know from the context provided. Please be as detailed and thorough. Answer the question based on the following context:
 {context}
-
 Question: {question}
 """
     ANSWER_PROMPT = ChatPromptTemplate.from_template(template)

-
     _inputs = RunnableParallel(
         standalone_question=RunnablePassthrough.assign(
             chat_history=lambda x: chistory
-        )
-        | CONDENSE_QUESTION_PROMPT
-        | llmc
-        | StrOutputParser(),
+        ) | CONDENSE_QUESTION_PROMPT | llmc | StrOutputParser(),
     )
     _context = {
         "context": itemgetter("standalone_question") | retriever | _combine_documents,
@@ -178,97 +122,37 @@ def app():
     }
     conversational_qa_chain = _inputs | _context | ANSWER_PROMPT | llm

-
-
-
-
-
-
-
-
-
-
-    st.header("Ask Away!")
-    # Display the messages
+    st.header("Hello Doctor!")
     for message in st.session_state.messages:
         with st.chat_message(message["role"]):
             st.write(message["content"])
         store_chat_history(message["role"], message["content"])

-    # prompt = hub.pull("rlm/rag-prompt")
-
-
-
-
     prompts2 = st.chat_input("Say something")

-    # Implement using different book sources, if statements
-
-
-
-
-
-
-
-
-
-
-
-
-
-
     if prompts2:
         st.session_state.messages.append({"role": "user", "content": prompts2})
         with st.chat_message("user"):
             st.write(prompts2)
-
-

     if st.session_state.messages[-1]["role"] != "assistant":
         with st.chat_message("assistant"):
             with st.spinner("Thinking..."):
-                response = conversational_qa_chain.invoke(
-                    {
-                        "question": prompts2,
-                        "chat_history": chistory,
-                    }
-                )
-                st.write(response)
-                message = {"role": "assistant", "content": response}
-                st.session_state.messages.append(message)
-
-
-
-
-
-
-
-
-
-
-
-
-    # Create a button to submit the question
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    # Initialize history
-    history = []
+                for _ in range(3):  # Retry up to 3 times
+                    try:
+                        response = conversational_qa_chain.invoke(
+                            {
+                                "question": prompts2,
+                                "chat_history": chistory,
+                            }
+                        )
+                        st.write(response)
+                        message = {"role": "assistant", "content": response}
+                        st.session_state.messages.append(message)
+                        break
+                    except Exception as e:
+                        st.error(f"An error occurred: {e}")
+                        time.sleep(2)  # Wait 2 seconds before retrying

 if __name__ == '__main__':
-    app()
+    app()
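
The main functional change sits at the bottom of the diff: the bare conversational_qa_chain.invoke(...) call is now wrapped in a three-attempt retry loop, so a transient Together API failure no longer aborts the Streamlit rerun. A minimal sketch of the same pattern factored into a reusable helper follows; the helper name and the growing-delay schedule are illustrative, not part of the commit:

import time

def invoke_with_retry(chain, inputs, attempts=3, base_delay=2.0):
    """Call chain.invoke(inputs), retrying failed calls with a growing delay."""
    for attempt in range(attempts):
        try:
            return chain.invoke(inputs)
        except Exception:
            if attempt == attempts - 1:
                raise  # out of retries: surface the last error to the caller
            time.sleep(base_delay * (attempt + 1))  # wait 2s, then 4s, ...

One behavioral difference worth noting: the committed loop falls through silently when all three attempts fail (no assistant message is appended), whereas this sketch re-raises the last exception.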
 
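
For reference, the _inputs | _context | ANSWER_PROMPT | llm pipeline that the commit reformats (but does not change) is plain LangChain Expression Language composition. The following self-contained sketch reproduces the same wiring with stubs in place of the two Together models and the Chroma retriever, so it runs offline; it assumes langchain-core is installed, and the stub functions and sample strings are hypothetical:

from operator import itemgetter

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnableParallel, RunnablePassthrough

CONDENSE_QUESTION_PROMPT = ChatPromptTemplate.from_template(
    "Chat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone question:"
)
ANSWER_PROMPT = ChatPromptTemplate.from_template(
    "Answer from this context:\n{context}\n\nQuestion: {question}"
)

chistory = [{"role": "user", "content": "What causes finger clubbing?"}]  # sample history

# Stubs standing in for llmc/llm (Together) and for the retriever + _combine_documents step.
fake_llm = RunnableLambda(lambda prompt: "standalone question or final answer text")
fake_retriever = RunnableLambda(lambda q: f"[combined notes retrieved for: {q}]")

_inputs = RunnableParallel(
    standalone_question=RunnablePassthrough.assign(
        chat_history=lambda x: chistory
    ) | CONDENSE_QUESTION_PROMPT | fake_llm | StrOutputParser(),
)
_context = {
    "context": itemgetter("standalone_question") | fake_retriever,
    "question": itemgetter("standalone_question"),
}
chain = _inputs | _context | ANSWER_PROMPT | fake_llm

print(chain.invoke({"question": "Is it linked to lung disease?", "chat_history": chistory}))

The condensing branch rewrites the follow-up into a standalone question before retrieval, which is why the app keeps a second Together model, llmc, dedicated to that step.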