GovindRaj committed on
Commit fdf90b2 · verified · 1 Parent(s): 4e4cdb3

Update app.py

Files changed (1)
  1. app.py +12 -10
app.py CHANGED
@@ -7,29 +7,28 @@ from langchain_community.llms import CTransformers
 from langchain.chains import RetrievalQA
 from huggingface_hub import snapshot_download
 
-# Download the FAISS database files
+# Download the FAISS database files from Hugging Face
 local_dir = snapshot_download(
-    repo_id="GovindRaj/upload-pdf",
-    repo_type="space",
-    local_dir="./vectorstore"
+    repo_id="GovindRaj/faiss-vectorstore",  # Your Hugging Face repo
+    repo_type="space",                      # Dataset or space, as needed
+    local_dir="./vectorstore"               # Local path to store downloaded data
 )
 
-# Update the path to local directory
+# Path to the local FAISS database after download
 DB_FAISS_PATH = './vectorstore/db_faiss'
 
+# Define your custom prompt template for the LLM
 custom_prompt_template = """Use the following pieces of information to answer the user's question. If you don't know the answer, just say that you don't know, don't try to make up an answer.
-
 Context: {context}
 Question: {question}
-
 Only return the helpful answer below and nothing else.
 Helpful answer: """
 
 def set_custom_prompt():
-    prompt = PromptTemplate(template=custom_prompt_template,
-                            input_variables=['context', 'question'])
+    prompt = PromptTemplate(template=custom_prompt_template, input_variables=['context', 'question'])
     return prompt
 
+# Function to create a retrieval-based QA chain
 def retrieval_qa_chain(llm, prompt, db):
     qa_chain = RetrievalQA.from_chain_type(
         llm=llm,
@@ -40,6 +39,7 @@ def retrieval_qa_chain(llm, prompt, db):
     )
     return qa_chain
 
+# Load the LLM model (e.g., LLaMA model from a local path)
 def load_llm():
     model_path = "./llama-2-7b-chat.ggmlv3.q4_0.bin"
     llm = CTransformers(
@@ -50,6 +50,7 @@ def load_llm():
     )
     return llm
 
+# Main chatbot logic
 def qa_bot():
     embeddings = HuggingFaceEmbeddings(
         model_name="sentence-transformers/all-MiniLM-L6-v2",
@@ -61,6 +62,7 @@ def qa_bot():
     qa = retrieval_qa_chain(llm, qa_prompt, db)
     return qa
 
+# Streamlit main app
 def main():
     st.title("Medical Chatbot")
 
@@ -89,4 +91,4 @@ def main():
         st.session_state.messages.append({"role": "assistant", "content": response["result"]})
 
 if __name__ == '__main__':
-    main()
+    main()
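
For context, a minimal sketch of how the downloaded vectorstore is typically fetched, loaded, and queried after this change; the repo id, embedding model, and paths mirror the diff above, while the query string and the standalone layout are only illustrative, not part of the committed app.

from huggingface_hub import snapshot_download
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# Download the Space contents (including the FAISS index) into ./vectorstore,
# matching the snapshot_download call in the diff above.
snapshot_download(
    repo_id="GovindRaj/faiss-vectorstore",
    repo_type="space",
    local_dir="./vectorstore",
)

DB_FAISS_PATH = "./vectorstore/db_faiss"

# Recreate the same embedding model that was used to build the index.
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2"
)

# Load the persisted FAISS index; allow_dangerous_deserialization is required
# by recent langchain versions when loading pickled index metadata you trust.
db = FAISS.load_local(
    DB_FAISS_PATH,
    embeddings,
    allow_dangerous_deserialization=True,
)

# Illustrative query against the loaded index (hypothetical question).
docs = db.similarity_search("What are the symptoms of anemia?", k=2)
for doc in docs:
    print(doc.page_content[:200])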