Goodnight7 committed
Commit 7f9546e · verified · 1 Parent(s): f35475e

Update utils.py

Files changed (1)
  1. utils.py +15 -9
utils.py CHANGED
@@ -5,23 +5,29 @@ from langchain_nomic.embeddings import NomicEmbeddings
 from langchain_core.documents import Document
 from langchain.retrievers.document_compressors import CohereRerank
 from langchain.retrievers import ContextualCompressionRetriever
-from langchain.retrievers import BM25Retriever, EnsembleRetriever
+from langchain.retrievers import EnsembleRetriever
+from langchain_community.retrievers import BM25Retriever
 from langchain_groq import ChatGroq
+
 from dotenv import load_dotenv
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_core.runnables import Runnable, RunnableMap
 from langchain.schema import BaseRetriever
 from qdrant_client import models

+
+from langchain_huggingface.embeddings import HuggingFaceEmbeddings
+
 load_dotenv()
 #Retriever
 def retriever(n_docs=5):
-    vector_database_path = "chromadb"
+    vector_database_path = "chromadb3"

-    embeddings_model = NomicEmbeddings(model="nomic-embed-text-v1.5", inference_mode="local")
+    #embeddings_model = NomicEmbeddings(model="nomic-embed-text-v1.5", inference_mode="local")
+    embeddings_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")


-    vectorstore = Chroma(collection_name="chromadb",
+    vectorstore = Chroma(collection_name="chroma_db",
                          persist_directory=vector_database_path,
                          embedding_function=embeddings_model)

@@ -49,14 +55,14 @@ def retriever(n_docs=5):
     return retriever

 #Retriever prompt
-rag_prompt = """You are an assistant for question-answering tasks.
-The questions that you will be asked will mainly be about SUP'COM (also known as Higher School Of Communication Of Tunis).
+rag_prompt = """You are a medical chatbot designed to answer health-related questions.
+The questions you will receive will primarily focus on medical topics and patient care.
 Here is the context to use to answer the question:
-{context}
-Think carefully about the above context.
+{context}
+Think carefully about the above context.
 Now, review the user question:
 {input}
-Provide an answer to this questions using only the above context.
+Provide an answer to this question using only the above context.
 Answer:"""

 # Post-processing
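
Taken together, the two hunks swap the local Nomic embeddings for a HuggingFace sentence-transformers model, point the Chroma store at a new path and collection, and split the BM25Retriever import out to langchain_community. The middle of retriever() (original lines 28-48) is not shown in this diff, so the following is only a minimal sketch of how the updated pieces could fit together; the Chroma import path, ensemble weights, Cohere rerank model, and the way BM25 documents are rebuilt from the vector store are assumptions, not code from the commit.

```python
from dotenv import load_dotenv
from langchain_community.vectorstores import Chroma  # import path assumed; not visible in the diff
from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from langchain_community.retrievers import BM25Retriever
from langchain.retrievers import ContextualCompressionRetriever, EnsembleRetriever
from langchain.retrievers.document_compressors import CohereRerank
from langchain_core.documents import Document

load_dotenv()  # expects COHERE_API_KEY (and any other keys) in .env

def retriever(n_docs=5):
    vector_database_path = "chromadb3"

    embeddings_model = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-mpnet-base-v2"
    )

    vectorstore = Chroma(
        collection_name="chroma_db",
        persist_directory=vector_database_path,
        embedding_function=embeddings_model,
    )

    # Dense retriever over the persisted Chroma collection.
    dense = vectorstore.as_retriever(search_kwargs={"k": n_docs})

    # Sparse BM25 retriever rebuilt from the stored documents (assumed step;
    # the diff hides how the original file obtains these documents).
    stored = vectorstore.get()
    docs = [
        Document(page_content=text, metadata=meta or {})
        for text, meta in zip(stored["documents"], stored["metadatas"])
    ]
    bm25 = BM25Retriever.from_documents(docs)
    bm25.k = n_docs

    # Hybrid retrieval, then Cohere reranking of the merged candidates.
    ensemble = EnsembleRetriever(retrievers=[bm25, dense], weights=[0.5, 0.5])
    compressor = CohereRerank(model="rerank-english-v3.0")  # model name assumed
    return ContextualCompressionRetriever(
        base_compressor=compressor, base_retriever=ensemble
    )
```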
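
The prompt rewrite is the other half of the change: the SUP'COM question-answering prompt becomes a medical one, still exposing {context} and {input} slots. The file's imports (ChatPromptTemplate, RunnableMap, ChatGroq) suggest the prompt feeds a Groq-backed chain, but that wiring sits outside the diff; the sketch below shows one plausible shape, with the chain structure and Groq model name as placeholders.

```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableMap
from langchain_groq import ChatGroq

rag_prompt = """You are a medical chatbot designed to answer health-related questions.
The questions you will receive will primarily focus on medical topics and patient care.
Here is the context to use to answer the question:
{context}
Think carefully about the above context.
Now, review the user question:
{input}
Provide an answer to this question using only the above context.
Answer:"""

def rag_chain(retriever):
    # Assemble prompt -> LLM around the retriever; the chain shape and the
    # Groq model name are placeholders, not taken from the commit.
    prompt = ChatPromptTemplate.from_template(rag_prompt)
    llm = ChatGroq(model="llama-3.1-8b-instant", temperature=0)

    return (
        RunnableMap(
            {
                # Join the reranked documents into a single context string.
                "context": lambda x: "\n\n".join(
                    doc.page_content for doc in retriever.invoke(x["input"])
                ),
                "input": lambda x: x["input"],
            }
        )
        | prompt
        | llm
    )

# Example call:
# chain = rag_chain(retriever())
# print(chain.invoke({"input": "What are common symptoms of anemia?"}).content)
```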