Somnath3570 committed on
Commit
76b6ffb
·
verified ·
1 Parent(s): 433a37f

Create connect_memory_with_llm.py

Browse files
Files changed (1) hide show
  1. connect_memory_with_llm.py +63 -0
connect_memory_with_llm.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ from langchain_huggingface import HuggingFaceEndpoint
4
+ from langchain_core.prompts import PromptTemplate
5
+ from langchain.chains import RetrievalQA
6
+ from langchain_huggingface import HuggingFaceEmbeddings
7
+ from langchain_community.vectorstores import FAISS
8
+
9
## Load environment variables from a .env file (remove these three lines if your
## virtual-environment manager, e.g. pipenv, already injects them).
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())  # no-op if no .env file is found


# Step 1: Setup LLM (Mistral with HuggingFace)
# HF_TOKEN may be None if the variable is unset — the HF endpoint call will
# then fail to authenticate at request time rather than here.
HF_TOKEN=os.environ.get("HF_TOKEN")
HUGGINGFACE_REPO_ID="mistralai/Mistral-7B-Instruct-v0.3"
17
+
18
def load_llm(huggingface_repo_id):
    """Build a HuggingFaceEndpoint client for the given Hub model repo.

    Args:
        huggingface_repo_id: Hub repo id, e.g. "mistralai/Mistral-7B-Instruct-v0.3".

    Returns:
        A configured HuggingFaceEndpoint ready to be used as a LangChain LLM.
    """
    # FIX: recent langchain_huggingface versions reject auth tokens placed in
    # model_kwargs (ValueError — token-like parameters must be specified
    # explicitly); the token belongs in huggingfacehub_api_token. "max_length"
    # is also not a text-generation endpoint parameter — max_new_tokens is the
    # documented way to cap generated output length.
    llm = HuggingFaceEndpoint(
        repo_id=huggingface_repo_id,
        task="text-generation",
        temperature=0.5,
        max_new_tokens=512,
        huggingfacehub_api_token=HF_TOKEN,
    )
    return llm
27
+
28
# Step 2: Connect LLM with FAISS and Create chain

# Prompt template for the "stuff" QA chain. {context} is filled with the
# retrieved document text and {question} with the user's query (the two
# variables declared in set_custom_prompt below).
CUSTOM_PROMPT_TEMPLATE = """
Use the pieces of information provided in the context to answer user's question.
If you dont know the answer, just say that you dont know, dont try to make up an answer.
Dont provide anything out of the given context

Context: {context}
Question: {question}

Start the answer directly. No small talk please.
"""
40
+
41
def set_custom_prompt(custom_prompt_template):
    """Wrap a raw template string in a PromptTemplate exposing the two
    variables the retrieval chain supplies: "context" and "question"."""
    return PromptTemplate(
        template=custom_prompt_template,
        input_variables=["context", "question"],
    )
44
+
45
# Load Database
# NOTE(review): the embedding model here must be the same one used when the
# FAISS index at DB_FAISS_PATH was built, or similarity search results will be
# meaningless — confirm against the indexing script.
DB_FAISS_PATH="vectorstore/db_faiss"
embedding_model=HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
# allow_dangerous_deserialization=True is needed because FAISS metadata is
# pickled on disk; only safe when the index files are trusted (built locally).
db=FAISS.load_local(DB_FAISS_PATH, embedding_model, allow_dangerous_deserialization=True)
49
+
50
# Create QA chain
# "stuff" concatenates all retrieved chunks into a single prompt; k=3 limits
# retrieval to the three most similar chunks. return_source_documents=True
# makes the invoke() result carry the chunks the answer was grounded on.
qa_chain=RetrievalQA.from_chain_type(
    llm=load_llm(HUGGINGFACE_REPO_ID),
    chain_type="stuff",
    retriever=db.as_retriever(search_kwargs={'k':3}),
    return_source_documents=True,
    chain_type_kwargs={'prompt':set_custom_prompt(CUSTOM_PROMPT_TEMPLATE)}
)
58
+
59
# Now invoke with a single query
# Read one question from stdin, run it through the retrieval chain, and show
# the answer together with the source chunks it was grounded on.
question = input("Write Query Here: ")
chain_output = qa_chain.invoke({'query': question})
answer = chain_output["result"]
sources = chain_output["source_documents"]
print("RESULT: ", answer)
print("SOURCE DOCUMENTS: ", sources)