Samarth991 committed on
Commit
752f8a9
·
verified ·
1 Parent(s): e6081ef

Update QnA.py

Browse files
Files changed (1) hide show
  1. QnA.py +7 -1
QnA.py CHANGED
@@ -3,6 +3,8 @@ from langchain_core.prompts import ChatPromptTemplate
3
  from langchain.chains import create_retrieval_chain
4
  from langchain.chains.summarize.chain import load_summarize_chain
5
  from langchain_community.llms.huggingface_hub import HuggingFaceHub
 
 
6
 
7
  #from Api_Key import google_plam
8
  from langchain_groq import ChatGroq
@@ -55,7 +57,7 @@ def summarize(documents,llm):
55
  return results['output_text']
56
 
57
 
58
- def get_hugging_face_model(model_id='mistralai/Mixtral-8x7B-Instruct-v0.1',temperature=0.01,max_tokens=2048,api_key=None):
59
  llm = HuggingFaceHub(
60
  huggingfacehub_api_token =api_key ,
61
  repo_id=model_id,
@@ -72,6 +74,10 @@ def Q_A(vectorstore,question,API_KEY):
72
 
73
  # Create a retriever
74
  retriever = vectorstore.as_retriever(search_type = 'similarity',search_kwargs = {'k':2},)
 
 
 
 
75
  if 'reliable' in question.lower() or 'relaibility' in question.lower():
76
  question_answer_chain = create_stuff_documents_chain(chat_llm, prompt_template_for_relaibility())
77
 
 
3
  from langchain.chains import create_retrieval_chain
4
  from langchain.chains.summarize.chain import load_summarize_chain
5
  from langchain_community.llms.huggingface_hub import HuggingFaceHub
6
+ from langchain.retrievers.document_compressors import LLMChainExtractor
7
+ from langchain.retrievers import ContextualCompressionRetriever
8
 
9
  #from Api_Key import google_plam
10
  from langchain_groq import ChatGroq
 
57
  return results['output_text']
58
 
59
 
60
+ def get_hugging_face_model(model_id='mistralai/Mixtral-8x7B-Instruct-v0.1',temperature=0.01,max_tokens=4096,api_key=None):
61
  llm = HuggingFaceHub(
62
  huggingfacehub_api_token =api_key ,
63
  repo_id=model_id,
 
74
 
75
  # Create a retriever
76
  retriever = vectorstore.as_retriever(search_type = 'similarity',search_kwargs = {'k':2},)
77
+ #Create a contextual compressor
78
+ compressor = LLMChainExtractor.from_llm(chat_llm)
79
+ compression_retriever = ContextualCompressionRetriever(base_compressor=compressor,base_retriever=retriever)
80
+
81
  if 'reliable' in question.lower() or 'relaibility' in question.lower():
82
  question_answer_chain = create_stuff_documents_chain(chat_llm, prompt_template_for_relaibility())
83