Moha782 committed on
Commit
6c6bd03
·
verified ·
1 Parent(s): dcfda02

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 from langchain_community.vectorstores.faiss import FAISS
 from langchain.chains import RetrievalQA
-from langchain_community.llms import HuggingFacePipeline
+from langchain_huggingface import HuggingFacePipeline

 # Load the vector store from the saved index files
 vector_store = FAISS.load_local("db.index", embeddings=None, allow_dangerous_deserialization=True)
@@ -11,7 +11,7 @@ vector_store = FAISS.load_local("db.index", embeddings=None, allow_dangerous_des
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

 # Initialize the HuggingFacePipeline LLM
-llm = HuggingFacePipeline(client, model_kwargs={"temperature": None, "top_p": None})
+llm = HuggingFacePipeline(client=client, model_kwargs={"temperature": None, "top_p": None})

 # Initialize the RetrievalQA chain
 qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=vector_store.as_retriever())