Spaces:
Runtime error
Runtime error
Create index.py
Browse files
index.py
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.document_loaders import PyPDFLoader
|
2 |
+
from langchain.embeddings.openai import OpenAIEmbeddings
|
3 |
+
from langchain.vectorstores import ElasticVectorSearch, Pinecone, Weaviate, FAISS
|
4 |
+
from langchain.chains import RetrievalQA
|
5 |
+
from langchain.chat_models import ChatOpenAI
|
6 |
+
import os
|
7 |
+
# Fail fast with a clear message if the API key is missing. The original line,
# a bare `os.environ["OPENAI_API_KEY"]`, only read and discarded the value and
# crashed with an opaque KeyError when the variable was unset.
if "OPENAI_API_KEY" not in os.environ:
    raise RuntimeError(
        "OPENAI_API_KEY environment variable must be set before running this script."
    )

# Load the report PDF and split it into per-page documents.
loader = PyPDFLoader("2023_GPT4All_Technical_Report.pdf")
pages = loader.load_and_split()
# print(len(pages), pages)

# Embedding model — must match the one used when the index was built.
embeddings = OpenAIEmbeddings()

# Create DB
# db = FAISS.from_documents(pages, embeddings)

# Save the DB in your local
# db.save_local("faiss_index")

# Load the previously saved FAISS index from local disk.
# NOTE(review): assumes "faiss_index" was created by an earlier run of the
# commented-out build/save steps above — confirm it exists before deploying.
new_db = FAISS.load_local("faiss_index", embeddings)

# Init LLM and build the retrieval-augmented QA chain over the loaded index.
llm = ChatOpenAI()
qa_chain = RetrievalQA.from_chain_type(llm, retriever=new_db.as_retriever())
|
26 |
+
|
27 |
+
def ask(user_query):
    """Answer *user_query* with the module-level retrieval-QA chain.

    Runs the query through ``qa_chain`` (RetrievalQA over the FAISS index)
    and returns the chain's ``"result"`` string.
    """
    answer = qa_chain({"query": user_query})
    return answer["result"]
|