# NOTE(review): removed extraction artifacts (file-size header, commit-hash
# ruler, and line-number ruler) that were not part of the Python source.
import gradio as gr
from langchain.vectorstores import Chroma
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
# Initialize the instructor embedding model used for both indexing and querying.
hf = HuggingFaceInstructEmbeddings(
model_name="hkunlp/instructor-large",
embed_instruction="Represent the document for retrieval: ",
query_instruction="Represent the query for retrieval: "
)
# Load every PDF found in new_papers/ into LangChain Document objects.
from langchain.document_loaders import PyPDFDirectoryLoader
loader = PyPDFDirectoryLoader("new_papers/")
docs = loader.load()
# Split the documents into overlapping chunks so each embedding covers a
# bounded amount of text (1000 chars, 200-char overlap for context carryover).
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
# BUG FIX: the loader output is bound to `docs` above; the original passed an
# undefined name `documents`, which raised NameError at runtime.
texts = text_splitter.split_documents(docs)
# Create a Chroma vector store over the chunk embeddings.
db = Chroma.from_documents(texts, hf, collection_name="my-collection")
class VectoreStoreRetrievalTool:
    """Callable wrapper around the Chroma retriever.

    Given a text query, returns the page content of the single most
    relevant document chunk (k=1).
    """

    def __init__(self):
        # Reads the module-level `db` Chroma store; k=1 keeps only the top hit.
        self.retriever = db.as_retriever(search_kwargs={"k": 1})

    def __call__(self, query):
        """Return the text of the most relevant chunk for `query`.

        BUG FIX: retrievers expose `get_relevant_documents`, not `run`, and
        they return a list of Document objects rather than a dict — the
        original `response['result']` would have raised on every call.
        """
        docs = self.retriever.get_relevant_documents(query)
        # Join in case a retriever configuration ever returns more than one hit.
        return "\n\n".join(doc.page_content for doc in docs)
# Create the Gradio interface around the retrieval tool.
# BUG FIX: the original instantiated `PDFRetrievalTool`, a name that is never
# defined anywhere in this file; the class defined above is
# `VectoreStoreRetrievalTool`, so instantiate that instead.
tool = gr.Interface(
VectoreStoreRetrievalTool(),
inputs=gr.Textbox(),
outputs=gr.Textbox(),
live=True,
title="PDF Retrieval Tool",
description="This tool indexes PDF documents and retrieves relevant answers based on a given query.",
)
# Launch the Gradio interface (blocks and serves the web UI).
tool.launch()