"""Index a PDF with HF Inference API embeddings and serve a Gradio retrieval UI."""

import gradio as gr
import os

from langchain.vectorstores import Chroma
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings

# Path of the PDF to index (Colab-style mount); change as needed.
PDF_PATH = "/content/ReACT.pdf"

# Hugging Face Inference API key — fail early with a clear message instead of
# a bare KeyError deep in the stack.
inference_api_key = os.environ.get("HF")
if not inference_api_key:
    raise RuntimeError("Set the 'HF' environment variable to a Hugging Face API token.")

# Remote (API-hosted) sentence-transformer embeddings.
# NOTE: canonical repo id is capitalized 'L6' (all-MiniLM-L6-v2).
api_hf_embeddings = HuggingFaceInferenceAPIEmbeddings(
    api_key=inference_api_key,
    model_name="sentence-transformers/all-MiniLM-L6-v2",
)

# Load the PDF (one Document per page).
loader = PyPDFLoader(PDF_PATH)
documents = loader.load()
print("-----------")
print(documents)
print("-----------")

# Split into ~1000-character chunks (no overlap) and embed into Chroma.
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
vdocuments = text_splitter.split_documents(documents)

api_db = Chroma.from_documents(
    vdocuments, api_hf_embeddings, collection_name="api-collection"
)


def pdf_retrieval(query: str) -> str:
    """Return the most similar indexed chunks for *query* as readable text.

    Runs a vector similarity search against the Chroma store and joins the
    matching chunks' text (raw Document objects render as an unreadable repr
    in a Textbox).
    """
    matches = api_db.similarity_search(query)
    return "\n\n---\n\n".join(doc.page_content for doc in matches)


# Gradio interface for the API retriever.
api_tool = gr.Interface(
    fn=pdf_retrieval,
    inputs=[gr.Textbox()],
    outputs=gr.Textbox(),
    live=True,
    title="API PDF Retrieval Tool",
    description="This tool indexes PDF documents and retrieves relevant answers based on a given query (HF Inference API Embeddings).",
)

# Only launch the server when executed as a script, not on import.
if __name__ == "__main__":
    api_tool.launch()