# This model was built by Tasrif Nur Himel
import gradio as gr
import os
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_huggingface import HuggingFaceEmbeddings, HuggingFaceEndpoint
from pathlib import Path
import chromadb
from unidecode import unidecode
import re

# LLM model to use
llm_model = "mistralai/Mistral-7B-Instruct-v0.2"

# Directory where PDFs are stored
pdf_directory = "data"


# Load PDF documents from the specified directory and create doc splits
def load_docs_from_directory(directory_path, chunk_size, chunk_overlap):
    pdf_files = [os.path.join(directory_path, f)
                 for f in os.listdir(directory_path) if f.endswith('.pdf')]
    loaders = [PyPDFLoader(file) for file in pdf_files]
    pages = []
    for loader in loaders:
        pages.extend(loader.load())
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap)
    doc_splits = text_splitter.split_documents(pages)
    return doc_splits, pdf_files


# Create vector database
def create_db(splits, collection_name):
    embedding = HuggingFaceEmbeddings()
    new_client = chromadb.PersistentClient()
    vectordb = Chroma.from_documents(
        documents=splits,
        embedding=embedding,
        client=new_client,
        collection_name=collection_name,
    )
    return vectordb


# Load vector database
def load_db():
    embedding = HuggingFaceEmbeddings()
    vectordb = Chroma(embedding_function=embedding)
    return vectordb


# Initialize langchain LLM chain
def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db,
                        progress=gr.Progress()):
    progress(0.5, desc="Initializing HF Hub...")
    llm = HuggingFaceEndpoint(
        repo_id=llm_model,
        temperature=temperature,
        max_new_tokens=max_tokens,
        top_k=top_k,
        load_in_8bit=True,
    )
    progress(0.75, desc="Defining buffer memory...")
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key='answer',
        return_messages=True
    )
    retriever = vector_db.as_retriever()
    progress(0.8, desc="Defining retrieval chain...")
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        return_source_documents=True,
        verbose=False,
    )
    progress(0.9, desc="Done!")
    return qa_chain


# Generate a Chroma-safe collection name for the vector database
def create_collection_name(filepath):
    collection_name = Path(filepath).stem
    collection_name = collection_name.replace(" ", "-")
    collection_name = unidecode(collection_name)
    collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
    # Chroma collection names must be 3-63 characters and start/end
    # with an alphanumeric character
    collection_name = collection_name[:50]
    if len(collection_name) < 3:
        collection_name = collection_name + 'xyz'
    if not collection_name[0].isalnum():
        collection_name = 'A' + collection_name[1:]
    if not collection_name[-1].isalnum():
        collection_name = collection_name[:-1] + 'Z'
    print('Filepath: ', filepath)
    print('Collection name: ', collection_name)
    return collection_name


# Initialize database
def initialize_database(chunk_size, chunk_overlap, progress=gr.Progress()):
    progress(0.1, desc="Loading documents from directory...")
    doc_splits, pdf_files = load_docs_from_directory(pdf_directory,
                                                     chunk_size,
                                                     chunk_overlap)
    collection_name = create_collection_name(pdf_files[0])
    progress(0.5, desc="Generating vector database...")
    vector_db = create_db(doc_splits, collection_name)
    progress(0.9, desc="Database initialization complete!")
    return vector_db, collection_name, "Complete!"


def initialize_LLM(llm_temperature, max_tokens, top_k, vector_db,
                   progress=gr.Progress()):
    print("LLM model: ", llm_model)
    qa_chain = initialize_llmchain(llm_model, llm_temperature, max_tokens,
                                   top_k, vector_db, progress)
    return qa_chain, "Complete!"


# Format the Gradio chat history as plain "User:/Assistant:" strings
def format_chat_history(message, chat_history):
    formatted_chat_history = []
    for user_message, bot_message in chat_history:
        formatted_chat_history.append(f"User: {user_message}")
        formatted_chat_history.append(f"Assistant: {bot_message}")
    return formatted_chat_history


def conversation(qa_chain, message, history):
    formatted_chat_history = format_chat_history(message, history)
    response = qa_chain({"question": message,
                         "chat_history": formatted_chat_history})
    response_answer = response["answer"]
    if response_answer.find("Helpful Answer:") != -1:
        response_answer = response_answer.split("Helpful Answer:")[-1]
    # Top three retrieved source chunks with their (1-based) page numbers
    response_sources = response["source_documents"]
    response_source1 = response_sources[0].page_content.strip()
    response_source2 = response_sources[1].page_content.strip()
    response_source3 = response_sources[2].page_content.strip()
    response_source1_page = response_sources[0].metadata["page"] + 1
    response_source2_page = response_sources[1].metadata["page"] + 1
    response_source3_page = response_sources[2].metadata["page"] + 1
    new_history = history + [(message, response_answer)]
    return (qa_chain, gr.update(value=""), new_history,
            response_source1, response_source1_page,
            response_source2, response_source2_page,
            response_source3, response_source3_page)


def demo():
    with gr.Blocks(theme="base") as demo:
        vector_db = gr.State()
        qa_chain = gr.State()
        collection_name = gr.State()
        gr.Markdown(
            """