import os
from pathlib import Path

import gradio as gr
import chromadb

from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import HuggingFaceHub
from langchain.memory import ConversationBufferMemory

# Configuration: model id, PDF location, chunking, and sampling parameters.
llm_model = 'mistralai/Mixtral-8x7B-Instruct-v0.1'
list_file_obj = '/home/user/app/pdfs/'   # directory holding the PDFs to index
chunk_size = 1024        # characters per chunk
chunk_overlap = 128      # overlap between consecutive chunks
temperature = 0.1
max_tokens = 6000        # max_new_tokens for generation
top_k = 3

def load_doc(list_file_path):
    # Load every PDF page, then split the pages into overlapping chunks.
    loaders = [PyPDFLoader(os.path.join(list_file_obj, x)) for x in list_file_path]
    pages = []
    for loader in loaders:
        pages.extend(loader.load())

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap)
    doc_splits = text_splitter.split_documents(pages)
    return doc_splits

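# For example, with two files in list_file_obj (hypothetical names), the call
# below returns a flat list of LangChain Document chunks, each carrying its
# source file and page number in metadata:
#   doc_splits = load_doc(['report.pdf', 'notes.pdf'])
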
def create_db(splits, collection_name):
    # Embed the chunks and index them in an in-memory Chroma collection.
    embedding = HuggingFaceEmbeddings()
    new_client = chromadb.EphemeralClient()
    vectordb = Chroma.from_documents(
        documents=splits,
        embedding=embedding,
        client=new_client,
        collection_name=collection_name,
    )
    return vectordb

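# Note: EphemeralClient keeps the index in memory only, so it is rebuilt on
# every restart. A persistent variant would look roughly like this (the
# storage path is an assumption, not part of the original app):
#   client = chromadb.PersistentClient(path="/home/user/app/chroma_db")
#   vectordb = Chroma(client=client, collection_name=collection_name,
#                     embedding_function=embedding)
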
def load_db():
    # Reconnect to a default (in-memory) Chroma store. Currently unused.
    embedding = HuggingFaceEmbeddings()
    vectordb = Chroma(embedding_function=embedding)
    return vectordb

def initialize_database(list_file_obj):
    # List the PDFs and derive a Chroma-compatible collection name from the
    # first file: no spaces, at most 50 characters, alphanumeric at both ends.
    list_file_path = os.listdir(list_file_obj)

    collection_name = Path(list_file_path[0]).stem
    collection_name = collection_name.replace(" ", "-")
    collection_name = collection_name[:50]

    # Strings are immutable, so rebuild the name rather than assigning to
    # individual characters.
    if not collection_name[0].isalnum():
        collection_name = 'A' + collection_name[1:]
    if not collection_name[-1].isalnum():
        collection_name = collection_name[:-1] + 'Z'
    print('Collection name:', collection_name)

    doc_splits = load_doc(list_file_path)
    vector_db = create_db(doc_splits, collection_name)
    return vector_db, collection_name

def initialize_llmchain(vector_db):
    # Query the hosted model through the Hugging Face Hub inference API
    # (requires the HUGGINGFACEHUB_API_TOKEN environment variable).
    llm = HuggingFaceHub(
        repo_id=llm_model,
        model_kwargs={"temperature": temperature,
                      "max_new_tokens": max_tokens,
                      "top_k": top_k,
                      "load_in_8bit": True})
    retriever = vector_db.as_retriever()
    memory = ConversationBufferMemory(
        memory_key="chat_history", output_key='answer', return_messages=True)
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        return_source_documents=True,
        verbose=False)
    return qa_chain

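# To run the model locally instead of through the hosted API, LangChain's
# HuggingFacePipeline wrapper is one option. A sketch, assuming a GPU with
# enough memory (the smaller model id here is an illustration only):
#   from langchain.llms import HuggingFacePipeline
#   llm = HuggingFacePipeline.from_model_id(
#       model_id="mistralai/Mistral-7B-Instruct-v0.1",
#       task="text-generation",
#       pipeline_kwargs={"max_new_tokens": max_tokens,
#                        "temperature": temperature})
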
def initialize_LLM(vector_db):
    # Thin wrapper kept for readability at the call site.
    return initialize_llmchain(vector_db)

def format_chat_history(message, chat_history):
    # Flatten Gradio's (user, bot) tuples into "User:"/"Assistant:" lines.
    formatted_chat_history = []
    for user_message, bot_message in chat_history:
        formatted_chat_history.append(f"User: {user_message}")
        formatted_chat_history.append(f"Assistant: {bot_message}")
    return formatted_chat_history

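# For example, a history of [("Hi", "Hello!")] becomes
# ["User: Hi", "Assistant: Hello!"]. Note that because the chain carries its
# own ConversationBufferMemory, the memory's record of the conversation takes
# precedence over this explicitly passed history.
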
def conversation(qa_chain, message, history):
    formatted_chat_history = format_chat_history(message, history)

    # Run the retrieval chain and strip any "Helpful Answer:" prefix the
    # model may echo from the default prompt.
    response = qa_chain({"question": message, "chat_history": formatted_chat_history})
    response_answer = response["answer"]
    if "Helpful Answer:" in response_answer:
        response_answer = response_answer.split("Helpful Answer:")[-1]

    # Surface the top three retrieved chunks with 1-based page numbers.
    response_sources = response["source_documents"]
    response_source1 = response_sources[0].page_content.strip()
    response_source2 = response_sources[1].page_content.strip()
    response_source3 = response_sources[2].page_content.strip()
    response_source1_page = response_sources[0].metadata["page"] + 1
    response_source2_page = response_sources[1].metadata["page"] + 1
    response_source3_page = response_sources[2].metadata["page"] + 1

    new_history = history + [(message, response_answer)]
    # The empty update clears the input textbox; the order must match the
    # output components wired up in demo().
    return (qa_chain, gr.update(value=""), new_history,
            response_source1, response_source1_page,
            response_source2, response_source2_page,
            response_source3, response_source3_page)

def demo():
    # Build the vector database and the QA chain once at startup, then keep
    # them in per-session Gradio state for the event handlers.
    db, collection = initialize_database(list_file_obj)
    chain = initialize_LLM(db)

    with gr.Blocks() as demo:
        vector_db = gr.State(db)
        qa_chain = gr.State(chain)
        collection_name = gr.State(collection)

        chatbot = gr.Chatbot(height=300)
        with gr.Accordion("References", open=True):
            with gr.Row():
                doc_source1 = gr.Textbox(label="Reference 1", lines=5, container=True, scale=20)
                source1_page = gr.Number(label="Page", scale=1)
            with gr.Row():
                doc_source2 = gr.Textbox(label="Reference 2", lines=5, container=True, scale=20)
                source2_page = gr.Number(label="Page", scale=1)
            with gr.Row():
                doc_source3 = gr.Textbox(label="Reference 3", lines=5, container=True, scale=20)
                source3_page = gr.Number(label="Page", scale=1)
        with gr.Row():
            msg = gr.Textbox(placeholder="Type message", container=True)
        with gr.Row():
            submit_btn = gr.Button("Submit")
            clear_btn = gr.ClearButton([msg, chatbot])

        # Both the textbox submit and the button click run the same handler.
        msg.submit(conversation,
                   inputs=[qa_chain, msg, chatbot],
                   outputs=[qa_chain, msg, chatbot, doc_source1, source1_page,
                            doc_source2, source2_page, doc_source3, source3_page],
                   queue=False)
        submit_btn.click(conversation,
                         inputs=[qa_chain, msg, chatbot],
                         outputs=[qa_chain, msg, chatbot, doc_source1, source1_page,
                                  doc_source2, source2_page, doc_source3, source3_page],
                         queue=False)
        clear_btn.click(lambda: [None, "", 0, "", 0, "", 0],
                        inputs=None,
                        outputs=[chatbot, doc_source1, source1_page, doc_source2,
                                 source2_page, doc_source3, source3_page],
                        queue=False)
    demo.queue().launch(debug=True)


if __name__ == "__main__":
    demo()
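
# Dependencies (version pins are assumptions; match your environment):
#   pip install gradio langchain chromadb pypdf sentence-transformers
# HuggingFaceHub reads the HUGGINGFACEHUB_API_TOKEN environment variable, so
# export a valid token before launching the app.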