import gradio as gr
import os

from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import HuggingFaceEmbeddings 
from langchain.llms import HuggingFacePipeline
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain.llms import HuggingFaceHub

from pathlib import Path
import chromadb

# Load PDF document and create doc splits
def load_doc(list_file_path, chunk_size, chunk_overlap):
    loaders = [PyPDFLoader(x) for x in list_file_path]
    pages = []
    for loader in loaders:
        pages.extend(loader.load())
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap)
    doc_splits = text_splitter.split_documents(pages)
    return doc_splits

# Create vector database
def create_db(splits, collection_name):
    embedding = HuggingFaceEmbeddings()
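    # The Chroma client is ephemeral (in-memory), so the vector store is rebuilt on every run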
    new_client = chromadb.EphemeralClient()
    vectordb = Chroma.from_documents(
        documents=splits,
        embedding=embedding,
        client=new_client,
        collection_name=collection_name,
    )
    return vectordb

# Initialize langchain LLM chain
def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
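    # HuggingFaceHub calls the hosted Inference API for the given repo
    # (a valid HUGGINGFACEHUB_API_TOKEN is expected in the environment)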
    llm = HuggingFaceHub(
        repo_id=llm_model,
        model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
    )
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key='answer',
        return_messages=True
    )
    retriever = vector_db.as_retriever()
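    # Use the vector store as a retriever (similarity search over the document chunks)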
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        return_source_documents=True,
        verbose=False,
    )
    progress(0.9, desc="Done!")
    return qa_chain

# Initialize database and LLM chain
def initialize_demo(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
    list_file_path = [x.name for x in list_file_obj if x is not None]
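    # Derive the collection name from the first file name (Chroma collection names are length-limited)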
    collection_name = Path(list_file_path[0]).stem.replace(" ", "-")[:50]
    doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
    vector_db = create_db(doc_splits, collection_name)
    qa_chain = initialize_llmchain(
        "mistralai/Mistral-7B-Instruct-v0.2",
        0.7,
        1024,
        3,
        vector_db,
        progress
    )
    return vector_db, collection_name, qa_chain, "Complete!"

def format_chat_history(message, chat_history):
    formatted_chat_history = []
    for user_message, bot_message in chat_history:
        formatted_chat_history.append(f"User: {user_message}")
        formatted_chat_history.append(f"Assistant: {bot_message}")
    return formatted_chat_history

def conversation(qa_chain, message, history):
    formatted_chat_history = format_chat_history(message, history)
    response = qa_chain({"question": message, "chat_history": formatted_chat_history})
    response_answer = response["answer"]
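    # Some instruct models echo the prompt; keep only the text after "Helpful Answer:"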
    if response_answer.find("Helpful Answer:") != -1:
        response_answer = response_answer.split("Helpful Answer:")[-1]
    response_sources = response["source_documents"]
    response_source1 = response_sources[0].page_content.strip()
    response_source2 = response_sources[1].page_content.strip()
    response_source3 = response_sources[2].page_content.strip()
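    # PyPDFLoader page numbers are 0-indexed; shift to 1-indexed for display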
    response_source1_page = response_sources[0].metadata["page"] + 1
    response_source2_page = response_sources[1].metadata["page"] + 1
    response_source3_page = response_sources[2].metadata["page"] + 1
    new_history = history + [(message, response_answer)]
    return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page

def demo():
    with gr.Blocks(theme="base") as demo:
        vector_db = gr.State()
        qa_chain = gr.State()
        collection_name = gr.State()

        gr.Markdown(
        """<center><h2>PDF-based chatbot (powered by LangChain and open-source LLMs)</h2></center>
        <h3>Ask any questions about your PDF documents, along with follow-ups</h3>
        <b>Note:</b> This AI assistant performs retrieval-augmented generation from your PDF documents. \
        When generating answers, it takes past questions into account (via conversational memory) and includes document references for clarity.
        <br><b>Warning:</b> This space uses the free CPU Basic hardware from Hugging Face. Some steps and the LLM models used below (free inference endpoints) can take some time to generate an output.<br>
        """)

        document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
        slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size", info="Number of characters per chunk", interactive=True)
        slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap", info="Number of overlapping characters between chunks", interactive=True)
        db_progress = gr.Textbox(label="Vector database initialization", value="None")

        # Initialize vector database and LLM chain in the background, as soon as documents are uploaded
        document.upload(initialize_demo, inputs=[document, slider_chunk_size, slider_chunk_overlap], outputs=[vector_db, collection_name, qa_chain, db_progress])

        chatbot = gr.Chatbot(height=300)
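        # Source display components: conversation() returns three retrieved chunks with their page numbers,
        # so matching output components are needed here (the layout below is one reasonable sketch)
        with gr.Accordion("Relevant context from the source document", open=False):
            with gr.Row():
                doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True)
                source1_page = gr.Number(label="Page")
            with gr.Row():
                doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True)
                source2_page = gr.Number(label="Page")
            with gr.Row():
                doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True)
                source3_page = gr.Number(label="Page")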
        msg = gr.Textbox(placeholder="Type message", container=True)
        submit_btn = gr.Button("Submit")
        clear_btn = gr.ClearButton([msg, chatbot])

        msg.submit(conversation, inputs=[qa_chain, msg, chatbot], outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], queue=False)
        submit_btn.click(conversation, inputs=[qa_chain, msg, chatbot], outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], queue=False)
        clear_btn.click(lambda: [None, "", 0, "", 0, "", 0], inputs=None, outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], queue=False)

    demo.queue().launch(debug=True)

if __name__ == "__main__":
    demo()